mirror of
https://github.com/linux-apfs/apfstests.git
synced 2026-05-01 15:01:44 -07:00
47e5d7d2bb
Check kernel BUG, WARNING etc. in dmesg log after each test and fail the test if something is found. dmesg log can be found at result dir. This check now depends on the logging of running tests in dmesg, so this check can be done without clearing dmesg buffer nor dumping all dmesg to a file, which can potentially eat all free space on testing host. Signed-off-by: Eryu Guan <eguan@redhat.com> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
687 lines
15 KiB
Bash
Executable File
687 lines
15 KiB
Bash
Executable File
#!/bin/bash
|
|
#
|
|
# Control script for QA
|
|
#
|
|
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
|
|
#
|
|
# This program is free software; you can redistribute it and/or
|
|
# modify it under the terms of the GNU General Public License as
|
|
# published by the Free Software Foundation.
|
|
#
|
|
# This program is distributed in the hope that it would be useful,
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
# GNU General Public License for more details.
|
|
#
|
|
# You should have received a copy of the GNU General Public License
|
|
# along with this program; if not, write the Free Software Foundation,
|
|
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
|
#
|
|
#
|
|
|
|
# Per-invocation temp file prefix; $$ keeps concurrent runs apart.
tmp=/tmp/$$
# Exit status handed to the exit trap; accumulates total failures.
status=0
# _wrapup still needs to run for the current section.
needwrap=true
# _summary still needs to print the overall summary.
needsum=true
# Tests attempted / failed in the current section, and their names.
n_try=0
try=""
n_bad=0
# Failures accumulated across all sections.
sum_bad=0
bad=""
# Tests that declared themselves "not run".
notrun=""
# Cleared on the normal exit path; the exit trap logs "Interrupted!"
# while this is still true.
interrupt=true
diff="diff -u"
# -n: list tests without running them.
showme=false
have_test_arg=false
# -r: shuffle the test order.
randomize=false
export here=`pwd`
# -X exclude-file name, if any.
xfile=""

# start the initialisation work now
iam=check

export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}

# number of diff lines from a failed test, 0 for whole output
export DIFF_LENGTH=${DIFF_LENGTH:=10}

# by default don't output timestamps
timestamp=${TIMESTAMP:=false}

# Drop stale state from a previous run under the same prefix.
rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist

# we need common/config
if ! . ./common/config
then
    echo "$iam: failed to source common/config"
    exit 1
fi

# Test ids are three or four decimal digits.
SUPPORTED_TESTS="[0-9][0-9][0-9] [0-9][0-9][0-9][0-9]"
SRC_GROUPS="generic shared"
export SRC_DIR="tests"
|
|
|
|
# Print command line help and exit.
# NOTE(review): exits 0 even when reached via an unknown option (-*);
# callers treat any exit from here as terminal.
usage()
{
    echo "Usage: $0 [options] [testlist]"'

check options
    -nfs		test NFS
    -cifs		test CIFS
    -tmpfs		test TMPFS
    -l			line mode diff
    -udiff		show unified diff (default)
    -n			show me, do not run tests
    -T			output timestamps
    -r			randomize test order
    --large-fs		optimise scratch device for large filesystems
    -s section		run only specified section from config file

testlist options
    -g group[,group...]	include tests from these groups
    -x group[,group...]	exclude tests from these groups
    -X file		exclude individual tests
    -E external_file	exclude individual tests
    [testlist]	include tests matching names in testlist
'
    exit 0
}
|
|
|
|
# Print the full paths of every test belonging to group $1.
# Each test directory ($SRC_GROUPS plus the current $FSTYP) carries a
# "group" file whose lines look like "<test-id> <group> <group> ...".
# Output: space-separated "$SRC_DIR/<dir>/<test-id>" list on stdout.
get_group_list()
{
    grp=$1

    for d in $SRC_GROUPS $FSTYP; do
        # Strip comments, append a trailing blank so every group name
        # is space-delimited, then print "dir/test-id" for lines that
        # mention $grp.
        # Fix: match three *or more* leading digits
        # (\([0-9][0-9][0-9][0-9]*\)) so four-digit test ids (see
        # SUPPORTED_TESTS) resolve to their full id instead of being
        # truncated to a bogus three-digit prefix.
        l=$(sed -n < $SRC_DIR/$d/group \
            -e 's/#.*//' \
            -e 's/$/ /' \
            -e "s;\(^[0-9][0-9][0-9][0-9]*\).* $grp .*;$SRC_DIR/$d/\1;p")
        grpl="$grpl $l"
    done
    # Unquoted echo collapses the accumulated whitespace into single
    # spaces. (grpl leaks, but callers invoke us in a $() subshell.)
    echo $grpl
}
|
|
|
|
# find all tests, excluding files that are test metadata such as group files.
|
|
# This assumes that tests are defined purely by alphanumeric filenames with no
|
|
# ".xyz" extensions in the name.
|
|
# Append every candidate test file to $tmp.list.  Anything whose path
# contains a "." (e.g. 001.out golden output) and the group/Makefile
# metadata files are not tests and get filtered out.
get_all_tests()
{
    touch $tmp.list
    for d in $SRC_GROUPS $FSTYP; do
        ls $SRC_DIR/$d/* \
            | grep -v "\..*" \
            | grep -v -e "group" -e "Makefile" >> $tmp.list 2>/dev/null
    done
}
|
|
|
|
# takes the list of tests to run in $tmp.list, and removes the tests passed to
|
|
# the function from that list.
|
|
# Remove the tests passed as arguments from the master list kept in
# $tmp.list.  Each test name becomes a fully-anchored grep pattern in
# $tmp.grep; the patterns are applied in batches of ~100 so an enormous
# exclude list cannot blow grep's pattern-file handling out of
# proportion.
trim_test_list()
{
    test_list="$*"

    rm -f $tmp.grep
    numsed=0
    for t in $test_list
    do
        # Flush the current batch of patterns before it grows too big.
        if [ $numsed -gt 100 ]; then
            grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
            mv $tmp.tmp $tmp.list
            numsed=0
            rm -f $tmp.grep
        fi
        echo "^$t\$" >>$tmp.grep
        # Arithmetic expansion instead of forking expr(1) per test.
        numsed=$((numsed + 1))
    done
    # Apply whatever patterns remain in the final partial batch.
    grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
    mv $tmp.tmp $tmp.list
}
|
|
|
|
|
|
# Seconds since the Unix epoch; used to time each test run.
_wallclock()
{
    date +%s
}
|
|
|
|
# Emit the current time-of-day as " [HH:MM:SS]" with no trailing
# newline, for appending to a per-test status line.
_timestamp()
{
    local stamp
    stamp=$(date "+%T")
    printf ' [%s]' "$stamp"
}
|
|
|
|
# Build the global $list of tests to run for the current section, by
# combining: explicit tests from the command line ($tmp.arglist),
# -g group includes ($GROUP_LIST), and -x group excludes
# ($XGROUP_LIST).  With no tests and no groups given, every test is
# selected via get_all_tests.
_prepare_test_list()
{
    unset list
    # Tests specified on the command line
    if [ -s $tmp.arglist ]; then
        cat $tmp.arglist > $tmp.list
    else
        touch $tmp.list
    fi

    # Specified groups to include
    for group in $GROUP_LIST; do
        list=$(get_group_list $group)
        if [ -z "$list" ]; then
            echo "Group \"$group\" is empty or not defined?"
            exit 1
        fi

        # Add only tests not already present, to avoid duplicates.
        for t in $list; do
            grep -s "^$t\$" $tmp.list >/dev/null || \
                echo "$t" >>$tmp.list
        done
    done

    if ! $have_test_arg && [ -z "$GROUP_LIST" ]; then
        # no test numbers, do everything
        get_all_tests
    fi

    # Specified groups to exclude
    for xgroup in $XGROUP_LIST; do
        list=$(get_group_list $xgroup)
        if [ -z "$list" ]; then
            echo "Group \"$xgroup\" is empty or not defined?"
            exit 1
        fi

        trim_test_list $list
    done

    # sort the list of tests into numeric order
    list=`sort -n $tmp.list | uniq`
    rm -f $tmp.list $tmp.tmp $tmp.grep

    if $randomize
    then
        # randomize.awk lives in the fstests top directory — TODO
        # confirm it is present before enabling -r.
        list=`echo $list | awk -f randomize.awk`
    fi
}
|
|
|
|
# Process command arguments first.
|
|
# Process command arguments first.  Parsing stops at the first word
# that is not an option — that word and everything after it form the
# test list, handled by the loop further below.
while [ $# -gt 0 ]; do
    case "$1" in
    -\? | -h | --help) usage ;;

    -nfs) FSTYP=nfs ;;
    -cifs) FSTYP=cifs ;;
    -tmpfs) FSTYP=tmpfs ;;

    -g) group=$2 ; shift ;
        GROUP_LIST="$GROUP_LIST $group"
        ;;

    -x) xgroup=$2 ; shift ;
        XGROUP_LIST="$XGROUP_LIST $xgroup"
        ;;

    -X) xfile=$2; shift ;
        # Expand the exclude file against every test directory; each
        # entry becomes "dir/test" in $tmp.xlist.
        for d in $SRC_GROUPS $FSTYP; do
            [ -f $SRC_DIR/$d/$xfile ] || continue
            for f in `cat $SRC_DIR/$d/$xfile`; do
                echo $d/$f >> $tmp.xlist
            done
        done
        ;;
    -E) xfile=$2; shift ;
        # External exclude file: entries are taken verbatim.
        if [ -f $xfile ]; then
            cat "$xfile" >> $tmp.xlist
        fi
        ;;
    -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
    -l) diff="diff" ;;
    -udiff) diff="$diff -u" ;;

    -n) showme=true ;;
    -r) randomize=true ;;

    -T) timestamp=true ;;

    --large-fs) export LARGE_SCRATCH_DEV=yes ;;
    # Fix: the original expanded ${r#*=}, but no variable "r" exists
    # anywhere in this script — the option word being parsed is $1, so
    # the requested size was silently dropped.
    --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;

    -*) usage ;;
    *) # not an argument, we've got tests now.
       have_test_arg=true ;;
    esac

    # if we've found a test specification, the break out of the processing
    # loop before we shift the arguments so that this is the first argument
    # that we process in the test arg loop below.
    if $have_test_arg; then
        break;
    fi

    shift
done
|
|
|
|
# Process tests from command line now.
|
|
# Process tests from command line now.  Each remaining word must be a
# test path ("dir/name" under $SRC_DIR); it is validated against the
# directory's group file and collected into $tmp.arglist.
if $have_test_arg; then
    while [ $# -gt 0 ]; do
        case "$1" in
        -*) # options may not follow the first test name
            # (typo fix: "Argments" -> "Arguments")
            echo "Arguments before tests, please!"
            status=1
            exit $status
            ;;
        *)  # split the word into its test directory and test name
            test_dir=`dirname $1`
            test_dir=${test_dir#$SRC_DIR/*}
            test_name=`basename $1`
            group_file=$SRC_DIR/$test_dir/group

            if egrep "^$test_name" $group_file >/dev/null ; then
                # in group file ... OK
                echo $SRC_DIR/$test_dir/$test_name >>$tmp.arglist
            else
                # oops
                echo "$1 - unknown test, ignored"
            fi
            ;;
        esac

        shift
    done
fi
|
|
|
|
# we need common/rc
if ! . ./common/rc
then
    echo "check: failed to source common/rc"
    exit 1
fi

# Tests format, mount and unmount block devices and write to the
# kernel log, so anything but root cannot work.
if [ `id -u` -ne 0 ]
then
    echo "check: QA must be run as root"
    exit 1
fi
|
|
|
|
# Reset the per-section pass/fail accounting before the next section.
_wipe_counters()
{
    n_try=0
    n_bad=0
    unset try notrun bad
}
|
|
|
|
# Per-section accounting.  Merges this run's per-test timings into
# $check.time, appends the run record to $check.log, accumulates the
# human-readable section report in $tmp.summary and resets the
# counters.  Guarded by $needwrap so it runs at most once per section;
# it is also invoked from the signal/exit traps.
_wrapup()
{
    seq="check"
    check="$RESULT_BASE/check"

    if $showme
    then
        :
    elif $needwrap
    then
        if [ -f $check.time -a -f $tmp.time ]
        then
            # Merge new timings over the historical ones; for a test
            # present in both files the later (new) entry wins.
            cat $check.time $tmp.time \
            | $AWK_PROG '
            { t[$1] = $2 }
            END { if (NR > 0) {
                for (i in t) print i " " t[i]
            }
            }' \
            | sort -n >$tmp.out
            mv $tmp.out $check.time
        fi

        echo "" >>$check.log
        date >>$check.log
        echo $list | fmt | sed -e 's/^/ /' -e "s;$SRC_DIR/;;g" >>$check.log
        # $interrupt is only false on the normal exit path at the
        # bottom of the script.
        $interrupt && echo "Interrupted!" >>$check.log

        echo "SECTION -- $section" >>$tmp.summary
        echo "=========================" >>$tmp.summary
        if [ ! -z "$n_try" -a $n_try != 0 ]
        then
            echo "Ran:$try"
            echo "Ran:$try" >>$tmp.summary
        fi

        if [ ! -z "$notrun" ]
        then
            echo "Not run:$notrun"
            echo "Not run:$notrun" >>$check.log
            echo "Not run:$notrun" >>$tmp.summary
        fi

        if [ ! -z "$n_bad" -a $n_bad != 0 ]
        then
            echo "Failures:$bad"
            echo "Failed $n_bad of $n_try tests"
            echo "Failures:$bad" | fmt >>$check.log
            echo "Failed $n_bad of $n_try tests" >>$check.log
            echo "Failures:$bad" >>$tmp.summary
            echo "Failed $n_bad of $n_try tests" >>$tmp.summary
        else
            echo "Passed all $n_try tests"
            echo "Passed all $n_try tests" >>$check.log
            echo "Passed all $n_try tests" >>$tmp.summary
        fi
        echo "" >>$tmp.summary
        needwrap=false
    fi

    sum_bad=`expr $sum_bad + $n_bad`
    _wipe_counters
    rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
    if ! $OPTIONS_HAVE_SECTIONS; then
        # single-section run: nothing left to summarise, drop temps now
        rm -f $tmp.*
    fi
}
|
|
|
|
# Final whole-run report (multi-section mode): wrap up the current
# section, then print the summary accumulated in $tmp.summary exactly
# once ($needsum guard) and remove all temp files.  Also runs from the
# exit trap.
_summary()
{
    _wrapup
    if $showme; then
        :
    elif $needsum; then
        # Fix: the original computed `wc -L $tmp.summary | cut ...`
        # into $count, which was never read — two forks for nothing.
        cat $tmp.summary
        needsum=false
    fi
    rm -f $tmp.*
}
|
|
|
|
# A test drops a "require_test"/"require_scratch" flag file into its
# result directory when it touched that filesystem.  Check each
# flagged filesystem for consistency, record a failure in $err, and
# consume the flag so the next test starts clean.
_check_filesystems()
{
    local flag

    for flag in require_test require_scratch; do
        [ -f "${RESULT_DIR}/$flag" ] || continue
        case "$flag" in
        require_test)
            _check_test_fs || err=true
            ;;
        require_scratch)
            _check_scratch_fs || err=true
            ;;
        esac
        rm -f "${RESULT_DIR}/$flag"
    done
}
|
|
|
|
# Scan the kernel log of the test that just ran for BUGs, WARNINGs,
# Oopses and lockdep splats; set $err on a hit.  Only runs when the
# per-test "check_dmesg" flag exists (created just before the test was
# started), and consumes that flag.
_check_dmesg()
{
    if [ ! -f "${RESULT_DIR}/check_dmesg" ]; then
        return 0
    fi
    rm -f "${RESULT_DIR}/check_dmesg"

    # Isolate the dmesg lines belonging to the last run of $seqnum:
    # reverse the log, keep everything up to the "run fstests" marker
    # we wrote, then reverse again.  sed's \#regexp# address form is
    # used because $seqnum contains "/".
    dmesg | tac | sed -ne "0,\#run fstests $seqnum at $date_time#p" | \
        tac >$seqres.dmesg

    if grep -q -e "kernel BUG at" \
        -e "WARNING:" \
        -e "BUG:" \
        -e "Oops:" \
        -e "possible recursive locking detected" \
        $seqres.dmesg; then
        echo "_check_dmesg: something found in dmesg (see $seqres.dmesg)"
        err=true
    else
        # clean log: keep the results directory tidy
        rm -f $seqres.dmesg
    fi
}
|
|
|
|
|
|
# Build the initial test list from the parsed arguments/groups.
_prepare_test_list

# Guarantee the accounting runs on any exit — normal (0) or via
# SIGHUP/SIGINT/SIGQUIT/SIGTERM (1 2 3 15).  With config sections the
# final report is deferred to _summary; otherwise _wrapup suffices.
# $status is escaped so its value is read when the trap fires.
if $OPTIONS_HAVE_SECTIONS; then
    trap "_summary; exit \$status" 0 1 2 3 15
else
    trap "_wrapup; exit \$status" 0 1 2 3 15
fi
|
|
|
|
# Main loop: run the whole test list once for every configured section.
for section in $HOST_OPTIONS_SECTIONS; do
    # Remember the previous section's fs type / mount options so we can
    # detect when the test device must be remade or remounted.
    OLD_FSTYP=$FSTYP
    OLD_MOUNT_OPTIONS=$MOUNT_OPTIONS
    get_next_config $section

    # Do we need to run only some sections ?
    if [ ! -z "$RUN_SECTION" ]; then
        skip=true
        for s in $RUN_SECTION; do
            if [ $section == $s ]; then
                skip=false
            fi
        done
        if $skip; then
            continue
        fi
    fi

    mkdir -p $RESULT_BASE
    if [ ! -d $RESULT_BASE ]; then
        echo "failed to create results directory $RESULT_BASE"
        exit 1;
    fi

    if $OPTIONS_HAVE_SECTIONS; then
        echo "SECTION -- $section"
    fi

    # Remake the test fs if explicitly requested or if the fs type
    # changed between sections.
    if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
        echo "RECREATING -- $FSTYP on $TEST_DEV"
        umount $TEST_DEV 2> /dev/null
        if ! _test_mkfs >$tmp.err 2>&1
        then
            echo "our local _test_mkfs routine ..."
            cat $tmp.err
            echo "check: failed to mkfs \$TEST_DEV using specified options"
            exit 1
        fi
        # NOTE(review): the "-ne 1" below implies _mount_or_remount_rw
        # returns 1 on success — confirm against common/rc.
        out=`_mount_or_remount_rw "$MOUNT_OPTIONS" $TEST_DEV $TEST_DIR`
        if [ $? -ne 1 ]; then
            echo $out
            exit 1
        fi
        # fs type changed, so group membership may differ: rebuild list
        _prepare_test_list
    elif [ "$OLD_MOUNT_OPTIONS" != "$MOUNT_OPTIONS" ]; then
        # same fs, new mount options: remount only
        umount $TEST_DEV 2> /dev/null
        out=`_mount_or_remount_rw "$MOUNT_OPTIONS" $TEST_DEV $TEST_DIR`
        if [ $? -ne 1 ]; then
            echo $out
            exit 1
        fi
    fi

    init_rc

    seq="check"
    check="$RESULT_BASE/check"

    # don't leave old full output behind on a clean run
    rm -f $check.full

    [ -f $check.time ] || touch $check.time

    # print out our test configuration
    echo "FSTYP -- `_full_fstyp_details`"
    echo "PLATFORM -- `_full_platform_details`"
    if [ ! -z "$SCRATCH_DEV" ]; then
        echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
        echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
    fi
    echo
    needwrap=true

    if [ ! -z "$SCRATCH_DEV" ]; then
        umount $SCRATCH_DEV 2>/dev/null
        # call the overridden mkfs - make sure the FS is built
        # the same as we'll create it later.

        if ! _scratch_mkfs $flag >$tmp.err 2>&1
        then
            echo "our local _scratch_mkfs routine ..."
            cat $tmp.err
            echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
            exit 1
        fi

        # call the overridden mount - make sure the FS mounts with
        # the same options that we'll mount with later.
        if ! _scratch_mount >$tmp.err 2>&1
        then
            echo "our local mount routine ..."
            cat $tmp.err
            echo "check: failed to mount \$SCRATCH_DEV using specified options"
            exit 1
        fi
    fi

    seqres="$check"
    _check_test_fs

    # Per-test loop over the prepared $list.
    for seq in $list
    do
        err=false

        # the filename for the test and the name output are different.
        # we don't include the tests/ directory in the name output.
        seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`

        # Similarly, the result directory needs to replace the tests/
        # part of the test location.
        group=`dirname $seq`
        if $OPTIONS_HAVE_SECTIONS; then
            export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
            seqres="$RESULT_BASE/$section/$seqnum"
        else
            export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
            seqres="$RESULT_BASE/$seqnum"
        fi

        mkdir -p $RESULT_DIR

        echo -n "$seqnum"

        if $showme
        then
            echo
            continue
        elif [ ! -f $seq ]
        then
            echo " - no such test?"
        else
            # really going to try and run this one
            #
            rm -f $seqres.out.bad

            # check if we really should run it
            if [ -s $tmp.xlist ]; then
                if grep $seqnum $tmp.xlist > /dev/null 2>&1 ; then
                    echo " [expunged]"
                    continue
                fi
            fi

            # slashes now in names, sed barfs on them so use grep
            lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
            if [ "X$lasttime" != X ]; then
                echo -n " ${lasttime}s ..."
            else
                echo -n " " # prettier output with timestamps.
            fi
            rm -f core $seqres.notrun

            start=`_wallclock`
            $timestamp && echo -n " ["`date "+%T"`"]"
            [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
            $LOGGER_PROG "run xfstest $seqnum"
            if [ -w /dev/kmsg ]; then
                # Write a marker into the kernel log so _check_dmesg
                # can isolate this test's messages afterwards.
                date_time=`date +"%F %T"`
                echo "run fstests $seqnum at $date_time" > /dev/kmsg
                # _check_dmesg depends on this log in dmesg
                touch ${RESULT_DIR}/check_dmesg
            fi
            ./$seq >$tmp.rawout 2>&1
            sts=$?
            $timestamp && _timestamp
            stop=`_wallclock`

            _fix_malloc <$tmp.rawout >$tmp.out
            rm -f $tmp.rawout

            if [ -f core ]
            then
                echo -n " [dumped core]"
                mv core $RESULT_BASE/$seqnum.core
                err=true
            fi

            if [ -f $seqres.notrun ]
            then
                # the test decided it could not run here
                $timestamp || echo -n " [not run] "
                $timestamp && echo " [not run]" && echo -n " $seqnum -- "
                cat $seqres.notrun
                notrun="$notrun $seqnum"
            else
                if [ $sts -ne 0 ]
                then
                    echo -n " [failed, exit status $sts]"
                    err=true
                fi
                if [ ! -f $seq.out ]
                then
                    # no golden output to compare against
                    echo " - no qualified output"
                    err=true
                else

                    # coreutils 8.16+ changed quote formats in error messages from
                    # `foo' to 'foo'. Filter old versions to match the new version.
                    sed -i "s/\`/\'/g" $tmp.out
                    if diff $seq.out $tmp.out >/dev/null 2>&1
                    then
                        if $err
                        then
                            :
                        else
                            # output matched and nothing else failed:
                            # record and report the runtime
                            echo "$seqnum `expr $stop - $start`" >>$tmp.time
                            echo -n " `expr $stop - $start`s"
                        fi
                        echo ""
                    else
                        echo " - output mismatch (see $seqres.out.bad)"
                        mv $tmp.out $seqres.out.bad
                        # show at most $DIFF_LENGTH lines of the diff
                        $diff $seq.out $seqres.out.bad | {
                        if test "$DIFF_LENGTH" -le 0; then
                            cat
                        else
                            head -n "$DIFF_LENGTH"
                            echo "..."
                            echo "(Run '$diff $seq.out $seqres.out.bad'" \
                                " to see the entire diff)"
                        fi; } | \
                        sed -e 's/^\(.\)/ \1/'
                        err=true
                    fi
                fi
                try="$try $seqnum"
                n_try=`expr $n_try + 1`
                _check_filesystems
                _check_dmesg
            fi

        fi

        # come here for each test, except when $showme is true
        #
        if $err
        then
            bad="$bad $seqnum"
            n_bad=`expr $n_bad + 1`
            # NOTE(review): $quick is set here but never read in this
            # script — possibly vestigial; confirm before removing.
            quick=false
        fi

        seq="after_$seqnum"
    done
    _wrapup
    echo

    umount $TEST_DEV 2> /dev/null
    umount $SCRATCH_DEV 2> /dev/null
done
|
|
|
|
# Normal exit path: tell the exit trap this was not an interruption,
# and propagate the total failure count as the process exit status
# (the trap runs "exit $status").
interrupt=false
# Fix: plain arithmetic expansion instead of forking expr(1) just to
# copy a number.
status=$((sum_bad))
exit
|