Files
xfstests/042
T
Dave Chinner 186b6bd2e8 xfstests: Speed up test 042
test 042 generates a worst-case fragmented filesystem and uses it to
test xfs_fsr. It uses small 4k files to generate the hole-space-hole
pattern that fragments free space badly. It is much faster to
generate the same pattern by creating a single large file and
punching holes in it.  Also, instead of writing large files to
create unfragmented space, just use preallocation so we don't have
to write the data to disk.

These changes reduce the runtime of the test on a single SATA drive
from 106s to 27s.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Alex Elder <aelder@sgi.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
2011-02-14 11:22:07 +11:00

171 lines
4.9 KiB
Bash
Executable File

#! /bin/bash
# FS QA Test No. 042
#
# xfs_fsr QA tests
# create a large fragmented file and check that xfs_fsr doesn't corrupt
# it or the other contents of the filesystem
#
#-----------------------------------------------------------------------
# Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#-----------------------------------------------------------------------
#
set +x		# make sure any inherited xtrace is off; keeps golden output clean
# creator
owner=ajag@sgi.com

# Test identity and scratch state used throughout the harness.
seq=$(basename "$0")
echo "QA output created by $seq"

here=$(pwd)
tmp=/tmp/$$
status=1 # failure is the default!
# _cleanup -- exit/signal handler: unmount the scratch filesystem and
# remove this test's temp files.  Quoted expansions so cleanup still
# works if the mount point or temp path contains spaces.
_cleanup()
{
	umount "$SCRATCH_MNT"
	# "$tmp".* keeps the prefix literal while still globbing the suffix
	rm -f "$tmp".*
}
# Run cleanup on normal exit (0) and on HUP/INT/QUIT/TERM; \$status is
# escaped so the value at exit time, not trap-registration time, is used.
trap "_cleanup ; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
# real QA test starts here
_supported_fs xfs
_supported_os IRIX Linux
_require_scratch
[ "$XFS_FSR_PROG" = "" ] && _notrun "xfs_fsr not found"
# _cull_files -- delete every second file listed in $tmp.manifest and
# rewrite the manifest so it names only the surviving files.  Prints an
# error and exits non-zero if an unlink fails.
#
# NOTE(review): the only call site below ("Delete every second file") is
# commented out, so this looks like dead code left over from the old
# fill2fs-based version of this test -- confirm before relying on it.
_cull_files()
{
# The first -e is double-quoted so the shell interpolates $tmp into the
# perl variable $manifest; the second -e is single-quoted, so every
# $var inside it is a perl variable, not a shell one.
perl -e "\$manifest=\"$tmp.manifest\";" -e '
open MANIFEST, $manifest;
@in = <MANIFEST>;
close MANIFEST;
open MANIFEST, ">$manifest";
for ($i = 0; $i < @in; $i++) {
if (($i+1) % 2 == 0) {
# remove every second file
chomp($s = $in[$i]);
if (unlink($s) != 1) {
print "_cull_files: could not delete \"$s\"\n";
exit(1);
}
}
else {
print MANIFEST $in[$i];
}
}
close MANIFEST;
exit(0);'
}
# Outline (fast hole-punching variant):
# - interleave 1MB "hole" and 4KB "space" preallocations
# - fill the filesystem with a single large "fill" file, then soak up any
#   remaining space with a dd-generated pad file
# - punch out every second 4k block of the fill file so free space ends up
#   maximally fragmented once the pad/hole files are removed
# - use fill2 to create one very large file from that fragmented free space
# - run xfs_fsr on it, then verify checksums of both files are unchanged
# Make a 48MB filesystem with 3 allocation groups (16MB each, the
# minimum AG size) -- xfs_repair needs at least three superblocks to
# verify against.
rm -f $seq.full
_do_die_on_error=message_only
echo -n "Make a 48 megabyte filesystem on SCRATCH_DEV and mount... "
# 2>&1 >/dev/null keeps mkfs errors on stdout (visible in the output)
# while discarding its normal chatter -- presumably intentional ordering.
_scratch_mkfs_xfs -dsize=48m,agcount=3 2>&1 >/dev/null || _fail "mkfs failed"
_scratch_mount || _fail "mount failed"
echo "done"

# Interleave 1MB "hole" and 4KB "space" preallocations; removing the
# hole files later leaves free space carved into small separated chunks.
# (The original had a second, identical "resvsp 0 1m" on hole$i --
# re-reserving an already-allocated range is a no-op, so it is dropped.)
echo -n "Reserve 16 1Mb unfragmented regions... "
for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
do
	_do "$XFS_IO_PROG -f -c \"resvsp 0 1m\" $SCRATCH_MNT/hole$i"
	_do "$XFS_IO_PROG -f -c \"resvsp 0 4k\" $SCRATCH_MNT/space$i"
	_do "xfs_bmap -vp $SCRATCH_MNT/hole$i"
done
echo "done"
# Populate the filesystem: build the 32MB "fill" file one megabyte at a
# time, seeding each chunk with a distinct byte pattern for later
# checksum verification.
echo -n "Fill filesystem with fill file... "
mb=0
while [ "$mb" -le 31 ]; do
	_do "$XFS_IO_PROG -fs -c \"pwrite -S$mb ${mb}m 1m\" $SCRATCH_MNT/fill"
	mb=$((mb + 1))
done
_do "xfs_bmap -vp $SCRATCH_MNT/fill"
echo "done"

# Cycle the mount so no space is left "lost" to speculative preallocation.
_do "umount $SCRATCH_MNT"
_do "_scratch_mount"

# Soak up whatever free space remains with a pad file.
echo -n "Use up any further available space using dd... "
_do "dd if=/dev/zero of=$SCRATCH_MNT/pad bs=4096"
echo "done"
# Fragment free space by punching a 4k hole out of every second 4k block
# of the fill file (one hole per 8k through the whole 32MB file).
#_do "Delete every second file" "_cull_files"
echo -n "Punch every second 4k block... "
for ((off = 0; off <= 32768; off += 8)); do
	# xfs_io's per-call output is enormous and useless for debugging
	# here, so it is discarded rather than logged.
	$XFS_IO_PROG -f -c "unresvsp ${off}k 4k" $SCRATCH_MNT/fill \
		> /dev/null 2>&1
done
_do "xfs_bmap -vp $SCRATCH_MNT/fill"
_do "sum $SCRATCH_MNT/fill >$tmp.fillsum1"
echo "done"
# Consume the fragmented free space with one big file: fill2 writes until
# space runs out, producing a badly fragmented (possibly short) file.
echo -n "Create one very large file... "
_do "src/fill2 -d nbytes=16000000,file=$SCRATCH_MNT/fragmented"
echo "done"
_do "xfs_bmap -v $SCRATCH_MNT/fragmented"
_do "sum $SCRATCH_MNT/fragmented >$tmp.sum1"

# Release the pad and hole files so fsr has contiguous space to move into.
_do "Remove other files" "rm -rf $SCRATCH_MNT/{pad,hole*}"

# Defragment, then verify the untouched fill file survived intact.
_do "Run xfs_fsr on filesystem" "$XFS_FSR_PROG -v $SCRATCH_MNT/fragmented"
_do "xfs_bmap -v $SCRATCH_MNT/fragmented"
echo -n "Check fill file... "
_do "sum $SCRATCH_MNT/fill >$tmp.fillsum2"
_do "diff $tmp.fillsum1 $tmp.fillsum2" || {
	echo "fail"
	echo "Fill file is corrupt/missing after fsr. Test failed see $seq.full"
	status=1; exit
}
echo "done"
# Verify the defragmented file's checksum matches its pre-fsr checksum.
echo -n "Check large file... "
_do "sum $SCRATCH_MNT/fragmented >$tmp.sum2"
_do "diff $tmp.sum1 $tmp.sum2" || {
	echo "fail"
	echo "File is corrupt/missing after fsr. Test failed see $seq.full"
	status=1; exit
}
echo "done"

# Final on-disk consistency check of the scratch filesystem.
_do "Checking filesystem" "_check_scratch_fs"

# success, all done
echo "xfs_fsr tests passed."
status=0 ; exit