xfstests/tests/generic/560
Theodore Ts'o c3cb6fbde7 shared,generic: move tests using duperemove to generic/
Add _require_scratch_duperemove which validates that the file system
supports duperemove.  This allows us to move three tests from shared/
to generic/.  This means these tests will automatically adapt when
duperemove supports other file systems.  Tests moved are:

	shared/008 --> generic/559
	shared/009 --> generic/560
	shared/010 --> generic/561

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Eryu Guan <guaneryu@gmail.com>
Signed-off-by: Eryu Guan <guaneryu@gmail.com>
2019-07-05 16:10:10 +08:00
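
For reference, a minimal sketch of what such a _require_scratch_duperemove helper can look like, assuming the standard xfstests helpers _require_scratch_dedupe, _require_command and _notrun; the probe shown here is an illustration, not necessarily the committed implementation:

	_require_scratch_duperemove()
	{
		_require_scratch_dedupe
		_require_command "$DUPEREMOVE_PROG" duperemove

		# Probe on a fresh scratch fs: write two identical files and
		# check whether duperemove can actually dedupe them there.
		_scratch_mkfs > /dev/null 2>&1
		_scratch_mount
		dd if=/dev/zero of=$SCRATCH_MNT/file1 bs=128k count=1 >/dev/null 2>&1
		dd if=/dev/zero of=$SCRATCH_MNT/file2 bs=128k count=1 >/dev/null 2>&1
		if ! $DUPEREMOVE_PROG -d $SCRATCH_MNT/file1 $SCRATCH_MNT/file2 \
		     >/dev/null 2>&1; then
			_notrun "duperemove does not support file system type: $FSTYP"
		fi
		_scratch_unmount
	}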

#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018 Red Hat Inc. All Rights Reserved.
#
# FS QA Test generic/560
#
# Iterated dedupe integrity test. Copy the original data0 several
# times (d0 -> d1, d1 -> d2, ... dn-1 -> dn), deduping the new copy
# each time before the next one is made. Finally, verify that dataN
# is identical to data0.
#
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
	cd /
	rm -f $tmp.*
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
. ./common/reflink
# remove previous $seqres.full before test
rm -f $seqres.full
# real QA test starts here
_supported_fs generic
_supported_os Linux
_require_scratch_duperemove
_scratch_mkfs > $seqres.full 2>&1
_scratch_mount >> $seqres.full 2>&1
function iterate_dedup_verify()
{
	local src=$srcdir
	local dest=$dupdir/1

	for ((index = 1; index <= times; index++)); do
		cp -a $src $dest
		find $dest -type f -exec md5sum {} \; \
			> $md5file$index
		# Make some noise
		$FSSTRESS_PROG $fsstress_opts -d $noisedir \
			-n 200 -p $((5 * LOAD_FACTOR)) >/dev/null 2>&1
		# duperemove is too verbose, so keep only its error output
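		# Note: per the duperemove man page, --dedupe-options=same
		# also allows deduping identical extents within a single
		# file, which is off by default.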
		$DUPEREMOVE_PROG -dr --dedupe-options=same $dupdir \
			>/dev/null 2>> $seqres.full
		md5sum -c --quiet $md5file$index
		src=$dest
		dest=$dupdir/$((index + 1))
	done
}
srcdir=$SCRATCH_MNT/src
dupdir=$SCRATCH_MNT/dup
noisedir=$dupdir/noise
mkdir $srcdir $dupdir
mkdir $noisedir
md5file=${tmp}.md5sum
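# Per fsstress usage, -w zeroes the frequencies of non-write operations
# (write-only workload) and -r uses random name padding for new files.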
fsstress_opts="-w -r"
# Create some files as the original data
$FSSTRESS_PROG $fsstress_opts -d $srcdir \
	-n 500 -p $((5 * LOAD_FACTOR)) >/dev/null 2>&1
# Calculate how many test cycles will be run
src_size=`du -ks $srcdir | awk '{print $1}'`
free_size=`df -kP $SCRATCH_MNT | grep -v Filesystem | awk '{print $4}'`
times=$((free_size / src_size))
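# Cap the cycle count so the runtime stays bounded even on large
# scratch devices; the cap scales with TIME_FACTOR.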
if [ $times -gt $((4 * TIME_FACTOR)) ]; then
	times=$((4 * TIME_FACTOR))
fi
echo "= Do dedup and verify ="
iterate_dedup_verify
# Use the last checksum file to verify the original data
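# The sed below rewrites the dup/$times paths in the last checksum file
# to src/, so md5sum compares the Nth-generation checksums against the
# untouched original files.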
sed -e s#dup/$times#src#g $md5file$times > $md5file
echo "= Backwords verify ="
md5sum -c --quiet $md5file
# Cycle mount to drop caches, then verify everything again so that data
# read back from the disk also doesn't show mutations.
_scratch_cycle_mount
echo "= Verify after cycle mount ="
for ((index = 1; index <= times; index++)); do
	md5sum -c --quiet $md5file$index
done
status=0
exit