shared,generic: move tests using duperemove to generic/

Add _require_scratch_duperemove which validates that the file system
supports duperemove.  This allows us to move three tests from shared/
to generic/.  This means these tests will automatically adapt when
duperemove supports other file systems.  Tests moved are:

	shared/008 --> generic/559
	shared/009 --> generic/560
	shared/010 --> generic/561

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Eryu Guan <guaneryu@gmail.com>
Signed-off-by: Eryu Guan <guaneryu@gmail.com>
This commit is contained in:
Theodore Ts'o
2019-06-28 18:59:10 -04:00
committed by Eryu Guan
parent a9868eb48d
commit c3cb6fbde7
10 changed files with 35 additions and 27 deletions
+62
View File
@@ -0,0 +1,62 @@
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018 Red Hat Inc. All Rights Reserved.
#
# FS QA Test generic/559
#
# Dedupe a single big file and verify integrity: write one large file
# whose blocks all hold identical data, dedupe it with duperemove at
# two block sizes, and check the contents never change -- both from
# the page cache and after a cycle mount (fresh read from disk).
#
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15

# Standard fstests cleanup: leave the cwd and remove temp files.
_cleanup()
{
	cd /
	rm -f $tmp.*
}

# get standard environment, filters and checks
. ./common/rc
. ./common/filter
. ./common/reflink

# remove previous $seqres.full before test
rm -f $seqres.full

# real QA test starts here
_supported_fs generic
_supported_os Linux
# Skip the test unless duperemove is installed and supports this fs.
_require_scratch_duperemove

# Use a small fixed-size (2G) fs so filling it completely stays fast.
fssize=$((2 * 1024 * 1024 * 1024))
_scratch_mkfs_sized $fssize > $seqres.full 2>&1
_scratch_mount >> $seqres.full 2>&1

# Fill the fs with one big file whose contents are all 0x55 -- every
# block is identical, the ideal dedupe candidate.
$XFS_IO_PROG -f -c "pwrite -S 0x55 0 $fssize" $SCRATCH_MNT/${seq}.file \
	>> $seqres.full 2>&1
# Record the checksum expected to survive every dedupe pass.
md5sum $SCRATCH_MNT/${seq}.file > ${tmp}.md5sum

echo "= before cycle mount ="
# Dedupe with 1M blocksize
$DUPEREMOVE_PROG -dr --dedupe-options=same -b 1048576 $SCRATCH_MNT/ >>$seqres.full 2>&1
# Verify integrity
md5sum -c --quiet ${tmp}.md5sum
# Dedupe with 64k blocksize
$DUPEREMOVE_PROG -dr --dedupe-options=same -b 65536 $SCRATCH_MNT/ >>$seqres.full 2>&1
# Verify integrity again
md5sum -c --quiet ${tmp}.md5sum

# umount and mount again, verify pagecache contents don't mutate
_scratch_cycle_mount
echo "= after cycle mount ="
md5sum -c --quiet ${tmp}.md5sum

status=0
exit
+3
View File
@@ -0,0 +1,3 @@
QA output created by 559
= before cycle mount =
= after cycle mount =
+100
View File
@@ -0,0 +1,100 @@
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018 Red Hat Inc. All Rights Reserved.
#
# FS QA Test generic/560
#
# Iterated dedupe integrity test.  Copy an original data0 several
# times (d0 -> d1, d1 -> d2, ... dn-1 -> dn), deduping dataN every
# time before the next copy.  At the end, verify dataN is identical
# to data0.
#
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15

# Standard fstests cleanup: leave the cwd and remove temp files.
_cleanup()
{
	cd /
	rm -f $tmp.*
}

# get standard environment, filters and checks
. ./common/rc
. ./common/filter
. ./common/reflink

# remove previous $seqres.full before test
rm -f $seqres.full

# real QA test starts here
_supported_fs generic
_supported_os Linux
# Skip the test unless duperemove is installed and supports this fs.
_require_scratch_duperemove

_scratch_mkfs > $seqres.full 2>&1
_scratch_mount >> $seqres.full 2>&1
# Copy $srcdir into $dupdir/1, $dupdir/1 into $dupdir/2, ... $times
# times.  After each copy, checksum the new copy, run fsstress in
# $noisedir for background noise, dedupe the whole $dupdir tree, and
# verify the copy still matches its recorded checksums.
# Globals read: srcdir dupdir noisedir times md5file fsstress_opts
#               FSSTRESS_PROG DUPEREMOVE_PROG LOAD_FACTOR seqres
function iterate_dedup_verify()
{
	local src=$srcdir
	local dest=$dupdir/1

	for ((index = 1; index <= times; index++)); do
		cp -a $src $dest
		find $dest -type f -exec md5sum {} \; \
			> $md5file$index
		# Make some noise
		$FSSTRESS_PROG $fsstress_opts -d $noisedir \
			-n 200 -p $((5 * LOAD_FACTOR)) >/dev/null 2>&1
		# Too much output to keep, so only save the error output.
		# Append (2>>), otherwise each iteration would truncate
		# $seqres.full and clobber everything logged so far.
		$DUPEREMOVE_PROG -dr --dedupe-options=same $dupdir \
			>/dev/null 2>>$seqres.full
		md5sum -c --quiet $md5file$index
		src=$dest
		dest=$dupdir/$((index + 1))
	done
}
srcdir=$SCRATCH_MNT/src
dupdir=$SCRATCH_MNT/dup
noisedir=$dupdir/noise
mkdir $srcdir $dupdir
mkdir $dupdir/noise
md5file=${tmp}.md5sum
fsstress_opts="-w -r"

# Create some files to be original data
$FSSTRESS_PROG $fsstress_opts -d $srcdir \
	-n 500 -p $((5 * LOAD_FACTOR)) >/dev/null 2>&1

# Calculate how many copy/dedupe cycles fit in the free space, capped
# at 4 * TIME_FACTOR so the runtime stays bounded.
src_size=`du -ks $srcdir | awk '{print $1}'`
free_size=`df -kP $SCRATCH_MNT | grep -v Filesystem | awk '{print $4}'`
times=$((free_size / src_size))
if [ $times -gt $((4 * TIME_FACTOR)) ]; then
	times=$((4 * TIME_FACTOR))
fi

echo "= Do dedup and verify ="
iterate_dedup_verify

# Use the last checksum file to verify the original data: rewrite the
# dup/$times paths back to src so the final copy's checksums are
# checked against the original files.
sed -e s#dup/$times#src#g $md5file$times > $md5file
echo "= Backwords verify ="
md5sum -c --quiet $md5file

# umount and mount again, so a fresh read from the disk also doesn't
# show mutations in any of the intermediate copies.
_scratch_cycle_mount
echo "= Verify after cycle mount ="
for ((index = 1; index <= times; index++)); do
	md5sum -c --quiet $md5file$index
done

status=0
exit
+4
View File
@@ -0,0 +1,4 @@
QA output created by 560
= Do dedup and verify =
= Backwords verify =
= Verify after cycle mount =
+92
View File
@@ -0,0 +1,92 @@
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018 Red Hat Inc. All Rights Reserved.
#
# FS QA Test generic/561
#
# Dedup & random I/O race test: run multi-process fsstress and several
# concurrent duperemove processes against the same directory/files,
# then verify the data survives unchanged.
#
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15

# Cleanup also stops the background dedupe and fsstress workers via
# end_test (defined below).
_cleanup()
{
	cd /
	rm -f $tmp.*
	end_test
}

# get standard environment, filters and checks
. ./common/rc
. ./common/filter
. ./common/reflink

# remove previous $seqres.full before test
rm -f $seqres.full

# real QA test starts here
_supported_fs generic
_supported_os Linux
# Skip the test unless duperemove is installed and supports this fs.
_require_scratch_duperemove
# killall is needed to shoot down the fsstress processes.
_require_command "$KILLALL_PROG" killall

_scratch_mkfs > $seqres.full 2>&1
_scratch_mount >> $seqres.full 2>&1
# Stop all background workers: removing $dupe_run makes the dedupe
# loops exit (then wait for them), and fsstress is signalled repeatedly
# until no fsstress process remains.
# Globals read: dupe_run dedup_pids KILLALL_PROG FSSTRESS_PROG
function end_test()
{
	local f=1

	# stop duperemove running
	if [ -e $dupe_run ]; then
		rm -f $dupe_run
		wait $dedup_pids
	fi

	# Make sure all fsstress get killed: keep signalling and
	# re-checking until ps shows no fsstress threads left.
	while [ $f -ne 0 ]; do
		$KILLALL_PROG -q $FSSTRESS_PROG > /dev/null 2>&1
		sleep 1
		f=`ps -eLf | grep $FSSTRESS_PROG | grep -v "grep" | wc -l`
	done
}
sleep_time=$((50 * TIME_FACTOR))

# Start fsstress; -l 0 loops forever, end_test kills it when time is up.
testdir="$SCRATCH_MNT/dir"
mkdir $testdir
fsstress_opts="-r -n 1000 -p $((5 * LOAD_FACTOR))"
$FSSTRESS_PROG $fsstress_opts -d $testdir -l 0 >> $seqres.full 2>&1 &

dedup_pids=""
# Flag file: the dedupe loops below keep running while it exists.
dupe_run=$TEST_DIR/${seq}-running
# Start several dedupe processes on same directory
touch $dupe_run
for ((i = 0; i < $((2 * LOAD_FACTOR)); i++)); do
	while [ -e $dupe_run ]; do
		$DUPEREMOVE_PROG -dr --dedupe-options=same $testdir \
			>>$seqres.full 2>&1
	done &
	dedup_pids="$! $dedup_pids"
done

# End the test after $sleep_time seconds
sleep $sleep_time
end_test

# umount and mount again, verify pagecache contents don't mutate and a fresh
# read from the disk also doesn't show mutations.
find $testdir -type f -exec md5sum {} \; > ${tmp}.md5sum
_scratch_cycle_mount
md5sum -c --quiet ${tmp}.md5sum

echo "Silence is golden"
status=0
exit
+2
View File
@@ -0,0 +1,2 @@
QA output created by 561
Silence is golden
+3
View File
@@ -561,3 +561,6 @@
556 auto quick casefold
557 auto quick log
558 auto enospc
559 auto stress dedupe
560 auto stress dedupe
561 auto stress dedupe