btrfs: test direct IO write against raid5/6 filesystems

Test that a direct IO write works against raid5/6 filesystems and that
after the write operation we are able to read back the correct data
and scrub operations don't find any errors.

This test is motivated by a regression introduced in the merge window
for the 4.13 linux kernel, which was undetected by the current set of
test cases. The issue is fixed by the following patch:

  "Btrfs: fix write corruption due to bio cloning on raid5/6"

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: Liu Bo <bo.li.liu@oracle.com>
Signed-off-by: Eryu Guan <eguan@redhat.com>
This commit is contained in:
Filipe Manana
2017-07-13 15:10:40 +01:00
committed by Eryu Guan
parent 6813824fd2
commit c3893c2dc6
3 changed files with 110 additions and 0 deletions
+86
View File
@@ -0,0 +1,86 @@
#! /bin/bash
# FS QA Test No. btrfs/148
#
# Test that direct IO writes work on RAID5 and RAID6 filesystems.
#
#-----------------------------------------------------------------------
#
# Copyright (C) 2017 SUSE Linux Products GmbH. All Rights Reserved.
# Author: Filipe Manana <fdmanana@suse.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#-----------------------------------------------------------------------
#
# Test name and per-test result locations used by the fstests harness.
seq=$(basename "$0")
seqres="$RESULT_DIR/$seq"
echo "QA output created by $seq"

# Scratch prefix for temporary files; _cleanup removes everything under it.
tmp=/tmp/$$
status=1	# failure is the default!
# Clean up and propagate $status on normal exit and on HUP/INT/QUIT/TERM.
trap "_cleanup; exit \$status" 0 1 2 3 15
# Remove this test's temporary files; runs from the exit trap.
_cleanup()
{
	# Leave any test-created directory before deleting files.
	cd /
	rm -f "$tmp".*
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
# real QA test starts here
# This test only applies to btrfs on Linux.
_supported_fs btrfs
_supported_os Linux
# raid5/raid6 profiles need several devices: require a scratch device, a
# device pool of at least 4 entries, and O_DIRECT support for the writes.
_require_scratch
_require_scratch_dev_pool 4
_require_odirect
# Reserve 4 devices from the configured pool for the raid5/raid6 mkfs runs.
_scratch_dev_pool_get 4
# Start this run with a fresh .full log file.
rm -f $seqres.full
# Create a fresh filesystem with the given mkfs options, do a 1MiB direct IO
# write, then verify the data survives a remount and that scrub is clean.
test_direct_io_write()
{
	local opts=$1

	# $opts must word-split into individual mkfs arguments, so it is
	# deliberately left unquoted here.
	_scratch_pool_mkfs $opts >>$seqres.full 2>&1
	_scratch_mount

	# O_DIRECT write of 1MiB of 0xab bytes at offset 0.
	$XFS_IO_PROG -f -d -c "pwrite -S 0xab 0 1M" $SCRATCH_MNT/foo \
		| _filter_xfs_io

	# Now read back the same data, we expect to get what we wrote before.
	echo "File data after direct IO write:"
	od -t x1 $SCRATCH_MNT/foo | _filter_scratch

	# Remount to drop caches and force reads from disk.
	_scratch_cycle_mount
	echo "File data after umounting and mounting again the filesystem:"
	od -t x1 $SCRATCH_MNT/foo | _filter_scratch

	# A full scrub must not report any checksum/parity errors.
	if ! $BTRFS_UTIL_PROG scrub start -B $SCRATCH_MNT >>$seqres.full 2>&1; then
		echo "Scrub found errors" | tee -a $seqres.full
	fi

	_scratch_unmount
}
# Exercise both parity raid profiles for metadata and data.
echo "Testing RAID5..."
test_direct_io_write "-m raid5 -d raid5"
echo "Testing RAID6..."
test_direct_io_write "-m raid6 -d raid6"

# Release the devices reserved with _scratch_dev_pool_get above; without
# this the claimed pool devices are leaked when the test finishes.
_scratch_dev_pool_put

status=0
exit
+23
View File
@@ -0,0 +1,23 @@
QA output created by 148
Testing RAID5...
wrote 1048576/1048576 bytes at offset 0
XXX Bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
File data after direct IO write:
0000000 ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab
*
4000000
File data after umounting and mounting again the filesystem:
0000000 ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab
*
4000000
Testing RAID6...
wrote 1048576/1048576 bytes at offset 0
XXX Bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
File data after direct IO write:
0000000 ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab
*
4000000
File data after umounting and mounting again the filesystem:
0000000 ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab
*
4000000
+1
View File
@@ -150,3 +150,4 @@
145 auto quick send
146 auto quick
147 auto quick send
148 auto quick rw