mirror of
https://github.com/linux-apfs/apfstests.git
synced 2026-05-01 15:01:44 -07:00
8c16b061f7
In btrfs-progs v4.10 we had a behaviour change where starting a balance
operation without any filters results in a delay of 10 seconds and a
warning is printed to stdout that warns that a full balance is about to
be made and that it can be a slow operation. The new flag '--full-balance'
was added in that release to avoid the 10 seconds delay and the warning
message.
Our existing helper _run_btrfs_balance_start() uses that new balance flag
if we are running a btrfs-progs version that has it, to avoid that 10
seconds wait.
Make all existing btrfs tests that trigger balance operations use the
_run_btrfs_balance_start() helper, so that we avoid wasting time and
speed up some of the tests. In particular test btrfs/014 is now about 10x
faster and tests btrfs/060 to btrfs/064 3 to 5 times faster (depending
on the fsstress random load).
Besides speeding up many tests that do balance operations it also fixes
functional problems:
1) Since btrfs-progs v4.10 the test case btrfs/014 got broken, because
its purpose is to run balance and snapshot creation in parallel,
and that wasn't happening anymore because all snapshots were being
created during the 10 seconds delay of the first balance operation,
so balance and snapshot creation was being serialized instead of
running in parallel.
Fixing this test to avoid the 10 seconds delay immediately
exposes a regression that went into kernel 5.7-rc1 which is fixed
by the following commit
aec7db3b13a0 ("btrfs: fix setting last_trans for reloc roots")
2) Test cases btrfs/060 to btrfs/064 now spend much more time running
fsstress, balance and other operations in parallel; there are no
longer intervals of 10 seconds where balance is not running
concurrently with those other operations, making the tests a lot
more useful again.
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: Eryu Guan <guaneryu@gmail.com>
Signed-off-by: Eryu Guan <guaneryu@gmail.com>
194 lines
4.6 KiB
Bash
Executable File
194 lines
4.6 KiB
Bash
Executable File
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2011 Oracle. All Rights Reserved.
#
# FS QA Test No. btrfs/003
#
# btrfs vol tests
#
seq=$(basename $0)
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"

here=$(pwd)
tmp=/tmp/$$
status=1	# failure is the default!

# State shared with _cleanup: non-zero means a scratch disk was
# hot-removed and must be re-added on exit.
dev_removed=0
removed_dev_htl=""

trap "_cleanup; exit \$status" 0 1 2 3 15
|
|
|
|
# Return 0 when every device in $SCRATCH_DEV_POOL exposes a
# /sys/class/block/<dev>/device/delete knob (i.e. can be hot-removed
# through sysfs), 1 as soon as one device does not.
deletable_scratch_dev_pool()
{
	local dev
	local node

	for dev in $SCRATCH_DEV_POOL; do
		node=$(echo $dev | cut -d"/" -f 3)
		[ -f /sys/class/block/${node}/device/delete ] || return 1
	done

	return 0
}
|
|
|
|
# Exit handler: drop temp files and, when a scratch disk was yanked
# during the run (dev_removed set by _test_replace), unmount the
# scratch fs and re-add the removed device so later tests see a
# complete pool.
_cleanup()
{
	cd /
	rm -f $tmp.*
	if [ "$dev_removed" = "1" ]; then
		_scratch_unmount
		_devmgt_add "${removed_dev_htl}"
	fi
}
|
|
|
|
# get standard environment, filters and checks
. ./common/rc
. ./common/filter

# This test only makes sense on btrfs under Linux, needs a scratch
# device pool with at least 4 devices, and shells out to wipefs(8).
_supported_fs btrfs
_supported_os Linux
_require_scratch
_require_scratch_dev_pool 4
_require_command "$WIPEFS_PROG" wipefs

# Start with a fresh full-output log for this run.
rm -f $seqres.full
|
|
|
|
# Test cases related to raid in btrfs
|
|
# Build a raid0 (data and metadata) fs over the scratch pool, populate
# it with a small directory tree (with content checks) and unmount.
_test_raid0()
{
	export MKFS_OPTIONS="-m raid0 -d raid0"
	_scratch_pool_mkfs >> $seqres.full 2>&1 || _fail "mkfs failed"
	_scratch_mount

	dirp=$(mktemp -duq $SCRATCH_MNT/dir.XXXXXX)
	_populate_fs -n 1 -f 20 -d 10 -r $dirp -s 10 -c

	_scratch_unmount
}
|
|
|
|
# Build a raid1 (data and metadata) fs over the scratch pool, populate
# it with a small directory tree (with content checks) and unmount.
_test_raid1()
{
	export MKFS_OPTIONS="-m raid1 -d raid1"
	_scratch_pool_mkfs >> $seqres.full 2>&1 || _fail "mkfs failed"
	_scratch_mount

	dirp=$(mktemp -duq $SCRATCH_MNT/dir.XXXXXX)
	_populate_fs -n 1 -f 20 -d 10 -r $dirp -s 10 -c

	_scratch_unmount
}
|
|
|
|
# Build a raid10 (data and metadata) fs over the scratch pool, populate
# it with a small directory tree and unmount.
_test_raid10()
{
	export MKFS_OPTIONS="-m raid10 -d raid10"
	_scratch_pool_mkfs >> $seqres.full 2>&1 || _fail "mkfs failed"
	_scratch_mount

	dirp=$(mktemp -duq $SCRATCH_MNT/dir.XXXXXX)
	_populate_fs -n 1 -f 20 -d 10 -r $dirp -s 10

	_scratch_unmount
}
|
|
|
|
# Build a single-profile (data and metadata) fs over the scratch pool,
# populate it with a small directory tree and unmount.
_test_single()
{
	export MKFS_OPTIONS="-m single -d single"
	_scratch_pool_mkfs >> $seqres.full 2>&1 || _fail "mkfs failed"
	_scratch_mount

	dirp=$(mktemp -duq $SCRATCH_MNT/dir.XXXXXX)
	_populate_fs -n 1 -f 20 -d 10 -r $dirp -s 10

	_scratch_unmount
}
|
|
|
|
# Grow the filesystem: mkfs on the first pool device, fill it, then add
# the remaining devices one by one and rebalance onto them.
_test_add()
{
	local i
	# Plain array assignment. The previous quoted compound form
	# (local -a devs="( $SCRATCH_DEV_POOL )") is no longer expanded
	# into separate elements by newer bash releases, which would make
	# ${devs[$i]} empty; this also matches the form _test_replace uses.
	local -a devs=( $SCRATCH_DEV_POOL )
	local n=${#devs[@]}

	# n becomes the index of the last device in the pool.
	n=$(($n-1))

	export MKFS_OPTIONS=""
	_scratch_mkfs >> $seqres.full 2>&1 || _fail "mkfs failed"
	_scratch_mount
	dirp=$(mktemp -duq $SCRATCH_MNT/dir.XXXXXX)
	_populate_fs -n 1 -f 20 -d 10 -r $dirp -s 10
	for i in $(seq 2 $n); do
		$WIPEFS_PROG -a ${devs[$i]} >> $seqres.full 2>&1 || \
			_fail "wipefs failed"
		$BTRFS_UTIL_PROG device add ${devs[$i]} $SCRATCH_MNT >> $seqres.full 2>&1 || _fail "device add failed"
	done
	# Spread existing data over the newly added devices (helper avoids
	# btrfs-progs' 10 second full-balance warning delay).
	_run_btrfs_balance_start $SCRATCH_MNT >> $seqres.full
	_scratch_unmount
}
|
|
|
|
# Replace a failed device: build a raid1 fs on a subset of the pool,
# hot-remove one of its disks through sysfs, verify btrfs reports the
# missing device, then add a spare disk and rebalance onto it.
# On exit, the removed disk is re-added (also done by _cleanup if we
# die in the middle, via dev_removed/removed_dev_htl).
_test_replace()
{
	local i
	local devs=( $SCRATCH_DEV_POOL )
	local n=${#devs[@]}
	local ds
	local d

	# If scratch devs are not deletable skip this test
	if ! deletable_scratch_dev_pool; then return 0; fi

	# exclude the first and the last disk in the disk pool
	n=$(($n-1))
	ds=${devs[@]:1:$(($n-1))}

	export MKFS_OPTIONS="-m raid1 -d raid1"
	_scratch_mkfs "$ds" >> $seqres.full 2>&1 || _fail "tr: mkfs failed"
	_scratch_mount
	dirp=`mktemp -duq $SCRATCH_MNT/dir.XXXXXX`
	_populate_fs -n 1 -f 20 -d 10 -r $dirp -s 10

	# pick the 2nd last disk
	ds=${devs[@]:$(($n-1)):1}

	# retrieve the HTL (presumably the SCSI host:channel:target:lun
	# tuple, taken from the sysfs symlink path — confirm against
	# _devmgt_remove) for this scsi disk
	d=`echo $ds|cut -d"/" -f3`
	removed_dev_htl=`ls -l /sys/class/block/${d} | rev | cut -d "/" -f 3 | rev`

	# fail the disk (hot-remove it), and remember that _cleanup must
	# re-add it if we bail out early
	_devmgt_remove ${removed_dev_htl} $ds
	dev_removed=1

	$BTRFS_UTIL_PROG filesystem show $SCRATCH_DEV | grep "Some devices missing" >> $seqres.full || _fail \
		"btrfs did not report device missing"

	# add a new disk to btrfs
	ds=${devs[@]:$(($n)):1}
	$WIPEFS_PROG -a ${ds} >> $seqres.full 2>&1 || _fail "wipefs failed"
	$BTRFS_UTIL_PROG device add ${ds} $SCRATCH_MNT >> $seqres.full 2>&1 || _fail "dev add failed"
	# in some system balance fails if there is no delay (a bug)
	# putting sleep 10 to work around as of now
	# sleep 10
	_run_btrfs_balance_start $SCRATCH_MNT >> $seqres.full

	# cleanup: unmount and re-add the removed disk
	_scratch_unmount
	_devmgt_add "${removed_dev_htl}"
	dev_removed=0
}
|
|
|
|
# Shrink the filesystem: make and populate a pool fs, delete the last
# pool device from it and verify btrfs no longer reports that device.
_test_remove()
{
	_scratch_pool_mkfs >> $seqres.full 2>&1 || _fail "mkfs failed"
	_scratch_mount

	dirp=$(mktemp -duq $SCRATCH_MNT/dir.XXXXXX)
	_populate_fs -n 1 -f 20 -d 10 -r $dirp -s 10

	# pick the last device in the pool
	dev_del=$(echo ${SCRATCH_DEV_POOL} | awk '{print $NF}')

	$BTRFS_UTIL_PROG device delete $dev_del $SCRATCH_MNT || _fail "btrfs device delete failed"
	$BTRFS_UTIL_PROG filesystem show $SCRATCH_DEV 2>&1 | grep $dev_del >> $seqres.full && _fail "btrfs still shows the deleted dev"
	_scratch_unmount
}
|
|
|
|
# Run every sub-test in order; each one leaves the scratch fs unmounted.
for subtest in _test_raid0 _test_raid1 _test_raid10 _test_single \
		_test_add _test_replace _test_remove; do
	$subtest
done

# All checks are via _fail; a clean run produces no extra output.
echo "Silence is golden"
status=0
exit
|