This is fundamentally the same as the previous growfs vs. log recovery
test, with tweaks to support growing the XFS realtime volume on
realtime-enabled configurations. Changes include using the appropriate
mkfs and growfs parameters and enabling realtime inheritance on the
scratch fs.

Signed-off-by: Brian Foster <bfoster@xxxxxxxxxx>
---
 tests/xfs/610     | 71 +++++++++++++++++++++++++++++++++++++++++++++++
 tests/xfs/610.out |  7 +++++
 2 files changed, 78 insertions(+)
 create mode 100755 tests/xfs/610
 create mode 100644 tests/xfs/610.out

diff --git a/tests/xfs/610 b/tests/xfs/610
new file mode 100755
index 00000000..95ae31be
--- /dev/null
+++ b/tests/xfs/610
@@ -0,0 +1,71 @@
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2024 Red Hat, Inc. All Rights Reserved.
+#
+# FS QA Test No. 610
+#
+# Test XFS online growfs log recovery of the realtime volume.
+#
+. ./common/preamble
+_begin_fstest auto growfs stress shutdown log recoveryloop
+
+# Import common functions.
+. ./common/filter
+
+_stress_scratch()
+{
+        procs=4
+        nops=999999
+        # -w ensures that the only ops are ones which cause write I/O
+        FSSTRESS_ARGS=`_scale_fsstress_args -d $SCRATCH_MNT -w -p $procs \
+                -n $nops $FSSTRESS_AVOID`
+        $FSSTRESS_PROG $FSSTRESS_ARGS >> $seqres.full 2>&1 &
+}
+
+_require_scratch
+_require_realtime
+
+_scratch_mkfs_xfs | tee -a $seqres.full | _filter_mkfs 2>$tmp.mkfs
+. $tmp.mkfs  # extract blocksize and data size for scratch device
+
+endsize=`expr 550 \* 1048576`  # stop after growing this big
+[ `expr $endsize / $dbsize` -lt $dblocks ] || _notrun "Scratch device too small"
+
+nags=4
+size=`expr 125 \* 1048576`    # 125 megabytes initially
+sizeb=`expr $size / $dbsize`  # in data blocks
+logblks=$(_scratch_find_xfs_min_logblocks -rsize=${size} -dagcount=${nags})
+
+_scratch_mkfs_xfs -lsize=${logblks}b -rsize=${size} -dagcount=${nags} \
+        >> $seqres.full
+_scratch_mount
+_xfs_force_bdev realtime $SCRATCH_MNT &> /dev/null
+
+# Grow the realtime volume in random sized chunks while stressing and
+# performing shutdown and recovery. The randomization is intended to create
+# a mix of sub-ag and multi-ag grows.
+while [ $size -le $endsize ]; do
+        echo "*** stressing a ${sizeb} block filesystem" >> $seqres.full
+        _stress_scratch
+        incsize=$((RANDOM % 40 * 1048576))
+        size=`expr $size + $incsize`
+        sizeb=`expr $size / $dbsize`  # in data blocks
+        echo "*** growing to a ${sizeb} block filesystem" >> $seqres.full
+        xfs_growfs -R ${sizeb} $SCRATCH_MNT >> $seqres.full
+
+        sleep $((RANDOM % 3))
+        _scratch_shutdown
+        ps -e | grep fsstress > /dev/null 2>&1
+        while [ $? -eq 0 ]; do
+                killall -9 fsstress > /dev/null 2>&1
+                wait > /dev/null 2>&1
+                ps -e | grep fsstress > /dev/null 2>&1
+        done
+        _scratch_cycle_mount || _fail "cycle mount failed"
+done > /dev/null 2>&1
+wait  # wait for any remaining stress processes to exit
+
+_scratch_unmount
+
+status=0
+exit
diff --git a/tests/xfs/610.out b/tests/xfs/610.out
new file mode 100644
index 00000000..42a6d3ce
--- /dev/null
+++ b/tests/xfs/610.out
@@ -0,0 +1,7 @@
+QA output created by 610
+meta-data=DDEV isize=XXX agcount=N, agsize=XXX blks
+data     = bsize=XXX blocks=XXX, imaxpct=PCT
+         = sunit=XXX swidth=XXX, unwritten=X
+naming   =VERN bsize=XXX
+log      =LDEV bsize=XXX blocks=XXX
+realtime =RDEV extsz=XXX blocks=XXX, rtextents=XXX
-- 
2.46.2
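
For anyone who wants to try this locally: _require_realtime skips the test
unless fstests is configured to use an external realtime device. A minimal
local.config sketch follows; the device paths are placeholders (not part of
the patch), so substitute your own:

    # local.config -- placeholder devices, adjust for your setup
    export TEST_DEV=/dev/sdb         # test device
    export TEST_DIR=/mnt/test        # test mount point
    export SCRATCH_DEV=/dev/sdc      # scratch device (data section)
    export SCRATCH_MNT=/mnt/scratch  # scratch mount point
    export USE_EXTERNAL=yes          # enable external log/realtime devices
    export SCRATCH_RTDEV=/dev/sdd    # realtime device this test requires

    ./check xfs/610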