#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Red Hat Inc. All Rights Reserved.
#
# FS QA Test No. 081
#
# Test the I/O error path by fully filling a dm snapshot.
#
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
	cd /
	rm -f $tmp.*

	# Tear down the lvm vg and snapshot.
	#
	# NOTE: We do the unmount and {vg,pv}remove in a loop here because
	# dmeventd could be configured to unmount the filesystem automatically
	# after the IO errors. That is racy with the umount we're trying to do
	# here because there's a window in which the directory tree has been
	# removed from the mount namespaces (so the umount call here sees no
	# mount and exits) but the filesystem hasn't yet released the block
	# device, which causes the vgremove here to fail.
	#
	# We "solve" the race by repeating the umount/lvm teardown until the
	# block device goes away, because we cannot exit this test without
	# removing the lvm devices from the scratch device -- leaving them
	# behind would cause other tests to fail.
	while test -e /dev/mapper/$vgname-$snapname || \
	      test -e /dev/mapper/$vgname-$lvname; do
		$UMOUNT_PROG $mnt >> $seqres.full 2>&1
		$LVM_PROG vgremove -f $vgname >>$seqres.full 2>&1
		$LVM_PROG pvremove -f $SCRATCH_DEV >>$seqres.full 2>&1
		test $? -eq 0 && break
		sleep 2
	done
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
# real QA test starts here
_supported_fs generic
_require_test
_require_scratch_nocheck
_require_dm_target snapshot
_require_command $LVM_PROG lvm
echo "Silence is golden"
rm -f $seqres.full
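# lvm vg, base lv and snapshot names, plus the mount point used by this test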
vgname=vg_$seq
lvname=base_$seq
snapname=snap_$seq
mnt=$TEST_DIR/mnt_$seq
mkdir -p $mnt
# make sure there's enough disk space for a 256M lv; require 300M here in case
# lvm uses some space for metadata
_scratch_mkfs_sized $((300 * 1024 * 1024)) >>$seqres.full 2>&1
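# create the volume group on the scratch device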
$LVM_PROG vgcreate -f $vgname $SCRATCH_DEV >>$seqres.full 2>&1
# We use a yes pipe instead of 'lvcreate --yes' because old versions of lvm
# (like 2.02.95 in RHEL6) don't support the --yes option
yes | $LVM_PROG lvcreate -L 256M -n $lvname $vgname >>$seqres.full 2>&1
# wait for the lv creation to fully complete
$UDEV_SETTLE_PROG >>$seqres.full 2>&1
# _mkfs_dev exits the test on failure, so this also verifies that the lv was
# actually created by the vgcreate/lvcreate steps above
_mkfs_dev /dev/mapper/$vgname-$lvname
# create a 4M snapshot
$LVM_PROG lvcreate -s -L 4M -n $snapname $vgname/$lvname >>$seqres.full 2>&1 || \
_fail "Failed to create snapshot"
_mount /dev/mapper/$vgname-$snapname $mnt
# write 5M of data to overfill the 4M snapshot
$XFS_IO_PROG -fc "pwrite 0 5m" -c fsync $mnt/testfile >>$seqres.full 2>&1
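# the writes are expected to hit I/O errors once the snapshot fills up; the
# errors only land in $seqres.full, so the golden output stays silent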
# _check_dmesg will check for WARNINGs/BUGs in dmesg
status=0
exit