| #!/bin/bash |
| # |
| # Control script for QA |
| # |
| # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved. |
| # |
| # This program is free software; you can redistribute it and/or |
| # modify it under the terms of the GNU General Public License as |
| # published by the Free Software Foundation. |
| # |
| # This program is distributed in the hope that it would be useful, |
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| # GNU General Public License for more details. |
| # |
| # You should have received a copy of the GNU General Public License |
| # along with this program; if not, write the Free Software Foundation, |
| # Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
| # |
| # |
| |
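| # Per-run state: n_try/try, n_bad/bad and n_notrun/notrun count and list the |
| # tests that were attempted, failed and skipped; the remaining variables are |
| # flags and defaults controlling output and behaviour. |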
| tmp=/tmp/$$ |
| status=0 |
| needwrap=true |
| needsum=true |
| n_try=0 |
| try="" |
| n_bad=0 |
| sum_bad=0 |
| bad="" |
| n_notrun=0 |
| notrun="" |
| interrupt=true |
| diff="diff -u" |
| showme=false |
| have_test_arg=false |
| randomize=false |
| export here=`pwd` |
| xfile="" |
| brief_test_summary=false |
| _err_msg="" |
| do_report=false |
| DUMP_OUTPUT=false |
| |
| # start the initialisation work now |
| iam=check |
| |
| export MSGVERB="text:action" |
| export QA_CHECK_FS=${QA_CHECK_FS:=true} |
| |
| # number of diff lines from a failed test, 0 for whole output |
| export DIFF_LENGTH=${DIFF_LENGTH:=10} |
| |
| # by default don't output timestamps |
| timestamp=${TIMESTAMP:=false} |
| |
| rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.* |
| |
| SRC_GROUPS="generic shared" |
| export SRC_DIR="tests" |
| |
| usage() |
| { |
| echo "Usage: $0 [options] [testlist]"' |
| |
| check options |
| -nfs test NFS |
| -glusterfs test GlusterFS |
| -cifs test CIFS |
| -9p test 9p |
| -overlay test overlay |
| -pvfs2 test PVFS2 |
| -tmpfs test TMPFS |
| -ubifs test ubifs |
| -l line mode diff |
| -udiff show unified diff (default) |
| -n show me, do not run tests |
| -T output timestamps |
| -r randomize test order |
| -d dump test output to stdout |
| -b brief test summary |
| -R fmt[,fmt] generate report in formats specified. Supported format: [xunit] |
| --large-fs optimise scratch device for large filesystems |
| -s section run only specified section from config file |
| -S section exclude the specified section from the config file |
| |
| testlist options |
| -g group[,group...] include tests from these groups |
| -x group[,group...] exclude tests from these groups |
| -X exclude_file exclude individual tests |
| -E external_file exclude individual tests |
| [testlist] include tests matching names in testlist |
| |
| testlist argument is a list of tests in the form of <test dir>/<test name>. |
| |
| <test dir> is a directory under tests that contains a group file, |
| with a list of the names of the tests in that directory. |
| |
| <test name> may be either a specific test file name (e.g. xfs/001) or |
| a test file name match pattern (e.g. xfs/*). |
| |
| group argument is either the name of a test group to collect from all |
| the test dirs (e.g. quick) or the name of a test group to collect from |
| a specific test dir in the form of <test dir>/<group name> (e.g. xfs/quick). |
| If you want to run all the tests in the test suite, use "-g all" to specify all |
| groups. |
| |
| exclude_file argument is the name of a file inside each test directory. |
| For every test dir where this file is found, the listed test names are |
| excluded from the list of tests to run from that test dir. |
| |
| external_file argument is the path to a single file containing a list of tests |
| to exclude, in the form of <test dir>/<test name>. |
| |
| examples: |
| check xfs/001 |
| check -g quick |
| check -g xfs/quick |
| check -x stress xfs/* |
| check -X .exclude -g auto |
| check -E ~/.xfstests.exclude |
| ' |
| exit 0 |
| } |
| |
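| # Parse the group file in $SRC_DIR/<dir> and print the tests in that directory |
| # which belong to the given group, as $SRC_DIR/<dir>/<test name>. Group files |
| # list one test per line followed by the groups it belongs to, along the lines |
| # of (illustrative): |
| # |
| # 001 auto quick rw |
| # |
| # Returns 1 if the directory has no group file (or it is empty). |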
| get_sub_group_list() |
| { |
| local d=$1 |
| local grp=$2 |
| |
| test -s "$SRC_DIR/$d/group" || return 1 |
| |
| local grpl=$(sed -n < $SRC_DIR/$d/group \ |
| -e 's/#.*//' \ |
| -e 's/$/ /' \ |
| -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p") |
| echo $grpl |
| } |
| |
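| # Expand a group name into a list of tests. A plain group name (e.g. quick) is |
| # collected from all of $SRC_GROUPS plus the configured $FSTYP directory; a |
| # <test dir>/<group> name (e.g. xfs/quick) is collected from that directory only. |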
| get_group_list() |
| { |
| local grp=$1 |
| local grpl="" |
| local sub=$(dirname $grp) |
| |
| if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then |
| # group is given as <subdir>/<group> (e.g. xfs/quick) |
| grp=$(basename $grp) |
| get_sub_group_list $sub $grp |
| return |
| fi |
| |
| for d in $SRC_GROUPS $FSTYP; do |
| if ! test -d "$SRC_DIR/$d" ; then |
| continue |
| fi |
| grpl="$grpl $(get_sub_group_list $d $grp)" |
| done |
| echo $grpl |
| } |
| |
| # Find all tests, excluding files that are test metadata such as group files. |
| # It matches test names against $VALID_TEST_NAME defined in common/rc |
| get_all_tests() |
| { |
| touch $tmp.list |
| for d in $SRC_GROUPS $FSTYP; do |
| if ! test -d "$SRC_DIR/$d" ; then |
| continue |
| fi |
| ls $SRC_DIR/$d/* | \ |
| grep -v "\..*" | \ |
| grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \ |
| grep -v "group\|Makefile" >> $tmp.list 2>/dev/null |
| done |
| } |
| |
| # Takes the list of tests to run in $tmp.list and removes the tests passed to |
| # the function from that list. |
| trim_test_list() |
| { |
| test_list="$*" |
| |
| rm -f $tmp.grep |
| numsed=0 |
| for t in $test_list |
| do |
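| # flush the accumulated patterns in batches so the grep pattern |
| # file stays small |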
| if [ $numsed -gt 100 ]; then |
| grep -v -f $tmp.grep <$tmp.list >$tmp.tmp |
| mv $tmp.tmp $tmp.list |
| numsed=0 |
| rm -f $tmp.grep |
| fi |
| echo "^$t\$" >>$tmp.grep |
| numsed=`expr $numsed + 1` |
| done |
| grep -v -f $tmp.grep <$tmp.list >$tmp.tmp |
| mv $tmp.tmp $tmp.list |
| rm -f $tmp.grep |
| } |
| |
| |
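| # current time in seconds since the epoch |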
| _wallclock() |
| { |
| date "+%s" |
| } |
| |
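| # print the current time of day as " [HH:MM:SS]", without a trailing newline |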
| _timestamp() |
| { |
| now=`date "+%T"` |
| echo -n " [$now]" |
| } |
| |
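| # Build the final $list of tests for this run: tests given on the command line |
| # plus the included groups, minus the excluded groups, sorted and de-duplicated |
| # and, with -r, randomized. |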
| _prepare_test_list() |
| { |
| unset list |
| # Tests specified on the command line |
| if [ -s $tmp.arglist ]; then |
| cat $tmp.arglist > $tmp.list |
| else |
| touch $tmp.list |
| fi |
| |
| # Specified groups to include |
| # Note that the CLI processing adds a leading space to the first group |
| # parameter, so we have to allow for that here when checking for "all" |
| if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then |
| # no test numbers, do everything |
| get_all_tests |
| else |
| for group in $GROUP_LIST; do |
| list=$(get_group_list $group) |
| if [ -z "$list" ]; then |
| echo "Group \"$group\" is empty or not defined?" |
| exit 1 |
| fi |
| |
| for t in $list; do |
| grep -s "^$t\$" $tmp.list >/dev/null || \ |
| echo "$t" >>$tmp.list |
| done |
| done |
| fi |
| |
| # Specified groups to exclude |
| for xgroup in $XGROUP_LIST; do |
| list=$(get_group_list $xgroup) |
| if [ -z "$list" ]; then |
| echo "Group \"$xgroup\" is empty or not defined?" |
| exit 1 |
| fi |
| |
| trim_test_list $list |
| done |
| |
| # sort the list of tests into numeric order |
| list=`sort -n $tmp.list | uniq` |
| rm -f $tmp.list |
| |
| if $randomize |
| then |
| list=`echo $list | awk -f randomize.awk` |
| fi |
| } |
| |
| # Process command arguments first. |
| while [ $# -gt 0 ]; do |
| case "$1" in |
| -\? | -h | --help) usage ;; |
| |
| -nfs) FSTYP=nfs ;; |
| -glusterfs) FSTYP=glusterfs ;; |
| -cifs) FSTYP=cifs ;; |
| -9p) FSTYP=9p ;; |
| -overlay) FSTYP=overlay; export OVERLAY=true ;; |
| -pvfs2) FSTYP=pvfs2 ;; |
| -tmpfs) FSTYP=tmpfs ;; |
| -ubifs) FSTYP=ubifs ;; |
| |
| -g) group=$2 ; shift ; |
| GROUP_LIST="$GROUP_LIST ${group//,/ }" |
| ;; |
| |
| -x) xgroup=$2 ; shift ; |
| XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }" |
| ;; |
| |
| -X) xfile=$2; shift ; |
| for d in $SRC_GROUPS $FSTYP; do |
| [ -f $SRC_DIR/$d/$xfile ] || continue |
| for f in `sed "s/#.*$//" $SRC_DIR/$d/$xfile`; do |
| echo $d/$f >> $tmp.xlist |
| done |
| done |
| ;; |
| -E) xfile=$2; shift ; |
| if [ -f $xfile ]; then |
| sed "s/#.*$//" "$xfile" >> $tmp.xlist |
| fi |
| ;; |
| -s) RUN_SECTION="$RUN_SECTION $2"; shift ;; |
| -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;; |
| -l) diff="diff" ;; |
| -udiff) diff="$diff -u" ;; |
| |
| -n) showme=true ;; |
| -r) randomize=true ;; |
| |
| -T) timestamp=true ;; |
| -d) DUMP_OUTPUT=true ;; |
| -b) brief_test_summary=true;; |
| -R) report_fmt=$2 ; shift ; |
| REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }" |
| do_report=true |
| ;; |
| --large-fs) export LARGE_SCRATCH_DEV=yes ;; |
| --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;; |
| |
| -*) usage ;; |
| *) # not an argument, we've got tests now. |
| have_test_arg=true ;; |
| esac |
| |
| # If we've found a test specification, break out of the processing |
| # loop before we shift the arguments so that this is the first argument |
| # processed in the test arg loop below. |
| if $have_test_arg; then |
| break; |
| fi |
| |
| shift |
| done |
| |
| # We need common/config; source it after processing args because overlay needs |
| # FSTYP set before common/config is sourced. |
| if ! . ./common/config; then |
| echo "$iam: failed to source common/config" |
| exit 1 |
| fi |
| |
| # Process tests from command line now. |
| if $have_test_arg; then |
| while [ $# -gt 0 ]; do |
| case "$1" in |
| -*) echo "Arguments before tests, please!" |
| status=1 |
| exit $status |
| ;; |
| *) # Expand test pattern (e.g. xfs/???, *fs/001) |
| list=$(cd $SRC_DIR; echo $1) |
| for t in $list; do |
| test_dir=`dirname $t` |
| test_dir=${test_dir#$SRC_DIR/*} |
| test_name=`basename $t` |
| group_file=$SRC_DIR/$test_dir/group |
| |
| if egrep -q "^$test_name" $group_file; then |
| # in group file ... OK |
| echo $SRC_DIR/$test_dir/$test_name \ |
| >>$tmp.arglist |
| else |
| # oops |
| echo "$t - unknown test, ignored" |
| fi |
| done |
| ;; |
| esac |
| |
| shift |
| done |
| elif [ -z "$GROUP_LIST" ]; then |
| # default group list is the auto group. If any other group or test is |
| # specified, we use that instead. |
| GROUP_LIST="auto" |
| fi |
| |
| # we need common/rc |
| if ! . ./common/rc |
| then |
| echo "check: failed to source common/rc" |
| exit 1 |
| fi |
| |
| if [ `id -u` -ne 0 ] |
| then |
| echo "check: QA must be run as root" |
| exit 1 |
| fi |
| |
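| # reset the per-section pass/fail/notrun counters and lists |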
| _wipe_counters() |
| { |
| n_try="0" |
| n_bad="0" |
| n_notrun="0" |
| unset try notrun bad |
| } |
| |
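| # Per-section wrap-up: fold the per-test runtimes into $check.time, append the |
| # ran/not-run/failure summary to $check.log and $tmp.summary, and generate a |
| # section report if one was requested with -R. |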
| _wrapup() |
| { |
| seq="check" |
| check="$RESULT_BASE/check" |
| |
| if $showme; then |
| if $needwrap; then |
| if $do_report; then |
| _make_section_report |
| fi |
| needwrap=false |
| fi |
| elif $needwrap; then |
| if [ -f $check.time -a -f $tmp.time ]; then |
| cat $check.time $tmp.time \ |
| | $AWK_PROG ' |
| { t[$1] = $2 } |
| END { |
| if (NR > 0) { |
| for (i in t) print i " " t[i] |
| } |
| }' \ |
| | sort -n >$tmp.out |
| mv $tmp.out $check.time |
| fi |
| |
| echo "" >>$check.log |
| date >>$check.log |
| |
| echo "SECTION -- $section" >>$tmp.summary |
| echo "=========================" >>$tmp.summary |
| if [ ! -z "$n_try" -a $n_try != 0 ]; then |
| if [ $brief_test_summary == "false" ]; then |
| echo "Ran:$try" |
| echo "Ran:$try" >>$tmp.summary |
| fi |
| echo "Ran:$try" >>$check.log |
| fi |
| |
| $interrupt && echo "Interrupted!" | tee -a $check.log |
| |
| if [ ! -z "$notrun" ]; then |
| if [ $brief_test_summary == "false" ]; then |
| echo "Not run:$notrun" |
| echo "Not run:$notrun" >>$tmp.summary |
| fi |
| echo "Not run:$notrun" >>$check.log |
| fi |
| |
| if [ ! -z "$n_bad" -a $n_bad != 0 ]; then |
| echo "Failures:$bad" |
| echo "Failed $n_bad of $n_try tests" |
| echo "Failures:$bad" >>$check.log |
| echo "Failed $n_bad of $n_try tests" >>$check.log |
| echo "Failures:$bad" >>$tmp.summary |
| echo "Failed $n_bad of $n_try tests" >>$tmp.summary |
| else |
| echo "Passed all $n_try tests" |
| echo "Passed all $n_try tests" >>$check.log |
| echo "Passed all $n_try tests" >>$tmp.summary |
| fi |
| echo "" >>$tmp.summary |
| if $do_report; then |
| _make_section_report |
| fi |
| needwrap=false |
| fi |
| |
| sum_bad=`expr $sum_bad + $n_bad` |
| _wipe_counters |
| rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time |
| if ! $OPTIONS_HAVE_SECTIONS; then |
| rm -f $tmp.* |
| fi |
| } |
| |
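| # Final wrap-up: run _wrapup for the last section and print the accumulated |
| # per-section summary once. |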
| _summary() |
| { |
| _wrapup |
| if $showme; then |
| : |
| elif $needsum; then |
| count=`wc -L $tmp.summary | cut -f1 -d" "` |
| cat $tmp.summary |
| needsum=false |
| fi |
| rm -f $tmp.* |
| } |
| |
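| # Check the test and/or scratch filesystem, but only if the test just run |
| # flagged (via require_test/require_scratch files) that it used them. |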
| _check_filesystems() |
| { |
| if [ -f ${RESULT_DIR}/require_test ]; then |
| _check_test_fs || err=true |
| rm -f ${RESULT_DIR}/require_test* |
| fi |
| if [ -f ${RESULT_DIR}/require_scratch ]; then |
| _check_scratch_fs || err=true |
| rm -f ${RESULT_DIR}/require_scratch* |
| fi |
| } |
| |
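| # Return 1 (and say so) if the test id is in the exclude list built up |
| # from the -X and -E options. |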
| _expunge_test() |
| { |
| local TEST_ID="$1" |
| if [ -s $tmp.xlist ]; then |
| if grep -q $TEST_ID $tmp.xlist; then |
| echo " [expunged]" |
| return 1 |
| fi |
| fi |
| return 0 |
| } |
| |
| _init_kmemleak |
| _prepare_test_list |
| |
| if $OPTIONS_HAVE_SECTIONS; then |
| trap "_summary; exit \$status" 0 1 2 3 15 |
| else |
| trap "_wrapup; exit \$status" 0 1 2 3 15 |
| fi |
| |
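| # Main loop: run the test list once for each configured section, remounting or |
| # re-creating the test device as needed when the section changes fs type or |
| # mount options. |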
| for section in $HOST_OPTIONS_SECTIONS; do |
| OLD_FSTYP=$FSTYP |
| OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS |
| get_next_config $section |
| |
| # Do we need to run only some sections? |
| if [ ! -z "$RUN_SECTION" ]; then |
| skip=true |
| for s in $RUN_SECTION; do |
| if [ $section == $s ]; then |
| skip=false |
| break; |
| fi |
| done |
| if $skip; then |
| continue |
| fi |
| fi |
| |
| # Did this section get excluded? |
| if [ ! -z "$EXCLUDE_SECTION" ]; then |
| skip=false |
| for s in $EXCLUDE_SECTION; do |
| if [ $section == $s ]; then |
| skip=true |
| break; |
| fi |
| done |
| if $skip; then |
| continue |
| fi |
| fi |
| |
| mkdir -p $RESULT_BASE |
| if [ ! -d $RESULT_BASE ]; then |
| echo "failed to create results directory $RESULT_BASE" |
| status=1 |
| exit |
| fi |
| |
| if $OPTIONS_HAVE_SECTIONS; then |
| echo "SECTION -- $section" |
| fi |
| |
| sect_start=`_wallclock` |
| if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then |
| echo "RECREATING -- $FSTYP on $TEST_DEV" |
| _test_unmount 2> /dev/null |
| if ! _test_mkfs >$tmp.err 2>&1 |
| then |
| echo "our local _test_mkfs routine ..." |
| cat $tmp.err |
| echo "check: failed to mkfs \$TEST_DEV using specified options" |
| status=1 |
| exit |
| fi |
| if ! _test_mount |
| then |
| echo "check: failed to mount $TEST_DEV on $TEST_DIR" |
| status=1 |
| exit |
| fi |
| _prepare_test_list |
| elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then |
| _test_unmount 2> /dev/null |
| if ! _test_mount |
| then |
| echo "check: failed to mount $TEST_DEV on $TEST_DIR" |
| status=1 |
| exit |
| fi |
| fi |
| |
| init_rc |
| |
| seq="check" |
| check="$RESULT_BASE/check" |
| |
| # don't leave old full output behind on a clean run |
| rm -f $check.full |
| |
| [ -f $check.time ] || touch $check.time |
| |
| # print out our test configuration |
| echo "FSTYP -- `_full_fstyp_details`" |
| echo "PLATFORM -- `_full_platform_details`" |
| if [ ! -z "$SCRATCH_DEV" ]; then |
| echo "MKFS_OPTIONS -- `_scratch_mkfs_options`" |
| echo "MOUNT_OPTIONS -- `_scratch_mount_options`" |
| fi |
| echo |
| needwrap=true |
| |
| if [ ! -z "$SCRATCH_DEV" ]; then |
| _scratch_unmount 2> /dev/null |
| # call the overridden mkfs - make sure the FS is built |
| # the same as we'll create it later. |
| |
| if ! _scratch_mkfs >$tmp.err 2>&1 |
| then |
| echo "our local _scratch_mkfs routine ..." |
| cat $tmp.err |
| echo "check: failed to mkfs \$SCRATCH_DEV using specified options" |
| status=1 |
| exit |
| fi |
| |
| # call the overridden mount - make sure the FS mounts with |
| # the same options that we'll mount with later. |
| if ! _scratch_mount >$tmp.err 2>&1 |
| then |
| echo "our local mount routine ..." |
| cat $tmp.err |
| echo "check: failed to mount \$SCRATCH_DEV using specified options" |
| status=1 |
| exit |
| fi |
| fi |
| |
| seqres="$check" |
| _check_test_fs |
| |
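| # run each test in the (possibly randomized) list for this section |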
| for seq in $list |
| do |
| err=false |
| _err_msg="" |
| if [ ! -f $seq ]; then |
| # Try to get the full name in case the user supplied only the seq id |
| # and the test has a name. It takes a bit of care to find the test |
| # itself and not its sample output or helper files. |
| bname=$(basename $seq) |
| full_seq=$(find $(dirname $seq) -name "$bname*" -executable | |
| awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\ |
| END { print shortest }') |
| if [ -f $full_seq ] \ |
| && [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then |
| seq=$full_seq |
| fi |
| fi |
| |
| # the filename for the test and the name output are different. |
| # we don't include the tests/ directory in the name output. |
| export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"` |
| |
| # Similarly, the result directory needs to replace the tests/ |
| # part of the test location. |
| group=`dirname $seq` |
| if $OPTIONS_HAVE_SECTIONS; then |
| export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"` |
| REPORT_DIR="$RESULT_BASE/$section" |
| else |
| export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"` |
| REPORT_DIR="$RESULT_BASE" |
| fi |
| seqres="$REPORT_DIR/$seqnum" |
| |
| mkdir -p $RESULT_DIR |
| |
| echo -n "$seqnum" |
| |
| if $showme; then |
| _expunge_test $seqnum |
| if [ $? -eq 1 ]; then |
| continue |
| fi |
| echo |
| start=0 |
| stop=0 |
| n_notrun=`expr $n_notrun + 1` |
| if $do_report; then |
| _make_testcase_report "list" |
| fi |
| continue |
| fi |
| tc_status="pass" |
| if [ ! -f $seq ]; then |
| echo " - no such test?" |
| else |
| # really going to try and run this one |
| # |
| rm -f $seqres.out.bad |
| |
| # check if we really should run it |
| _expunge_test $seqnum |
| if [ $? -eq 1 ]; then |
| continue |
| fi |
| |
| # slashes now in names, sed barfs on them so use grep |
| lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'` |
| if [ "X$lasttime" != X ]; then |
| echo -n " ${lasttime}s ..." |
| else |
| echo -n " " # prettier output with timestamps. |
| fi |
| rm -f core $seqres.notrun |
| |
| start=`_wallclock` |
| $timestamp && echo -n " ["`date "+%T"`"]" |
| [ ! -x $seq ] && chmod u+x $seq # ensure we can run it |
| $LOGGER_PROG "run xfstest $seqnum" |
| if [ -w /dev/kmsg ]; then |
| export date_time=`date +"%F %T"` |
| echo "run fstests $seqnum at $date_time" > /dev/kmsg |
| # _check_dmesg depends on this log in dmesg |
| touch ${RESULT_DIR}/check_dmesg |
| fi |
| if [ "$DUMP_OUTPUT" = true ]; then |
| ./$seq 2>&1 | tee $tmp.out |
| # Because $? would get tee's return code |
| sts=${PIPESTATUS[0]} |
| else |
| ./$seq >$tmp.out 2>&1 |
| sts=$? |
| fi |
| $timestamp && _timestamp |
| stop=`_wallclock` |
| |
| if [ -f core ] |
| then |
| _err_msg="[dumped core]" |
| echo -n " $_err_msg" |
| mv core $RESULT_BASE/$seqnum.core |
| err=true |
| fi |
| |
| if [ -f $seqres.notrun ] |
| then |
| $timestamp || echo -n " [not run] " |
| $timestamp && echo " [not run]" && echo -n " $seqnum -- " |
| cat $seqres.notrun |
| notrun="$notrun $seqnum" |
| n_notrun=`expr $n_notrun + 1` |
| tc_status="notrun" |
| else |
| if [ $sts -ne 0 ] |
| then |
| _err_msg="[failed, exit status $sts]" |
| echo -n " $_err_msg" |
| err=true |
| fi |
| if [ ! -f $seq.out ] |
| then |
| _dump_err "no qualified output" |
| err=true |
| else |
| |
| # coreutils 8.16+ changed quote formats in error messages from |
| # `foo' to 'foo'. Filter old versions to match the new version. |
| sed -i "s/\`/\'/g" $tmp.out |
| if diff $seq.out $tmp.out >/dev/null 2>&1 |
| then |
| if $err |
| then |
| : |
| else |
| echo "$seqnum `expr $stop - $start`" >>$tmp.time |
| echo -n " `expr $stop - $start`s" |
| fi |
| echo "" |
| else |
| echo " - output mismatch (see $seqres.out.bad)" |
| mv $tmp.out $seqres.out.bad |
| $diff $seq.out $seqres.out.bad | { |
| if test "$DIFF_LENGTH" -le 0; then |
| cat |
| else |
| head -n "$DIFF_LENGTH" |
| echo "..." |
| echo "(Run '$diff $seq.out $seqres.out.bad'" \ |
| " to see the entire diff)" |
| fi; } | \ |
| sed -e 's/^\(.\)/ \1/' |
| _err_msg="output mismatch (see $diff $seq.out $seqres.out.bad)" |
| err=true |
| fi |
| fi |
| try="$try $seqnum" |
| n_try=`expr $n_try + 1` |
| _check_filesystems |
| _check_dmesg || err=true |
| _check_kmemleak || err=true |
| fi |
| |
| fi |
| |
| # come here for each test, except when $showme is true |
| # |
| if $err |
| then |
| bad="$bad $seqnum" |
| n_bad=`expr $n_bad + 1` |
| tc_status="fail" |
| fi |
| if $do_report; then |
| _make_testcase_report "$tc_status" |
| fi |
| seq="after_$seqnum" |
| done |
| sect_stop=`_wallclock` |
| interrupt=false |
| _wrapup |
| interrupt=true |
| echo |
| |
| _test_unmount 2> /dev/null |
| _scratch_unmount 2> /dev/null |
| done |
| |
| interrupt=false |
| status=`expr $sum_bad != 0` |
| exit |