#!/bin/sh
#
# The purpose of this script is to simulate a typical workload in a short
# amount of time. Filesystem reads are especially interesting for us, as
# they can lead to read disturb on the NAND flash. UBIFS utilizes the page
# cache, therefore reading the same file multiple times will not lead to
# multiple reads at the MTD level.
# But the page cache is not an infinite resource, and the kernel is allowed
# to shrink or flush it at any time; this can lead to reads at the MTD
# level again. To simulate that, the script regularly flushes the page
# cache and the inode cache.
#
# The tests are currently rather archaic due to limitations of Busybox.
#
# TODO:
# - report results from UBI stats interface (needs rewrite in C)
#
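# Note: $RANDOM is not guaranteed by POSIX sh; the script needs a shell
# that provides it, such as bash or Busybox ash.
#
# Example invocation (the script name is illustrative):
#   ./stress_test.sh --writemuch --runs 5 --files 50
#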
set -u -e
BIG_FILE_NUM=19
FILE_NUM=99
RUN_NUM=9
WRITE_MUCH=0
BULK_WRITE=0
MAX_SIZE_MB=5
MIN_SIZE_MB=1
MIN_BULK_SIZE_MB=10
MAX_BULK_SIZE_MB=20
mkdir -p testdir
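# Ask the kernel to drop the page cache plus the dentry and inode caches
# (writing 3 to drop_caches combines 1 = page cache, 2 = slab caches).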
drop_caches()
{
echo 3 > /proc/sys/vm/drop_caches
}
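# Create the small test files (smallfile0 .. smallfile$FILE_NUM), each
# filled with a random amount of data between MIN_SIZE_MB and MAX_SIZE_MB.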
write_files()
{
for i in $(seq 0 $FILE_NUM)
do
SIZE=$(($RANDOM % ($MAX_SIZE_MB - $MIN_SIZE_MB + 1) + $MIN_SIZE_MB))
touch testdir/smallfile$i
dd if=/dev/urandom of=testdir/smallfile$i bs=1M \
count=$SIZE > /dev/null 2>&1
done
}
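# Like write_files(), but with a fixed size of 1 MiB and a sync around
# each write so the data cannot linger in the page cache.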
write_files_sync()
{
for i in $(seq 0 $FILE_NUM)
do
touch testdir/smallfile$i
sync
dd if=/dev/urandom of=testdir/smallfile$i bs=1M count=1 > /dev/null 2>&1
sync
done
}
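# Overwrite one randomly chosen small file with 1 MiB of random data.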
write_rand_file()
{
dd if=/dev/urandom of=testdir/smallfile$(($RANDOM % ($FILE_NUM + 1))) bs=1M count=1 > /dev/null 2>&1
}
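# Same as write_rand_file(), followed by a sync.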
write_rand_file_sync()
{
dd if=/dev/urandom of=testdir/smallfile$(($RANDOM % ($FILE_NUM + 1))) bs=1M count=1 > /dev/null 2>&1
sync
}
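# Create the large test files (bigfile0 .. bigfile$BIG_FILE_NUM) with
# random sizes between MIN_BULK_SIZE_MB and MAX_BULK_SIZE_MB, syncing
# after each write.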
write_big_files_sync()
{
for i in $(seq 0 $BIG_FILE_NUM)
do
SIZE=$(($RANDOM % ($MAX_BULK_SIZE_MB - $MIN_BULK_SIZE_MB + 1) + $MIN_BULK_SIZE_MB))
touch testdir/bigfile$i
sync
dd if=/dev/urandom of=testdir/bigfile$i bs=1M count=$SIZE > /dev/null 2>&1
sync
done
}
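# Append a random amount (up to MAX_SIZE_MB - 1 MiB) of data to each
# small file; a size of 0 skips the file in this round.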
append_to_files()
{
for i in $(seq 0 $FILE_NUM)
do
SIZE=$(($RANDOM % $MAX_SIZE_MB))
if [ $SIZE -gt 0 ]
then
dd if=/dev/urandom of=testdir/smallfile$i bs=1M count=$SIZE \
oflag=append conv=notrunc > /dev/null 2>&1
fi
done
}
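# Like append_to_files(), but sync after each append.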
append_to_files_sync()
{
for i in $(seq 0 $FILE_NUM)
do
SIZE=$(($RANDOM % $MAX_SIZE_MB))
if [ $SIZE -gt 0 ]
then
dd if=/dev/urandom of=testdir/smallfile$i bs=1M count=$SIZE \
oflag=append conv=notrunc > /dev/null 2>&1
sync
fi
done
}
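# Read every small file once. Whether this reaches the MTD layer depends
# on what is still present in the page cache.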
read_files()
{
for i in $(seq 0 $FILE_NUM)
do
cat testdir/smallfile$i > /dev/null
done
}
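# Read every large file once.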
read_big_files()
{
for i in $(seq 0 $BIG_FILE_NUM)
do
cat testdir/bigfile$i > /dev/null
done
}
usage()
{
SELF=$(basename "$0")
echo "Usage: $SELF [arguments]"
echo " -b, --bulk If set, perform bulk write test."
echo " -w, --writemuch If set, perform write stress test."
echo " -r, --runs <count> Specify the number of test iterations."
echo " -n, --files <count> Specify the number of small files to create."
echo " -N, --big-files <count> Specifiy the number of large files to create."
echo " -h, --help Display this text and exit."
echo " --min-mb <count> The minimum size (MiB) of small files."
echo " --max-mb <count> The maximum size (MiB) of small files."
echo " --min-bulk-mb <count> The minimum size (MiB) of large files."
echo " --max-bulk-mb <count> The maximum size (MiB) of large files."
exit
}
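# Parse options with getopt(1). The user-supplied counts for -r/-n/-N are
# stored decremented by one because the seq loops below start at 0 and
# would otherwise run count + 1 times.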
O=`getopt -l bulk,writemuch,runs:,files:,big-files:,max-mb:,min-mb:,max-bulk-mb:,min-bulk-mb:,help -- bwr:n:N:h "$@"`
eval set -- "$O"
while [ $# -gt 0 ]; do
case "$1" in
-b|--bulk) BULK_WRITE=1;;
-w|--writemuch) WRITE_MUCH=1;;
-r|--runs) RUN_NUM=$(($2 - 1)); shift;;
-n|--files) FILE_NUM=$(($2 - 1)); shift;;
-N|--big-files) BIG_FILE_NUM=$(($2 - 1)); shift;;
-h|--help) usage;;
--max-mb) MAX_SIZE_MB="$2"; shift;;
--min-mb) MIN_SIZE_MB="$2"; shift;;
--max-bulk-mb) MAX_BULK_SIZE_MB="$2"; shift;;
--min-bulk-mb) MIN_BULK_SIZE_MB="$2"; shift;;
--) shift; break;;
esac
shift
done
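# Main test loop: mix cached and uncached reads with several write
# patterns, dropping the caches in between so that reads hit the MTD
# layer again.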
for r in $(seq 0 $RUN_NUM)
do
drop_caches
write_files
drop_caches
append_to_files
for i in $(seq 1 20)
do
read_files
done
for i in $(seq 1 20)
do
drop_caches
read_files
done
if [ $FILE_NUM -gt 0 ]
then
for i in $(seq 1 20)
do
read_files
write_rand_file
done
for i in $(seq 1 20)
do
read_files
write_rand_file_sync
done
fi
if [ $WRITE_MUCH -eq 1 ]
then
for i in $(seq 1 20)
do
write_files_sync
read_files
done
for i in $(seq 1 20)
do
append_to_files_sync
read_files
done
fi
if [ $BULK_WRITE -eq 1 ]
then
for i in $(seq 1 20)
do
write_big_files_sync
read_big_files
done
fi
done