Add random filesize, append and bulk write support

Signed-off-by: David Oberhollenzer <david.oberhollenzer@tele2.at>
Signed-off-by: Richard Weinberger <richard@nod.at>
diff --git a/fsstress.sh b/fsstress.sh
index f0d58bb..c0c65a2 100755
--- a/fsstress.sh
+++ b/fsstress.sh
@@ -13,15 +13,20 @@
 #
 # TODO:
 # - getopt support
-# - more write tests (file appned, bulk write, ...)
 # - report results from UBI stats interface (needs rewrite in C)
 #
 
 set -u -e
 
+BIG_FILE_NUM=19
 FILE_NUM=99
 RUN_NUM=9
 WRITE_MUCH=0
+BULK_WRITE=0
+MAX_SIZE_MB=5
+MIN_SIZE_MB=1
+MIN_BULK_SIZE_MB=10
+MAX_BULK_SIZE_MB=20
 
 mkdir -p testdir
 
@@ -34,8 +39,11 @@
 {
 	for i in $(seq 0 $FILE_NUM)
 	do
+		SIZE=$(($RANDOM % ($MAX_SIZE_MB - $MIN_SIZE_MB + 1) + $MIN_SIZE_MB))
+
 		touch testdir/smallfile$i
-		dd if=/dev/urandom of=testdir/smallfile$i bs=1M count=1 &> /dev/null
+		dd if=/dev/urandom of=testdir/smallfile$i bs=1M \
+			count=$SIZE &> /dev/null
 	done
 }
 
@@ -61,6 +69,43 @@
 	sync
 }
 
+write_big_files_sync()
+{
+	for i in $(seq 0 $BIG_FILE_NUM)
+	do
+		SIZE=$(($RANDOM % ($MAX_BULK_SIZE_MB - $MIN_BULK_SIZE_MB + 1) +
+				$MIN_BULK_SIZE_MB))
+
+		touch testdir/bigfile$i
+		sync
+		dd if=/dev/urandom of=testdir/bigfile$i bs=1M count=$SIZE &> /dev/null
+		sync
+	done
+}
+
+append_to_files()
+{
+	for i in $(seq 0 $FILE_NUM)
+	do
+		SIZE=$(($RANDOM % $MAX_SIZE_MB + 1))
+
+		dd if=/dev/urandom of=testdir/smallfile$i bs=1M count=$SIZE \
+			oflag=append conv=notrunc &> /dev/null
+	done
+}
+
+append_to_files_sync()
+{
+	for i in $(seq 0 $FILE_NUM)
+	do
+		SIZE=$(($RANDOM % $MAX_SIZE_MB + 1))
+
+		dd if=/dev/urandom of=testdir/smallfile$i bs=1M count=$SIZE \
+			oflag=append conv=notrunc &> /dev/null
+		sync
+	done
+}
+
 read_files()
 {
     for i in $(seq 1 $FILE_NUM)
@@ -69,11 +114,22 @@
     done
 }
 
+read_big_files()
+{
+	for i in $(seq 0 $BIG_FILE_NUM)
+	do
+		cat testdir/bigfile$i > /dev/null
+	done
+}
+
 for r in $(seq 0 $RUN_NUM)
 do
 	drop_caches
 	write_files
 
+	drop_caches
+	append_to_files
+
 	for i in $(seq 1 20)
 	do
 		read_files
@@ -104,5 +160,20 @@
 			write_files_sync
 			read_files
 		done
+
+		for i in $(seq 1 20)
+		do
+			append_to_files_sync
+			read_files
+		done
+	fi
+
+	if [ $BULK_WRITE -eq 1 ]
+	then
+		for i in $(seq 1 20)
+		do
+			write_big_files_sync
+			read_big_files
+		done
 	fi
 done