[global]
bs=4k
size=10g
ioengine=libpmem
norandommap
time_based
group_reporting
invalidate=1
disable_lat=1
disable_slat=1
disable_clat=1
clat_percentiles=0

iodepth=1
iodepth_batch=1
thread
numjobs=1
runtime=300

#
# Depending on the 'direct' option, the flag passed to the pmem_memcpy() call is:
# direct=1 - PMEM_F_MEM_NONTEMPORAL,
# direct=0 - PMEM_F_MEM_TEMPORAL.
#
direct=1
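#
# Illustrative sketch (not fio's exact code) of the libpmem copy this maps to;
# 'dest', 'src' and 'len' are placeholders for the mapped file offset, the fio
# buffer and the block size:
#   pmem_memcpy(dest, src, len, direct ? PMEM_F_MEM_NONTEMPORAL : PMEM_F_MEM_TEMPORAL);
#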

#
# sync=1 means that pmem_drain() is executed after each write operation.
#
sync=1
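#
# Rough sketch of the persist step with sync=1 (simplified, assumed ordering):
#   pmem_memcpy(dest, src, len, flags);
#   pmem_drain();   /* ensure the preceding stores are drained to the persistence domain */
#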

#
# With 'scramble_buffers=1', the source buffer is refilled with random data
# before every write operation.
#
# With 'scramble_buffers=0', the source buffer is not rewritten, so it is
# likely to stay in the CPU cache, which can make write performance look
# artificially high.
#
scramble_buffers=1
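#
# To measure the cache-resident case described above instead, flip the setting
# (illustrative):
#   scramble_buffers=0
#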

#
# NUMA CPU node and memory node for the fio process.
# Set the proper node below, or run fio under the `numactl` command instead
# (see the example below).
#
numa_cpu_nodes=0
numa_mem_policy=bind:0
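#
# Equivalent pinning from the command line (illustrative; assumes node 0 and
# that this job file is saved as libpmem.fio):
#   numactl --cpunodebind=0 --membind=0 fio ./libpmem.fio
#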

#
# split means that each job will get a unique CPU from the CPU set
#
cpus_allowed_policy=split
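#
# For example (illustrative; 'numjobs' and 'cpus_allowed' values are assumptions):
# with the settings below, each of the four jobs would be pinned to one CPU
# from the set 0-3:
#   numjobs=4
#   cpus_allowed=0-3
#   cpus_allowed_policy=split
#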

#
# The libpmem engine does IO to files in a DAX-mounted filesystem.
# The filesystem should be created on a Non-Volatile DIMM (e.g. /dev/pmem0)
# and then mounted with the '-o dax' option. Note that the engine
# accesses the underlying NVDIMM directly, bypassing the kernel block
# layer, so the usual filesystem/disk performance monitoring tools such
# as iostat will not provide useful data.
#
#filename=/mnt/pmem/somefile
directory=/mnt/pmem
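#
# Example setup, assuming /dev/pmem0 and an ext4 filesystem (adjust the device,
# filesystem type and mount point to your system):
#   mkfs.ext4 /dev/pmem0
#   mkdir -p /mnt/pmem
#   mount -o dax /dev/pmem0 /mnt/pmem
#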

[libpmem-seqwrite]
rw=write
stonewall

[libpmem-seqread]
rw=read
stonewall

#[libpmem-randwrite]
#rw=randwrite
#stonewall

#[libpmem-randread]
#rw=randread
#stonewall
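
#
# Example invocation, assuming this job file is saved as libpmem.fio
# (uncomment the random-access jobs above to include them in the run):
#   fio ./libpmem.fio
#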