DOSE-601 Performance: add file deletion test (openzfs#450)
tonynguien authored Sep 20, 2021
1 parent bba40d4 commit c8b90e1
Showing 5 changed files with 149 additions and 1 deletion.
2 changes: 1 addition & 1 deletion tests/runfiles/perf-regression.run
@@ -28,6 +28,6 @@ tags = ['perf']
tests = ['sequential_writes', 'sequential_reads', 'sequential_reads_arc_cached',
'sequential_reads_arc_cached_clone', 'sequential_reads_dbuf_cached',
'random_reads', 'random_writes', 'random_readwrite', 'random_writes_zil',
'random_readwrite_fixed']
'random_readwrite_fixed', 'file_deletion']
post =
tags = ['perf', 'regression']
1 change: 1 addition & 0 deletions tests/zfs-tests/tests/perf/fio/Makefile.am
@@ -5,6 +5,7 @@ dist_pkgdata_DATA = \
random_readwrite.fio \
random_readwrite_fixed.fio \
random_writes.fio \
random_writes_fill.fio \
sequential_reads.fio \
sequential_writes.fio \
sequential_readwrite.fio
35 changes: 35 additions & 0 deletions tests/zfs-tests/tests/perf/fio/random_writes_fill.fio
@@ -0,0 +1,35 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2016 by Delphix. All rights reserved.
# Copyright (c) 2020, Kjeld Schouten-Lebbing. All rights reserved.
#

[global]
filename_format=file$jobnum
group_reporting=1
fallocate=0
ioengine=psync
bs=8k
rw=randwrite
thread=1
sync=${SYNC_TYPE}
directory=${DIRECTORY}
numjobs=${NUMJOBS}
filesize=${FILE_SIZE}
number_ios=${PERF_NUMIOS}
randseed=${PERF_RANDSEED}
buffer_compress_percentage=${PERF_COMPPERCENT}
buffer_pattern=0xdeadbeef
buffer_compress_chunk=${PERF_COMPCHUNK}

[job]
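
The job file above takes all of its tunables from the environment; fio expands ${VAR} placeholders from exported variables at run time. As a rough sketch of a standalone invocation (the directory, file size, and other values below are hypothetical stand-ins for what the test harness exports before calling fio):

    export DIRECTORY=/perftest/fs0       # hypothetical filesystem mountpoint
    export NUMJOBS=1
    export FILE_SIZE=$((10 * 1024 * 1024 * 1024))
    export SYNC_TYPE=0
    export PERF_NUMIOS=655360
    export PERF_RANDSEED=1234
    export PERF_COMPPERCENT=66
    export PERF_COMPCHUNK=4096
    fio random_writes_fill.fio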
1 change: 1 addition & 0 deletions tests/zfs-tests/tests/perf/regression/Makefile.am
@@ -1,5 +1,6 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/perf/regression
dist_pkgdata_SCRIPTS = \
file_deletion.ksh \
random_reads.ksh \
random_readwrite.ksh \
random_readwrite_fixed.ksh \
111 changes: 111 additions & 0 deletions tests/zfs-tests/tests/perf/regression/file_deletion.ksh
@@ -0,0 +1,111 @@
#!/bin/ksh

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
#

#
# Description:
# Measure the file deletion operation, i.e. the rm(1) command.
#
# Prior to deletion, the dataset is created and fio randomly writes a new
# file into an otherwise empty pool.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib

function cleanup
{
# kill fio and iostat
pkill fio
pkill iostat
recreate_perf_pool
}

trap "log_fail \"Measure stats during file deletion\"" SIGTERM
log_onexit cleanup

recreate_perf_pool
populate_perf_filesystems

#
# The AWS VM has a 500GB EBS storage limit, so a 400GB pool (plus a 70GB
# rpool disk) is the expected pool size. With a 3x compression ratio and a
# 50% fill target, the file size is ~600GB (400GB * 50% fill * 3x compression).
#
if use_object_store; then
export TOTAL_SIZE=$((600 * 1024 * 1024 * 1024))
else
export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
fi

# Variables for use by fio.
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=1
export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
export PERF_NUMIOS=655360 # 5GB worth of IOs
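# (655360 IOs at the 8k block size above amounts to 5 GiB of writes.)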

# Random writing to the file
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
export DIRECTORY=$(get_directory)
export SYNC_TYPE=$PERF_SYNC_TYPES

log_note "Random writes"
log_must fio $FIO_SCRIPTS/random_writes_fill.fio
log_must zpool sync $PERFPOOL
log_must zinject -a

# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"

# Run log collection for only 10 seconds, which should be sufficient.
export PERF_RUNTIME=10
if is_linux; then
typeset perf_record_cmd="perf record --call-graph dwarf,8192 -F 49 -agq \
-o /dev/stdout -- sleep ${PERF_RUNTIME}"
export collect_scripts=(
"zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"iostat -tdxyz 1" "iostat"
"arcstat 1" "arcstat"
"dstat -at --nocolor 1" "dstat"
"$perf_record_cmd" "perf"
)
else
export collect_scripts=(
"$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"vmstat -T d 1" "vmstat"
"mpstat -T d 1" "mpstat"
"iostat -T d -xcnz 1" "iostat"
)
fi
do_collect_scripts delete

log_note "Removing file"
directory=$(get_directory)
log_note "DIRECTORY: " $directory
for f in $(ls $directory); do
typeset t0=$SECONDS
log_must rm ${directory}/${f}
typeset elapsed=$((SECONDS - t0))
log_note "${directory}/${f} deletion took: ${elapsed} secs"
done
log_pass "Measure stats during file deletion"
