mirror of
https://git.proxmox.com/git/mirror_zfs
synced 2025-04-28 18:28:46 +00:00
ZTS: Increase write sizes for RAIDZ/dRAID tests
Many RAIDZ/dRAID tests filled files doing millions of 100 or even 10 byte writes. It makes very little sense, since we are not micro-benchmarking syscalls or the VFS layer here, while before the blocks reach the vdev layer, the absolute majority of the small writes will be aggregated. In some cases I see we spend almost as much time creating the test files as actually running the tests. And sometimes the tests even time out after that. Reviewed-by: Tony Hutter <hutter2@llnl.gov> Reviewed-by: George Melikov <mail@gmelikov.ru> Signed-off-by: Alexander Motin <mav@FreeBSD.org> Sponsored by: iXsystems, Inc. Closes #16905
This commit is contained in:
parent
c37a2ddaaa
commit
89f796dec6
@ -200,13 +200,13 @@ log_must zpool create -f -o cachefile=none $TESTPOOL $raid ${disks[@]}
|
||||
log_must zfs set primarycache=metadata $TESTPOOL
|
||||
|
||||
log_must zfs create $TESTPOOL/fs
|
||||
log_must fill_fs /$TESTPOOL/fs 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs 1 512 102400 1 R
|
||||
|
||||
log_must zfs create -o compress=on $TESTPOOL/fs2
|
||||
log_must fill_fs /$TESTPOOL/fs2 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs2 1 512 102400 1 R
|
||||
|
||||
log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3
|
||||
log_must fill_fs /$TESTPOOL/fs3 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs3 1 512 102400 1 R
|
||||
|
||||
log_must check_pool_status $TESTPOOL "errors" "No known data errors"
|
||||
|
||||
|
@ -78,13 +78,13 @@ log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]}
|
||||
log_must zfs set primarycache=metadata $pool
|
||||
|
||||
log_must zfs create $pool/fs
|
||||
log_must fill_fs /$pool/fs 1 512 100 1024 R
|
||||
log_must fill_fs /$pool/fs 1 512 102400 1 R
|
||||
|
||||
log_must zfs create -o compress=on $pool/fs2
|
||||
log_must fill_fs /$pool/fs2 1 512 100 1024 R
|
||||
log_must fill_fs /$pool/fs2 1 512 102400 1 R
|
||||
|
||||
log_must zfs create -o compress=on -o recordsize=8k $pool/fs3
|
||||
log_must fill_fs /$pool/fs3 1 512 100 1024 R
|
||||
log_must fill_fs /$pool/fs3 1 512 102400 1 R
|
||||
|
||||
typeset pool_size=$(get_pool_prop size $pool)
|
||||
|
||||
|
@ -92,7 +92,7 @@ log_must zpool destroy $pool
|
||||
log_must zpool create -f $opts $pool $raid ${disks[1..$(($devs-1))]}
|
||||
log_must zfs set primarycache=metadata $pool
|
||||
log_must zfs create $pool/fs
|
||||
log_must fill_fs /$pool/fs 1 512 100 1024 R
|
||||
log_must fill_fs /$pool/fs 1 512 102400 1 R
|
||||
allocated=$(zpool list -Hp -o allocated $pool)
|
||||
log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $((allocated / 4))
|
||||
log_must zpool attach $pool ${raid}-0 ${disks[$devs]}
|
||||
|
@ -94,10 +94,10 @@ opts="-o cachefile=none"
|
||||
log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]}
|
||||
|
||||
log_must zfs create -o recordsize=8k $pool/fs
|
||||
log_must fill_fs /$pool/fs 1 256 100 1024 R
|
||||
log_must fill_fs /$pool/fs 1 256 102400 1 R
|
||||
|
||||
log_must zfs create -o recordsize=128k $pool/fs2
|
||||
log_must fill_fs /$pool/fs2 1 256 100 1024 R
|
||||
log_must fill_fs /$pool/fs2 1 256 102400 1 R
|
||||
|
||||
for disk in ${disks[$(($nparity+2))..$devs]}; do
|
||||
log_must mkfile -n 400m /$pool/fs/file
|
||||
|
@ -81,10 +81,10 @@ log_must set_tunable32 SCRUB_AFTER_EXPAND 0
|
||||
log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]}
|
||||
|
||||
log_must zfs create -o recordsize=8k $pool/fs
|
||||
log_must fill_fs /$pool/fs 1 128 100 1024 R
|
||||
log_must fill_fs /$pool/fs 1 128 102400 1 R
|
||||
|
||||
log_must zfs create -o recordsize=128k $pool/fs2
|
||||
log_must fill_fs /$pool/fs2 1 128 100 1024 R
|
||||
log_must fill_fs /$pool/fs2 1 128 102400 1 R
|
||||
|
||||
for disk in ${disks[$(($nparity+2))..$devs]}; do
|
||||
log_must zpool attach $pool ${raid}-0 $disk
|
||||
|
@ -137,10 +137,10 @@ log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]}
|
||||
devices="${disks[1..$(($nparity+1))]}"
|
||||
|
||||
log_must zfs create -o recordsize=8k $pool/fs
|
||||
log_must fill_fs /$pool/fs 1 128 100 1024 R
|
||||
log_must fill_fs /$pool/fs 1 128 102400 1 R
|
||||
|
||||
log_must zfs create -o recordsize=128k $pool/fs2
|
||||
log_must fill_fs /$pool/fs2 1 128 100 1024 R
|
||||
log_must fill_fs /$pool/fs2 1 128 102400 1 R
|
||||
|
||||
for disk in ${disks[$(($nparity+2))..$devs]}; do
|
||||
# Set pause to some random value near halfway point
|
||||
|
@ -223,13 +223,13 @@ for nparity in 1 2 3; do
|
||||
log_must zfs set primarycache=metadata $TESTPOOL
|
||||
|
||||
log_must zfs create $TESTPOOL/fs
|
||||
log_must fill_fs /$TESTPOOL/fs 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs 1 512 102400 1 R
|
||||
|
||||
log_must zfs create -o compress=on $TESTPOOL/fs2
|
||||
log_must fill_fs /$TESTPOOL/fs2 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs2 1 512 102400 1 R
|
||||
|
||||
log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3
|
||||
log_must fill_fs /$TESTPOOL/fs3 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs3 1 512 102400 1 R
|
||||
|
||||
typeset pool_size=$(get_pool_prop size $TESTPOOL)
|
||||
|
||||
|
@ -119,13 +119,13 @@ for nparity in 1 2 3; do
|
||||
log_must zfs set primarycache=metadata $TESTPOOL
|
||||
|
||||
log_must zfs create $TESTPOOL/fs
|
||||
log_must fill_fs /$TESTPOOL/fs 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs 1 512 102400 1 R
|
||||
|
||||
log_must zfs create -o compress=on $TESTPOOL/fs2
|
||||
log_must fill_fs /$TESTPOOL/fs2 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs2 1 512 102400 1 R
|
||||
|
||||
log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3
|
||||
log_must fill_fs /$TESTPOOL/fs3 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs3 1 512 102400 1 R
|
||||
|
||||
log_must zpool export $TESTPOOL
|
||||
log_must zpool import -o cachefile=none -d $dir $TESTPOOL
|
||||
|
@ -94,13 +94,13 @@ for nparity in 1 2 3; do
|
||||
# log_must zfs set primarycache=metadata $TESTPOOL
|
||||
|
||||
log_must zfs create $TESTPOOL/fs
|
||||
log_must fill_fs /$TESTPOOL/fs 1 256 10 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs 1 256 10240 1 R
|
||||
|
||||
log_must zfs create -o compress=on $TESTPOOL/fs2
|
||||
log_must fill_fs /$TESTPOOL/fs2 1 256 10 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs2 1 256 10240 1 R
|
||||
|
||||
log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3
|
||||
log_must fill_fs /$TESTPOOL/fs3 1 256 10 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs3 1 256 10240 1 R
|
||||
|
||||
log_must zpool export $TESTPOOL
|
||||
log_must zpool import -o cachefile=none -d $dir $TESTPOOL
|
||||
|
@ -223,13 +223,13 @@ for nparity in 1 2 3; do
|
||||
log_must zfs set primarycache=metadata $TESTPOOL
|
||||
|
||||
log_must zfs create $TESTPOOL/fs
|
||||
log_must fill_fs /$TESTPOOL/fs 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs 1 512 102400 1 R
|
||||
|
||||
log_must zfs create -o compress=on $TESTPOOL/fs2
|
||||
log_must fill_fs /$TESTPOOL/fs2 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs2 1 512 102400 1 R
|
||||
|
||||
log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3
|
||||
log_must fill_fs /$TESTPOOL/fs3 1 512 100 1024 R
|
||||
log_must fill_fs /$TESTPOOL/fs3 1 512 102400 1 R
|
||||
|
||||
typeset pool_size=$(get_pool_prop size $TESTPOOL)
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user