DP-1445 Zettacache disk partitioning should match appstack (openzfs#582)
jwk404 authored Aug 24, 2022
1 parent 03064c3 commit b544b99
Showing 3 changed files with 58 additions and 38 deletions.
30 changes: 26 additions & 4 deletions scripts/zfs-tests.sh
@@ -525,6 +525,25 @@ get_cache_part() {
echo "$cache_part"
}

# Calculate the size of the Zettacache slog in KiB, as MIN(8GiB, 25% of device)
get_logsize_in_K() {
device="$1"
# The maximum size of the slog device (8GiB, in 512b sectors)
max_sectors=16777216

# Calculate num of 512b sectors required for slog as 25% of the device.
# This invocation of blockdev returns the device size in 512b sectors.
required_sectors_log_dev="$(($(sudo blockdev --getsz "$device") / 4))"

# For larger disks, cap the slog device size at 8GiB
if [ "$required_sectors_log_dev" -ge "$max_sectors" ]; then
echo "$((8192 * 1024))"
else
# Convert number of 512b sectors to KiB
echo "$((required_sectors_log_dev / 2))"
fi
}
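As a worked illustration of the MIN(8GiB, 25%) calculation above, consider a hypothetical 16GiB cache device (the sector count below is assumed for the example, not taken from the change):

    device_sectors=33554432                   # what 'blockdev --getsz' would report for a 16GiB disk
    required_sectors=$((device_sectors / 4))  # 25% of the device = 8388608 sectors
    if [ "$required_sectors" -ge 16777216 ]; then
        echo "$((8192 * 1024))"               # disk of 32GiB or more: cap the slog at 8GiB (in KiB)
    else
        echo "$((required_sectors / 2))"      # here: 8388608 / 2 = 4194304 KiB, i.e. a 4GiB slog
    fi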

configure_zettacache() {
for cache_dev in ${ZETTACACHE_DEVICES}; do
#
@@ -534,12 +553,15 @@ configure_zettacache() {
#
if echo "$cache_dev" | \
grep -E -q "^/dev/disk/(azure|by-id)/"; then
printf "size=16777216, bootable\n," | \
sudo sfdisk -q -X gpt --wipe always "$cache_dev"
logsize="$(get_logsize_in_K "$cache_dev")"
printf "size=%sK, bootable\n," "$logsize" | \
sudo sfdisk -q -X gpt --wipe always "$cache_dev" > \
/dev/null 2>&1
else
printf "size=16777216, bootable\n," | \
logsize="$(get_logsize_in_K "/dev/${cache_dev##*/}")"
printf "size=%sK, bootable\n," "$logsize" | \
sudo sfdisk -q -X gpt --wipe always \
"/dev/$(basename "$cache_dev")"
"/dev/${cache_dev##*/}" >/dev/null 2>&1
fi

invalidate_zcache_dev "$cache_dev"
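The printf in configure_zettacache drives sfdisk with a two-line script: the first line creates a bootable partition of the computed slog size, and the lone comma on the second line allocates a second partition (the Zettacache itself) from whatever space remains. As a rough sketch, for a hypothetical device /dev/sdx where get_logsize_in_K returned the 8GiB cap, the piped input is equivalent to:

    sudo sfdisk -q -X gpt --wipe always /dev/sdx <<'EOF'
    size=8388608K, bootable
    ,
    EOF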
3 changes: 2 additions & 1 deletion tests/zfs-tests/include/commands.cfg
@@ -158,13 +158,14 @@ export SYSTEM_FILES_LINUX='attr
parted
perf
setfattr
sfdisk
sha256sum
systemctl
udevadm
unshare
useradd
userdel
usermod
flock
logger'

63 changes: 30 additions & 33 deletions tests/zfs-tests/include/libtest.shlib
@@ -1580,7 +1580,7 @@ function get_devices
sed -n 's/\/dev\/\(sd[a-z]\|nvme[0-9]n1\)[p0-9]/\1/p')

# Find the disk that is being used as the linux file system
linux_filesystem_disk=$(sudo sfdisk -l | grep 'Linux filesystem' |
linux_filesystem_disk=$(sfdisk -l | grep 'Linux filesystem' |
awk '{print $1}' | awk -F '/' '{print $NF}' |
sed -n 's/\(sd[a-z]\|nvme[0-9]n1\)[p0-9]*/\1/p')

@@ -1601,7 +1601,7 @@ function get_devices
function is_device_partitioned
{
device="/dev/$1"
sudo sfdisk -l $device 2>/dev/null | grep -q "Device"
sfdisk -l $device 2>/dev/null | grep -q "Device"
}

#
@@ -1612,7 +1612,7 @@ function destroy_partition
device="/dev/$1"
# The partition number to destroy
partnum=$2
sudo sfdisk -q --delete $device $partnum
sfdisk -q --delete $device $partnum
}

#
@@ -1621,29 +1621,27 @@ function destroy_partition
#
function create_slog_zcache_partition
{
device="/dev/$1"
typeset device="/dev/$1"
typeset logsize=$((8192 * 1024))

# Delete existing partition if any
is_device_partitioned "$1" && destroy_partition "$1"

# The size of log device to be created (Default 25% of total)
log_dev_size_in_pct=${2:-25}
# The maximum size of the slog device (8GiB, in 512b sectors)
typeset max_sectors=16777216

# Get total available sectors in the device
available_sectors=$(blockdev --getsz $device)
# Calculate num of sectors required for slog
required_sectors_log_dev=$(bc <<< "$available_sectors * $log_dev_size_in_pct/100")
# Calculate num of 512b sectors required for slog as 25% of the device.
# This invocation of blockdev returns the device size in 512b sectors.
typeset required_sectors_log_dev=$(($(blockdev --getsz $device) / 4))

#
# For larger disk size cap the max slog device size to 8Gb
# Num sectors in 8GB = 8(in GB) * 1024 * 1024 * 2 = 1677216
#
if [ $required_sectors_log_dev -gt 16777216 ]; then
required_sectors_log_dev=1677216
# If 25% of the disk is smaller than 8GiB, use that value
if [[ $required_sectors_log_dev -lt $max_sectors ]]; then
# Convert number of 512b sectors to KiB
logsize=$((required_sectors_log_dev / 2))
fi

printf "size=$required_sectors_log_dev, bootable\n," | \
sudo sfdisk -q -X gpt --wipe always $device
printf "size=${logsize}K, bootable\n," | \
sfdisk -q -X gpt --wipe always $device
}
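After create_slog_zcache_partition completes, the disk should carry two partitions: partition 1 holding the slog at MIN(8GiB, 25% of the disk) and partition 2 holding the Zettacache on the remaining space. A quick way to eyeball the result (the device name is assumed for the example):

    sfdisk -l /dev/sdx            # prints the GPT table with both partitions
    lsblk -o NAME,SIZE /dev/sdx   # shows the slog and cache partition sizes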

# Take a Zettacache device as either an absolute or relative path and
@@ -1664,10 +1662,10 @@ function get_slog_part
elif [[ "$devname" =~ "^/dev/disk/azure" ]]; then
slog_part="${devname}-part1"
elif [[ "$devname" =~ "nvme" ]]; then
devname="$(basename "$devname")"
devname="${devname##*/}"
slog_part="/dev/${devname}p1"
else
devname="$(basename "$devname")"
devname="${devname##*/}"
slog_part="/dev/${devname}1"
fi

@@ -1693,10 +1691,10 @@ function get_cache_part
elif [[ "$devname" =~ "^/dev/disk/azure" ]]; then
cache_part="${devname}-part2"
elif [[ "$devname" =~ "nvme" ]]; then
devname="$(basename "$devname")"
devname="${devname##*/}"
cache_part="/dev/${devname}p2"
else
devname="$(basename "$devname")"
devname="${devname##*/}"
cache_part="/dev/${devname}2"
fi

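Both helpers use ${devname##*/}, the POSIX parameter expansion that strips everything through the last slash, giving the same result as basename without spawning a subshell. The partition suffix then depends on the device type: NVMe namespaces insert a 'p' before the partition number, while sd-style names do not. A brief sketch with hypothetical device paths:

    devname="/dev/nvme0n1"; echo "/dev/${devname##*/}p2"   # -> /dev/nvme0n1p2
    devname="/dev/sdb";     echo "/dev/${devname##*/}2"    # -> /dev/sdb2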
@@ -1708,17 +1706,17 @@ function invalidate_zcache_dev
{
typeset cache_dev="$1"
cache_part="$(get_cache_part "$cache_dev")"
sudo zcache labelclear -f "$cache_part"
zcache labelclear -f "$cache_part"
}

function invalidate_zcache
{
log_note "Invalidate Zettacache device(s)"
sudo systemctl stop zfs-object-agent
systemctl stop zfs-object-agent
for cache_dev in $ZETTACACHE_DEVICES; do
invalidate_zcache_dev $cache_dev
done
sudo systemctl start zfs-object-agent
systemctl start zfs-object-agent
for cache_dev in $ZETTACACHE_DEVICES; do
zcache add "$(get_cache_part $cache_dev)"
done
@@ -1731,11 +1729,11 @@ function invalidate_zcache
function start_zfs_object_agent
{
if $HAS_ZOA_SERVICE; then
sudo systemctl start zfs-object-agent
systemctl start zfs-object-agent
else
sudo -E /sbin/zfs_object_agent -vv -t $ZOA_CONFIG \
/sbin/zfs_object_agent -vv -t $ZOA_CONFIG \
--output-file=$ZOA_LOG 2>&1 | \
sudo tee $ZOA_OUTPUT > /dev/null &
tee $ZOA_OUTPUT > /dev/null &
fi
# Wait for a moment for the zfs-object-agent to be active & running
sleep 1
@@ -1748,9 +1746,9 @@ function start_zfs_object_agent
function stop_zfs_object_agent
{
if $HAS_ZOA_SERVICE; then
sudo systemctl stop zfs-object-agent
systemctl stop zfs-object-agent
else
sudo pkill -9 -f zfs_object_agent
pkill -9 -f zfs_object_agent
fi
}

Expand All @@ -1769,7 +1767,7 @@ function add_tunable
{
name="$1"
value="$2"
echo "$name=$value" | sudo tee -a $ZOA_CONFIG > /dev/null
echo "$name=$value" | tee -a $ZOA_CONFIG > /dev/null
}

# Returns if a tunable is already configured
@@ -1795,8 +1793,7 @@ function add_or_update_tunable
# followed by a =
# followed by 0 or more white spaces
# followed by group that captures anything
sudo -E \
sed -E -i "s/\s*${name}\s*=\s*(.*)/${name}=${value}/" $ZOA_CONFIG
sed -E -i "s/\s*${name}\s*=\s*(.*)/${name}=${value}/" $ZOA_CONFIG
else
add_tunable "$name" "$value"
fi
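The sed expression in add_or_update_tunable rewrites an existing name=value line in place, collapsing any surrounding whitespace. A rough illustration on an invented config file and tunable name (both are assumptions for the example, not part of the agent's real configuration):

    printf 'cache_size = 1024\n' > /tmp/zoa.cfg   # hypothetical existing entry
    name="cache_size"; value="2048"
    sed -E -i "s/\s*${name}\s*=\s*(.*)/${name}=${value}/" /tmp/zoa.cfg
    cat /tmp/zoa.cfg                              # -> cache_size=2048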
