Merge pull request openzfs#517 from delphix/projects/merge-upstream/master

Merge remote-tracking branch '6.0/stage' into 'master'
grwilson authored Jul 18, 2022
2 parents ed28338 + b1ed8f8 commit 9cdcc7e
Showing 13 changed files with 195 additions and 42 deletions.
13 changes: 11 additions & 2 deletions cmd/zed/zed_disk_event.c
@@ -169,7 +169,7 @@ zed_udev_monitor(void *arg)
while (1) {
struct udev_device *dev;
const char *action, *type, *part, *sectors;
const char *bus, *uuid;
const char *bus, *uuid, *devpath;
const char *class, *subclass;
nvlist_t *nvl;
boolean_t is_zfs = B_FALSE;
@@ -263,10 +263,19 @@ zed_udev_monitor(void *arg)
* device id string is required in the message schema
* for matching with vdevs. Preflight here for expected
* udev information.
*
* Special case:
* NVMe devices don't have ID_BUS set (at least on RHEL 7-8),
* but they are valid for autoreplace. Add a special case for
* them by searching for "/nvme/" in the udev DEVPATH:
*
* DEVPATH=/devices/pci0000:00/0000:00:1e.0/nvme/nvme2/nvme2n1
*/
bus = udev_device_get_property_value(dev, "ID_BUS");
uuid = udev_device_get_property_value(dev, "DM_UUID");
if (!is_zfs && (bus == NULL && uuid == NULL)) {
devpath = udev_device_get_devpath(dev);
if (!is_zfs && (bus == NULL && uuid == NULL &&
strstr(devpath, "/nvme/") == NULL)) {
zed_log_msg(LOG_INFO, "zed_udev_monitor: %s no devid "
"source", udev_device_get_devnode(dev));
udev_device_unref(dev);
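
The new check keys on the udev DEVPATH containing an "/nvme/" component whenever ID_BUS is absent. A quick way to inspect both properties for a device is sketched below (illustrative only; /dev/nvme0n1 is an example node and the exact output varies by distro and udev version):

udevadm info --query=property --name=/dev/nvme0n1 | grep -E '^(DEVPATH|ID_BUS)='
# Typical result when ID_BUS is not set for NVMe (e.g. RHEL 7-8):
# DEVPATH=/devices/pci0000:00/0000:00:1e.0/nvme/nvme0/nvme0n1
# (no ID_BUS= line is printed, so only the DEVPATH match lets zed proceed)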
6 changes: 3 additions & 3 deletions cmd/zfs_object_agent/zettacache/src/block_allocator/mod.rs
@@ -1045,14 +1045,14 @@ impl BlockAllocatorBuilder {
disk_stats.free_bytes += slab.free_space();
disk_stats.alloc_bytes += slab.allocated_space();

if matches!(slab.inner, SlabEnum::Evacuating(_)) {
let evacuating = matches!(slab.inner, SlabEnum::Evacuating(_));
if evacuating {
evacuating_slabs.push(slab.id);
}

if let Some(&disk) = removing_disks.get(&slab_disk) {
noalloc_state.get_mut_or_default(disk).insert(slab.id);
disk_stats.noalloc_bytes += slab.free_space();
} else {
} else if !evacuating {
assert_eq!(disk_stats.noalloc_bytes, 0);
slabs_by_bucket
.entry(SlabBucketSize(slab.max_size()))
3 changes: 3 additions & 0 deletions include/sys/mntent.h
@@ -108,5 +108,8 @@
#define MNTOPT_NOACL "noacl" /* likewise */
#define MNTOPT_POSIXACL "posixacl" /* likewise */
#define MNTOPT_MNTPOINT "mntpoint" /* mount point hint */
#define MNTOPT_CASESENSITIVE "casesensitive" /* case sensitivity */
#define MNTOPT_CASEINSENSITIVE "caseinsensitive" /* case insensitivity */
#define MNTOPT_CASEMIXED "casemixed" /* case mixed */

#endif /* _SYS_MNTENT_H */
7 changes: 7 additions & 0 deletions lib/libzfs/os/linux/libzfs_mount_os.c
@@ -84,6 +84,13 @@ static const option_map_t option_map[] = {
{ MNTOPT_ACL, MS_POSIXACL, ZS_COMMENT },
{ MNTOPT_NOACL, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_POSIXACL, MS_POSIXACL, ZS_COMMENT },
/*
* The case sensitivity options are listed here only so that they
* are silently ignored when passed to the zfs mount command.
*/
{ MNTOPT_CASESENSITIVE, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_CASEINSENSITIVE, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_CASEMIXED, MS_COMMENT, ZS_COMMENT },
#ifdef MS_NOATIME
{ MNTOPT_NOATIME, MS_NOATIME, ZS_COMMENT },
{ MNTOPT_ATIME, MS_COMMENT, ZS_COMMENT },
29 changes: 14 additions & 15 deletions lib/libzutil/zutil_import.c
@@ -2167,6 +2167,7 @@ for_each_vdev_in_nvlist(nvlist_t *nvroot, pool_vdev_iter_f func, void *data)
int
zoa_resume_destroy(void *hdl, importargs_t *iarg)
{
char *protocol = NULL;
char *endpoint = NULL;
char *region = NULL;
char *bucket = NULL;
@@ -2183,30 +2184,28 @@ zoa_resume_destroy(void *hdl, importargs_t *iarg)
if (bucket == NULL) {
return (-1);
}
if (nvlist_lookup_string(iarg->props, "object-endpoint", &endpoint)
!= 0) {
return (-1);
}
if (nvlist_lookup_string(iarg->props, "object-region", &region) != 0) {
return (-1);
}
nvlist_lookup_string(iarg->props, "object-protocol", &protocol);
nvlist_lookup_string(iarg->props, "object-endpoint", &endpoint);
nvlist_lookup_string(iarg->props, "object-region", &region);
nvlist_lookup_string(iarg->props, "object-credentials-profile",
&profile);

// Resume destroy
nvlist_t *msg = fnvlist_alloc();
fnvlist_add_string(msg, AGENT_REQUEST_TYPE,
AGENT_TYPE_RESUME_DESTROY_POOL);
fnvlist_add_string(msg, AGENT_BUCKET, bucket);
fnvlist_add_string(msg, AGENT_REGION, region);
fnvlist_add_string(msg, AGENT_ENDPOINT, endpoint);
if (profile != NULL) {
fnvlist_add_string(msg, AGENT_CRED_PROFILE, profile);
}
fnvlist_add_uint64(msg, AGENT_GUID, iarg->guid);
if (iarg->poolname != NULL) {
if (iarg->poolname != NULL)
fnvlist_add_string(msg, AGENT_NAME, iarg->poolname);
}
fnvlist_add_string(msg, AGENT_BUCKET, bucket);
if (region != NULL)
fnvlist_add_string(msg, AGENT_REGION, region);
if (endpoint != NULL)
fnvlist_add_string(msg, AGENT_ENDPOINT, endpoint);
if (protocol != NULL)
fnvlist_add_string(msg, AGENT_PROTOCOL, protocol);
if (profile != NULL)
fnvlist_add_string(msg, AGENT_CRED_PROFILE, profile);

nvlist_t *resp = zoa_send_recv_msg(&handle, msg,
AGENT_PUBLIC_PROTOCOL_VERSION, ZFS_ROOT_SOCKET, NULL);
12 changes: 12 additions & 0 deletions module/os/linux/zfs/zpl_super.c
@@ -233,6 +233,18 @@ __zpl_show_options(struct seq_file *seq, zfsvfs_t *zfsvfs)
}
#endif /* CONFIG_FS_POSIX_ACL */

switch (zfsvfs->z_case) {
case ZFS_CASE_SENSITIVE:
seq_puts(seq, ",casesensitive");
break;
case ZFS_CASE_INSENSITIVE:
seq_puts(seq, ",caseinsensitive");
break;
default:
seq_puts(seq, ",casemixed");
break;
}

return (0);
}
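
Together with the new option_map entries above, this makes the dataset's casesensitivity setting visible as a mount option, so tooling that re-applies options read from /proc/mounts no longer fails on an unknown option. A rough illustration (pool/dataset names and the exact option string are assumptions, not taken from this commit):

zfs create -o casesensitivity=insensitive tank/ci
grep tank/ci /proc/mounts
# tank/ci /tank/ci zfs rw,xattr,posixacl,caseinsensitive 0 0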

30 changes: 26 additions & 4 deletions scripts/zloop.sh
@@ -199,7 +199,7 @@ credentials_in_env() {
return 0
fi
;;
s3)
s3|true)
if [ -n "$AWS_ACCESS_KEY_ID" ] && \
[ -n "$AWS_SECRET_ACCESS_KEY" ]; then \
return 0
@@ -209,6 +209,7 @@ credentials_in_env() {
return 1
;;
esac
return 1
}

# Configures and sets the object storage credentials to the disk
@@ -222,7 +223,7 @@ configure_object_store_credentials() {
sudo mkdir -p /root/.azure && \
sudo cp ~/.azure/credentials /root/.azure/credentials
;;
s3)
s3|true)
# Check and comment out the AWS_ environment variables
# from the /etc/environment file
if grep -q "^AWS" /etc/environment 2>/dev/null; then
@@ -245,6 +246,23 @@
esac
}

#
# Determine whether the test accesses the S3 bucket via an IAM role
# or via secret access keys.
#
# Return 0 if using an IAM role, 1 otherwise.
#
function is_using_iam_role
{
# When an IAM role is used, both AWS_SECRET_ACCESS_KEY and
# AWS_ACCESS_KEY_ID remain unset or zero length.
if [ -n "$AWS_ACCESS_KEY_ID" ] && \
[ -n "$AWS_SECRET_ACCESS_KEY" ]; then
return 1
fi
return 0
}
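
A minimal sketch of how this helper fits together with the credential checks above (the exports are illustrative; when an IAM instance role is in use the CI environment simply leaves the AWS_* variables unset):

unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY
if is_using_iam_role; then
	echo "S3 access via the instance IAM role; no -z <profile> needed"
else
	echo "S3 access via AWS_* keys; pass -z $ZTS_CREDS_PROFILE"
fi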

# parse arguments
# expected format: zloop [-t timeout] [-c coredir] [-- extra ztest args]
coredir=$DEFAULTCOREDIR
@@ -336,12 +354,16 @@ while (( timeout == 0 )) || (( curtime <= (starttime + timeout) )); do
blob)
# Blob storage requires no special arguments.
;;
s3)
s3|true)
# Convert the legacy value 'true' to the s3 default
ZTS_OBJECT_STORE="s3"
zopt="$zopt -O $ZTS_OBJECT_ENDPOINT"
zopt="$zopt -A $ZTS_REGION"
[ -z "$ZTS_CREDS_PROFILE" ] && \
ZTS_CREDS_PROFILE="default"
zopt="$zopt -z $ZTS_CREDS_PROFILE"
if ! is_using_iam_role; then
zopt="$zopt -z $ZTS_CREDS_PROFILE"
fi
;;
*)
echo "Unknown object store $ZTS_OBJECT_STORE"
1 change: 1 addition & 0 deletions tests/zfs-tests/include/commands.cfg
@@ -11,6 +11,7 @@
export SYSTEM_FILES_COMMON='arp
awk
aws
az
base64
basename
bc
7 changes: 5 additions & 2 deletions tests/zfs-tests/include/libtest.shlib
@@ -1715,10 +1715,13 @@ function invalidate_zcache
{
log_note "Invalidate Zettacache device(s)"
sudo systemctl stop zfs-object-agent
for cache_dev in ${ZETTACACHE_DEVICES}; do
for cache_dev in $ZETTACACHE_DEVICES; do
invalidate_zcache_dev $cache_dev
done
sudo systemctl start zfs-object-agent
for cache_dev in $ZETTACACHE_DEVICES; do
zcache add "$(get_cache_part $cache_dev)"
done
}

#
@@ -1889,7 +1892,7 @@ function create_pool

# If Zettacache devices were specified, they have been partitioned to
# also contain a partition for a slog. Add the log devices here.
for cache_dev in ${ZETTACACHE_DEVICES}; do
for cache_dev in $ZETTACACHE_DEVICES; do
typeset log_dev=$(get_slog_part $cache_dev)
log_must zpool add $pool log $log_dev
done
63 changes: 59 additions & 4 deletions tests/zfs-tests/include/object_store.shlib
@@ -71,6 +71,61 @@ function pool_exists_s3
return 1
}

#
# Returns whether a pool with the given guid exists in the Azure
# blob container.
#
function pool_exists_blob
{
typeset guid=$1

# Max num of retries (default 5)
typeset num_retries=${2:-5}

# Constant backoff duration (default 2s)
typeset retry_backoff_duration=${3:-2}

typeset retry_count=0

# Check whether the pool exists in the Azure blob container.
# Return immediately on success; otherwise retry with a constant
# backoff between attempts (defaults: 5 retries, 2 seconds apart).
while [ $retry_count -le $num_retries ]; do
count=$(az storage blob list \
--container-name $ZTS_BUCKET_NAME \
--account-name $AZURE_ACCOUNT \
--account-key $AZURE_KEY \
--prefix zfs/$guid/ \
--output table \
| awk -F '/' '/zfs/ {print $2}' \
| sort -u | wc -l)

[ $count -eq 1 ] && return 0
retry_count=$((retry_count + 1))
sleep $retry_backoff_duration
done
return 1
}

#
# Returns whether the pool corresponding to the guid exists in the
# object store.
#
function pool_exists_object_store
{
typeset guid=$1
typeset -i rc=1

if [ $ZTS_OBJECT_STORE == "s3" ]; then
pool_exists_s3 $guid
rc=$?
elif [ $ZTS_OBJECT_STORE == "blob" ]; then
pool_exists_blob $guid
rc=$?
fi
return $rc
}
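
A short usage sketch for the helpers above (illustrative; in the real tests the guid comes from get_object_store_pool_guid and ZTS_OBJECT_STORE selects the backend):

typeset guid=$(get_object_store_pool_guid $TESTPOOL)

# Dispatches to pool_exists_s3 or pool_exists_blob based on ZTS_OBJECT_STORE.
log_must pool_exists_object_store $guid

# For blob, retry count and backoff are tunable, e.g. 10 retries, 3 seconds apart:
log_must pool_exists_blob $guid 10 3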


#
# Verify if the object store pool is online. Verify the allocated space.
# Also verify that the pool exists in the object store bucket.
@@ -98,8 +153,8 @@ function verify_active_object_store_pool # pool guid exp_allocated
done
log_must [ $is_allocated -eq 1 ]

# Verify that the pool exists in the bucket
log_must pool_exists_s3 $guid
# Verify that the pool exists in the object store
log_must pool_exists_object_store $guid
}

#
@@ -170,8 +225,8 @@ function verify_destroyed_object_store_pool # pool guid
# Verify that the pool GUID should be part of zpool_destroy.cache
log_must cat /etc/zfs/zpool_destroy.cache | grep $guid

# Verify that the pool no longer exists in the bucket
log_mustnot pool_exists_s3 $guid
# Verify that the pool no longer exists in the object store
log_mustnot pool_exists_object_store $guid
}

#
@@ -91,8 +91,23 @@ typeset guid=$(get_object_store_pool_guid $TESTPOOL)
#
log_must test "$guid" != "00000000000000000000"

typeset num_pending_frees_splits=$(aws --endpoint-url $ZTS_OBJECT_ENDPOINT \
s3 ls $ZTS_BUCKET_NAME/zfs/$guid/PendingFreesLog/ | wc -l)
typeset -i num_pending_frees_splits=0

if [ $ZTS_OBJECT_STORE == "s3" ]; then
num_pending_frees_splits=$(aws --endpoint-url $ZTS_OBJECT_ENDPOINT \
s3 ls $ZTS_BUCKET_NAME/zfs/$guid/PendingFreesLog/ | wc -l)
elif [ $ZTS_OBJECT_STORE == "blob" ]; then
# This lists all objects; to get the split count, extract the 4th
# '/'-separated column and count the unique values, e.g.:
# zfs/08878726368137311436/PendingFreesLog/00000/00000000000000000000/...
num_pending_frees_splits=$(az storage blob list -c $ZTS_BUCKET_NAME \
--account-name $AZURE_ACCOUNT --account-key $AZURE_KEY \
--output table \
--prefix zfs/$guid/PendingFrees 2>/dev/null \
| awk -F '/' '/zfs/ {print $4}' \
| sort -u | wc -l)
fi

log_note "Total no of pending frees log split $num_pending_frees_splits"
#
# The pending frees log can hold approximately 10 million objects
@@ -109,8 +124,18 @@ log_must test $num_pending_frees_splits -gt 0
# A recursive call to list the parent object (PendingFreesLog)
# can help in summarizing the child count
#
typeset num_pending_frees_objects=$(aws --endpoint-url $ZTS_OBJECT_ENDPOINT \
s3 ls $ZTS_BUCKET_NAME/zfs/$guid/PendingFreesLog/ --recursive | wc -l)
typeset -i num_pending_frees_objects=0

if [ $ZTS_OBJECT_STORE == "s3" ]; then
num_pending_frees_objects=$(aws --endpoint-url $ZTS_OBJECT_ENDPOINT \
s3 ls $ZTS_BUCKET_NAME/zfs/$guid/PendingFreesLog/ --recursive | wc -l)
elif [ $ZTS_OBJECT_STORE == "blob" ]; then
num_pending_frees_objects=$(az storage blob list -c $ZTS_BUCKET_NAME \
--account-name $AZURE_ACCOUNT --account-key $AZURE_KEY \
--output table \
--prefix zfs/$guid/PendingFrees 2>/dev/null | awk '/zfs/' | wc -l)
fi

log_note "Total no of pending frees objects $num_pending_frees_objects"
log_must test $num_pending_frees_objects -gt 0
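
As a quick sanity check of the '/'-separated field extraction used in the blob branches above (the sample path reuses the guid from the comment earlier and is purely illustrative):

echo "zfs/08878726368137311436/PendingFreesLog/00000/00000000000000000000/obj" |
    awk -F '/' '{print $4}'
# prints 00000, the split directory whose unique values are counted above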
