diff --git a/cmd/zfs/zfs_main.c b/cmd/zfs/zfs_main.c index 75c0e40b610c..51a0fbd43c14 100644 --- a/cmd/zfs/zfs_main.c +++ b/cmd/zfs/zfs_main.c @@ -134,6 +134,10 @@ static int zfs_do_unzone(int argc, char **argv); static int zfs_do_help(int argc, char **argv); +enum zfs_options { + ZFS_OPTION_JSON_NUMS_AS_INT = 1024 +}; + /* * Enable a reasonable set of defaults for libumem debugging on DEBUG builds. */ @@ -272,6 +276,8 @@ static zfs_command_t command_table[] = { #define NCOMMAND (sizeof (command_table) / sizeof (command_table[0])) +#define MAX_CMD_LEN 256 + zfs_command_t *current_command; static const char * @@ -292,7 +298,7 @@ get_usage(zfs_help_t idx) "@[%][,...]\n" "\tdestroy #\n")); case HELP_GET: - return (gettext("\tget [-rHp] [-d max] " + return (gettext("\tget [-rHp] [-j [--json-int]] [-d max] " "[-o \"all\" | field[,...]]\n" "\t [-t type[,...]] [-s source[,...]]\n" "\t <\"all\" | property[,...]> " @@ -304,11 +310,12 @@ get_usage(zfs_help_t idx) return (gettext("\tupgrade [-v]\n" "\tupgrade [-r] [-V version] <-a | filesystem ...>\n")); case HELP_LIST: - return (gettext("\tlist [-Hp] [-r|-d max] [-o property[,...]] " - "[-s property]...\n\t [-S property]... [-t type[,...]] " + return (gettext("\tlist [-Hp] [-j [--json-int]] [-r|-d max] " + "[-o property[,...]] [-s property]...\n\t " + "[-S property]... [-t type[,...]] " "[filesystem|volume|snapshot] ...\n")); case HELP_MOUNT: - return (gettext("\tmount\n" + return (gettext("\tmount [-j]\n" "\tmount [-flvO] [-o opts] <-a|-R filesystem|" "filesystem>\n")); case HELP_PROMOTE: @@ -420,7 +427,7 @@ get_usage(zfs_help_t idx) "\t \n" "\tchange-key -i [-l] \n")); case HELP_VERSION: - return (gettext("\tversion\n")); + return (gettext("\tversion [-j]\n")); case HELP_REDACT: return (gettext("\tredact " " ...\n")); @@ -1885,7 +1892,89 @@ is_recvd_column(zprop_get_cbdata_t *cbp) } /* - * zfs get [-rHp] [-o all | field[,field]...] [-s source[,source]...] + * Generates an nvlist with output version for every command based on params. + * Purpose of this is to add a version of JSON output, considering the schema + * format might be updated for each command in future. 
+ *
+ * Schema:
+ *
+ * "output_version": {
+ *     "command": string,
+ *     "vers_major": integer,
+ *     "vers_minor": integer,
+ * }
+ */
+static nvlist_t *
+zfs_json_schema(int maj_v, int min_v)
+{
+	nvlist_t *sch = NULL;
+	nvlist_t *ov = NULL;
+	char cmd[MAX_CMD_LEN];
+	snprintf(cmd, MAX_CMD_LEN, "zfs %s", current_command->name);
+
+	sch = fnvlist_alloc();
+	ov = fnvlist_alloc();
+	fnvlist_add_string(ov, "command", cmd);
+	fnvlist_add_uint32(ov, "vers_major", maj_v);
+	fnvlist_add_uint32(ov, "vers_minor", min_v);
+	fnvlist_add_nvlist(sch, "output_version", ov);
+	fnvlist_free(ov);
+	return (sch);
+}
+
+static void
+fill_dataset_info(nvlist_t *list, zfs_handle_t *zhp, boolean_t as_int)
+{
+	char createtxg[ZFS_MAXPROPLEN];
+	zfs_type_t type = zfs_get_type(zhp);
+	fnvlist_add_string(list, "name", zfs_get_name(zhp));
+
+	switch (type) {
+	case ZFS_TYPE_FILESYSTEM:
+		fnvlist_add_string(list, "type", "FILESYSTEM");
+		break;
+	case ZFS_TYPE_VOLUME:
+		fnvlist_add_string(list, "type", "VOLUME");
+		break;
+	case ZFS_TYPE_SNAPSHOT:
+		fnvlist_add_string(list, "type", "SNAPSHOT");
+		break;
+	case ZFS_TYPE_POOL:
+		fnvlist_add_string(list, "type", "POOL");
+		break;
+	case ZFS_TYPE_BOOKMARK:
+		fnvlist_add_string(list, "type", "BOOKMARK");
+		break;
+	default:
+		fnvlist_add_string(list, "type", "UNKNOWN");
+		break;
+	}
+
+	if (type != ZFS_TYPE_POOL)
+		fnvlist_add_string(list, "pool", zfs_get_pool_name(zhp));
+
+	if (as_int) {
+		fnvlist_add_uint64(list, "createtxg", zfs_prop_get_int(zhp,
+		    ZFS_PROP_CREATETXG));
+	} else {
+		if (zfs_prop_get(zhp, ZFS_PROP_CREATETXG, createtxg,
+		    sizeof (createtxg), NULL, NULL, 0, B_TRUE) == 0)
+			fnvlist_add_string(list, "createtxg", createtxg);
+	}
+
+	if (type == ZFS_TYPE_SNAPSHOT) {
+		char *ds, *snap;
+		ds = snap = strdup(zfs_get_name(zhp));
+		ds = strsep(&snap, "@");
+		fnvlist_add_string(list, "dataset", ds);
+		fnvlist_add_string(list, "snapshot_name", snap);
+		free(ds);
+	}
+}
+
+/*
+ * zfs get [-rHp] [-j [--json-int]] [-o all | field[,field]...]
+ * [-s source[,source]...]
  * < all | property[,property]... > < fs | snap | vol > ...
  *
  * -r	recurse over any child datasets
@@ -1898,6 +1987,8 @@ is_recvd_column(zprop_get_cbdata_t *cbp)
  *	"local,default,inherited,received,temporary,none".  Default is
  *	all six.
  * -p	Display values in parsable (literal) format.
+ * -j	Display output in JSON format.
+ * --json-int	Display numbers as integers instead of strings.
  *
  * Prints properties for the given datasets. The user can control which
  * columns to display as well as which property types to allow.
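For reference, the two helpers above combine into the envelope that every JSON-emitting zfs subcommand prints. Below is a hand-written, abridged illustration of `zfs get -j used pool/fs` (pretty-printed here for readability; the per-property layout under "properties" comes from zprop_collect_property() in libzfs, which is not part of this diff, so it is elided):

    # zfs get -j used pool/fs
    {
      "output_version": {"command": "zfs get", "vers_major": 0, "vers_minor": 1},
      "datasets": {
        "pool/fs": {
          "name": "pool/fs",
          "type": "FILESYSTEM",
          "pool": "pool",
          "createtxg": "123",
          "properties": { ... }
        }
      }
    }

With --json-int, "createtxg" (and the other numeric values) are emitted as JSON integers via fnvlist_add_uint64() rather than as strings.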
@@ -1917,9 +2008,21 @@ get_callback(zfs_handle_t *zhp, void *data) nvlist_t *user_props = zfs_get_user_props(zhp); zprop_list_t *pl = cbp->cb_proplist; nvlist_t *propval; + nvlist_t *item, *d, *props; + item = d = props = NULL; const char *strval; const char *sourceval; boolean_t received = is_recvd_column(cbp); + int err = 0; + + if (cbp->cb_json) { + d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "datasets"); + if (d == NULL) { + fprintf(stderr, "datasets obj not found.\n"); + exit(1); + } + props = fnvlist_alloc(); + } for (; pl != NULL; pl = pl->pl_next) { char *recvdval = NULL; @@ -1954,9 +2057,9 @@ get_callback(zfs_handle_t *zhp, void *data) cbp->cb_literal) == 0)) recvdval = rbuf; - zprop_print_one_property(zfs_get_name(zhp), cbp, + err = zprop_collect_property(zfs_get_name(zhp), cbp, zfs_prop_to_name(pl->pl_prop), - buf, sourcetype, source, recvdval); + buf, sourcetype, source, recvdval, props); } else if (zfs_prop_userquota(pl->pl_user_prop)) { sourcetype = ZPROP_SRC_LOCAL; @@ -1966,8 +2069,9 @@ get_callback(zfs_handle_t *zhp, void *data) (void) strlcpy(buf, "-", sizeof (buf)); } - zprop_print_one_property(zfs_get_name(zhp), cbp, - pl->pl_user_prop, buf, sourcetype, source, NULL); + err = zprop_collect_property(zfs_get_name(zhp), cbp, + pl->pl_user_prop, buf, sourcetype, source, NULL, + props); } else if (zfs_prop_written(pl->pl_user_prop)) { sourcetype = ZPROP_SRC_LOCAL; @@ -1977,8 +2081,9 @@ get_callback(zfs_handle_t *zhp, void *data) (void) strlcpy(buf, "-", sizeof (buf)); } - zprop_print_one_property(zfs_get_name(zhp), cbp, - pl->pl_user_prop, buf, sourcetype, source, NULL); + err = zprop_collect_property(zfs_get_name(zhp), cbp, + pl->pl_user_prop, buf, sourcetype, source, NULL, + props); } else { if (nvlist_lookup_nvlist(user_props, pl->pl_user_prop, &propval) != 0) { @@ -2010,9 +2115,24 @@ get_callback(zfs_handle_t *zhp, void *data) cbp->cb_literal) == 0)) recvdval = rbuf; - zprop_print_one_property(zfs_get_name(zhp), cbp, + err = zprop_collect_property(zfs_get_name(zhp), cbp, pl->pl_user_prop, strval, sourcetype, - source, recvdval); + source, recvdval, props); + } + if (err != 0) + return (err); + } + + if (cbp->cb_json) { + if (!nvlist_empty(props)) { + item = fnvlist_alloc(); + fill_dataset_info(item, zhp, cbp->cb_json_as_int); + fnvlist_add_nvlist(item, "properties", props); + fnvlist_add_nvlist(d, zfs_get_name(zhp), item); + fnvlist_free(props); + fnvlist_free(item); + } else { + fnvlist_free(props); } } @@ -2029,6 +2149,7 @@ zfs_do_get(int argc, char **argv) int ret = 0; int limit = 0; zprop_list_t fake_name = { 0 }; + nvlist_t *data; /* * Set up default columns and sources. 
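One wiring detail worth calling out before the option-parsing hunks below: `--json-int` has no single-character equivalent, so ZFS_OPTION_JSON_NUMS_AS_INT is defined as 1024 in the first hunk, above UCHAR_MAX, where getopt_long() can return it without ever colliding with a short option letter. A minimal standalone sketch of the same pattern (hypothetical program, not part of this patch):

    #include <getopt.h>
    #include <stdio.h>

    enum { OPT_JSON_INT = 1024 };	/* > UCHAR_MAX: cannot clash with 'j' */

    int
    main(int argc, char **argv)
    {
    	struct option longopts[] = {
    		{"json-int", no_argument, NULL, OPT_JSON_INT},
    		{0, 0, 0, 0}
    	};
    	int c, json = 0, json_int = 0;

    	while ((c = getopt_long(argc, argv, "j", longopts, NULL)) != -1) {
    		switch (c) {
    		case 'j':
    			json = 1;
    			break;
    		case OPT_JSON_INT:
    			json_int = 1;
    			break;
    		default:
    			return (2);
    		}
    	}
    	if (json_int && !json) {
    		fprintf(stderr, "--json-int only works with -j\n");
    		return (2);
    	}
    	printf("json=%d json_int=%d\n", json, json_int);
    	return (0);
    }

The same "--json-int only works with -j" validation appears in zfs_do_get() in the next hunk.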
@@ -2040,8 +2161,14 @@
 	cb.cb_columns[3] = GET_COL_SOURCE;
 	cb.cb_type = ZFS_TYPE_DATASET;
 
+	struct option long_options[] = {
+		{"json-int", no_argument, NULL, ZFS_OPTION_JSON_NUMS_AS_INT},
+		{0, 0, 0, 0}
+	};
+
 	/* check options */
-	while ((c = getopt(argc, argv, ":d:o:s:rt:Hp")) != -1) {
+	while ((c = getopt_long(argc, argv, ":d:o:s:jrt:Hp", long_options,
+	    NULL)) != -1) {
 		switch (c) {
 		case 'p':
 			cb.cb_literal = B_TRUE;
@@ -2055,6 +2182,17 @@
 		case 'H':
 			cb.cb_scripted = B_TRUE;
 			break;
+		case 'j':
+			cb.cb_json = B_TRUE;
+			cb.cb_jsobj = zfs_json_schema(0, 1);
+			data = fnvlist_alloc();
+			fnvlist_add_nvlist(cb.cb_jsobj, "datasets", data);
+			fnvlist_free(data);
+			break;
+		case ZFS_OPTION_JSON_NUMS_AS_INT:
+			cb.cb_json_as_int = B_TRUE;
+			cb.cb_literal = B_TRUE;
+			break;
 		case ':':
 			(void) fprintf(stderr, gettext("missing argument for "
 			    "'%c' option\n"), optopt);
@@ -2178,7 +2316,6 @@ found2:;
 found3:;
 			}
 			break;
-
 		case '?':
 			(void) fprintf(stderr,
 			    gettext("invalid option '%c'\n"), optopt);
@@ -2195,6 +2332,12 @@ found3:;
 		usage(B_FALSE);
 	}
 
+	if (!cb.cb_json && cb.cb_json_as_int) {
+		(void) fprintf(stderr, gettext("'--json-int' only works with"
+		    " '-j' option\n"));
+		usage(B_FALSE);
+	}
+
 	fields = argv[0];
 
 	/*
@@ -2235,6 +2378,11 @@ found3:;
 	ret = zfs_for_each(argc, argv, flags, types, NULL, &cb.cb_proplist,
 	    limit, get_callback, &cb);
 
+	if (ret == 0 && cb.cb_json)
+		zcmd_print_json(cb.cb_jsobj);
+	else if (ret != 0 && cb.cb_json)
+		nvlist_free(cb.cb_jsobj);
+
 	if (cb.cb_proplist == &fake_name)
 		zprop_free_list(fake_name.pl_next);
 	else
@@ -3442,6 +3590,9 @@ typedef struct list_cbdata {
 	boolean_t	cb_literal;
 	boolean_t	cb_scripted;
 	zprop_list_t	*cb_proplist;
+	boolean_t	cb_json;
+	nvlist_t	*cb_jsobj;
+	boolean_t	cb_json_as_int;
 } list_cbdata_t;
 
 /*
@@ -3512,10 +3663,11 @@ zfs_list_avail_color(zfs_handle_t *zhp)
 
 /*
  * Given a dataset and a list of fields, print out all the properties according
- * to the described layout.
+ * to the described layout, or collect them into an nvlist to be printed
+ * later as a JSON object.
  */
 static void
-print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
+collect_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
 {
 	zprop_list_t *pl = cb->cb_proplist;
 	boolean_t first = B_TRUE;
@@ -3524,9 +3676,23 @@ print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
 	nvlist_t *propval;
 	const char *propstr;
 	boolean_t right_justify;
+	nvlist_t *item, *d, *props;
+	item = d = props = NULL;
+	zprop_source_t sourcetype = ZPROP_SRC_NONE;
+	char source[ZFS_MAX_DATASET_NAME_LEN];
+	if (cb->cb_json) {
+		d = fnvlist_lookup_nvlist(cb->cb_jsobj, "datasets");
+		if (d == NULL) {
+			fprintf(stderr, "datasets obj not found.\n");
+			exit(1);
+		}
+		item = fnvlist_alloc();
+		props = fnvlist_alloc();
+		fill_dataset_info(item, zhp, cb->cb_json_as_int);
+	}
 
 	for (; pl != NULL; pl = pl->pl_next) {
-		if (!first) {
+		if (!cb->cb_json && !first) {
 			if (cb->cb_scripted)
 				(void) putchar('\t');
 			else
@@ -3542,69 +3708,112 @@ print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
 			right_justify = zfs_prop_align_right(pl->pl_prop);
 		} else if (pl->pl_prop != ZPROP_USERPROP) {
 			if (zfs_prop_get(zhp, pl->pl_prop, property,
-			    sizeof (property), NULL, NULL, 0,
-			    cb->cb_literal) != 0)
+			    sizeof (property), &sourcetype, source,
+			    sizeof (source), cb->cb_literal) != 0)
 				propstr = "-";
 			else
 				propstr = property;
 			right_justify = zfs_prop_align_right(pl->pl_prop);
 		} else if (zfs_prop_userquota(pl->pl_user_prop)) {
+			sourcetype = ZPROP_SRC_LOCAL;
 			if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
-			    property, sizeof (property), cb->cb_literal) != 0)
+			    property, sizeof (property), cb->cb_literal) != 0) {
+				sourcetype = ZPROP_SRC_NONE;
 				propstr = "-";
-			else
+			} else {
 				propstr = property;
+			}
 			right_justify = B_TRUE;
 		} else if (zfs_prop_written(pl->pl_user_prop)) {
+			sourcetype = ZPROP_SRC_LOCAL;
 			if (zfs_prop_get_written(zhp, pl->pl_user_prop,
-			    property, sizeof (property), cb->cb_literal) != 0)
+			    property, sizeof (property), cb->cb_literal) != 0) {
+				sourcetype = ZPROP_SRC_NONE;
 				propstr = "-";
-			else
+			} else {
 				propstr = property;
+			}
 			right_justify = B_TRUE;
 		} else {
 			if (nvlist_lookup_nvlist(userprops,
-			    pl->pl_user_prop, &propval) != 0)
+			    pl->pl_user_prop, &propval) != 0) {
 				propstr = "-";
-			else
+			} else {
 				propstr = fnvlist_lookup_string(propval,
 				    ZPROP_VALUE);
+				strlcpy(source,
+				    fnvlist_lookup_string(propval,
+				    ZPROP_SOURCE), ZFS_MAX_DATASET_NAME_LEN);
+				if (strcmp(source,
+				    zfs_get_name(zhp)) == 0) {
+					sourcetype = ZPROP_SRC_LOCAL;
+				} else if (strcmp(source,
+				    ZPROP_SOURCE_VAL_RECVD) == 0) {
+					sourcetype = ZPROP_SRC_RECEIVED;
+				} else {
+					sourcetype = ZPROP_SRC_INHERITED;
+				}
+			}
 			right_justify = B_FALSE;
 		}
 
-		/*
-		 * zfs_list_avail_color() needs ZFS_PROP_AVAILABLE + USED
-		 * - so we need another for() search for the USED part
-		 * - when no colors wanted, we can skip the whole thing
-		 */
-		if (use_color() && pl->pl_prop == ZFS_PROP_AVAILABLE) {
-			zprop_list_t *pl2 = cb->cb_proplist;
-			for (; pl2 != NULL; pl2 = pl2->pl_next) {
-				if (pl2->pl_prop == ZFS_PROP_USED) {
-					color_start(zfs_list_avail_color(zhp));
-					/* found it, no need for more loops */
-					break;
-				}
-			}
-		}
+		if (cb->cb_json) {
+			if (pl->pl_prop == ZFS_PROP_NAME)
+				continue;
+			if (zprop_nvlist_one_property(
+			    zfs_prop_to_name(pl->pl_prop), propstr,
+			    sourcetype, source, NULL, props,
+			    cb->cb_json_as_int) != 0)
+				nomem();
+		} else {
+			/*
+			 * zfs_list_avail_color() needs
+			 * ZFS_PROP_AVAILABLE + USED, so we need another
+			 * for() search for the USED part; when no colors
+			 * are wanted, we can skip the whole thing.
+			 */
+			if (use_color() && pl->pl_prop == ZFS_PROP_AVAILABLE) {
+				zprop_list_t *pl2 = cb->cb_proplist;
+				for (; pl2 != NULL; pl2 = pl2->pl_next) {
+					if (pl2->pl_prop == ZFS_PROP_USED) {
+						color_start(
+						    zfs_list_avail_color(zhp));
+						/*
+						 * found it, no need for more
+						 * loops
+						 */
+						break;
+					}
+				}
+			}
 
-		/*
-		 * If this is being called in scripted mode, or if this is the
-		 * last column and it is left-justified, don't include a width
-		 * format specifier.
-		 */
-		if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
-			(void) fputs(propstr, stdout);
-		else if (right_justify)
-			(void) printf("%*s", (int)pl->pl_width, propstr);
-		else
-			(void) printf("%-*s", (int)pl->pl_width, propstr);
+			/*
+			 * If this is being called in scripted mode, or if
+			 * this is the last column and it is left-justified,
+			 * don't include a width format specifier.
+			 */
+			if (cb->cb_scripted || (pl->pl_next == NULL &&
+			    !right_justify))
+				(void) fputs(propstr, stdout);
+			else if (right_justify) {
+				(void) printf("%*s", (int)pl->pl_width,
+				    propstr);
+			} else {
+				(void) printf("%-*s", (int)pl->pl_width,
+				    propstr);
+			}
 
-		if (pl->pl_prop == ZFS_PROP_AVAILABLE)
-			color_end();
+			if (pl->pl_prop == ZFS_PROP_AVAILABLE)
+				color_end();
+		}
 	}
-
-	(void) putchar('\n');
+	if (cb->cb_json) {
+		fnvlist_add_nvlist(item, "properties", props);
+		fnvlist_add_nvlist(d, zfs_get_name(zhp), item);
+		fnvlist_free(props);
+		fnvlist_free(item);
+	} else
+		(void) putchar('\n');
 }
 
 /*
@@ -3616,12 +3825,12 @@ list_callback(zfs_handle_t *zhp, void *data)
 	list_cbdata_t *cbp = data;
 
 	if (cbp->cb_first) {
-		if (!cbp->cb_scripted)
+		if (!cbp->cb_scripted && !cbp->cb_json)
 			print_header(cbp);
 		cbp->cb_first = B_FALSE;
 	}
 
-	print_dataset(zhp, cbp);
+	collect_dataset(zhp, cbp);
 
 	return (0);
 }
@@ -3640,9 +3849,16 @@ zfs_do_list(int argc, char **argv)
 	int ret = 0;
 	zfs_sort_column_t *sortcol = NULL;
 	int flags = ZFS_ITER_PROP_LISTSNAPS | ZFS_ITER_ARGS_CAN_BE_PATHS;
+	nvlist_t *data = NULL;
+
+	struct option long_options[] = {
+		{"json-int", no_argument, NULL, ZFS_OPTION_JSON_NUMS_AS_INT},
+		{0, 0, 0, 0}
+	};
 
 	/* check options */
-	while ((c = getopt(argc, argv, "HS:d:o:prs:t:")) != -1) {
+	while ((c = getopt_long(argc, argv, "jHS:d:o:prs:t:", long_options,
+	    NULL)) != -1) {
 		switch (c) {
 		case 'o':
 			fields = optarg;
@@ -3657,6 +3873,17 @@ zfs_do_list(int argc, char **argv)
 		case 'r':
 			flags |= ZFS_ITER_RECURSE;
 			break;
+		case 'j':
+			cb.cb_json = B_TRUE;
+			cb.cb_jsobj = zfs_json_schema(0, 1);
+			data = fnvlist_alloc();
+			fnvlist_add_nvlist(cb.cb_jsobj, "datasets", data);
+			fnvlist_free(data);
+			break;
+		case ZFS_OPTION_JSON_NUMS_AS_INT:
+			cb.cb_json_as_int = B_TRUE;
+			cb.cb_literal = B_TRUE;
+			break;
 		case 'H':
 			cb.cb_scripted = B_TRUE;
 			break;
@@ -3730,6 +3957,12 @@ found3:;
 	argc -= optind;
 	argv += optind;
 
+	if (!cb.cb_json && cb.cb_json_as_int) {
+		(void) fprintf(stderr, gettext("'--json-int' only works with"
+		    " '-j' option\n"));
+		usage(B_FALSE);
+	}
+
 	/*
 	 * If "-o space" and no types were specified, don't display snapshots.
*/ @@ -3769,6 +4002,11 @@ found3:; ret = zfs_for_each(argc, argv, flags, types, sortcol, &cb.cb_proplist, limit, list_callback, &cb); + if (ret == 0 && cb.cb_json) + zcmd_print_json(cb.cb_jsobj); + else if (ret != 0 && cb.cb_json) + nvlist_free(cb.cb_jsobj); + zprop_free_list(cb.cb_proplist); zfs_free_sort_columns(sortcol); @@ -7189,14 +7427,17 @@ share_mount(int op, int argc, char **argv) int do_all = 0; int recursive = 0; boolean_t verbose = B_FALSE; + boolean_t json = B_FALSE; int c, ret = 0; char *options = NULL; int flags = 0; + nvlist_t *jsobj, *data, *item; const uint_t mount_nthr = 512; uint_t nthr; + jsobj = data = item = NULL; /* check options */ - while ((c = getopt(argc, argv, op == OP_MOUNT ? ":aRlvo:Of" : "al")) + while ((c = getopt(argc, argv, op == OP_MOUNT ? ":ajRlvo:Of" : "al")) != -1) { switch (c) { case 'a': @@ -7211,6 +7452,11 @@ share_mount(int op, int argc, char **argv) case 'l': flags |= MS_CRYPT; break; + case 'j': + json = B_TRUE; + jsobj = zfs_json_schema(0, 1); + data = fnvlist_alloc(); + break; case 'o': if (*optarg == '\0') { (void) fprintf(stderr, gettext("empty mount " @@ -7245,6 +7491,11 @@ share_mount(int op, int argc, char **argv) argc -= optind; argv += optind; + if (json && argc != 0) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + /* check number of arguments */ if (do_all || recursive) { enum sa_protocol protocol = SA_NO_PROTOCOL; @@ -7348,12 +7599,30 @@ share_mount(int op, int argc, char **argv) if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0 || strchr(entry.mnt_special, '@') != NULL) continue; - - (void) printf("%-30s %s\n", entry.mnt_special, - entry.mnt_mountp); + if (json) { + item = fnvlist_alloc(); + fnvlist_add_string(item, "filesystem", + entry.mnt_special); + fnvlist_add_string(item, "mountpoint", + entry.mnt_mountp); + fnvlist_add_nvlist(data, entry.mnt_special, + item); + fnvlist_free(item); + } else { + (void) printf("%-30s %s\n", entry.mnt_special, + entry.mnt_mountp); + } } (void) fclose(mnttab); + if (json) { + fnvlist_add_nvlist(jsobj, "datasets", data); + if (nvlist_empty(data)) + fnvlist_free(jsobj); + else + zcmd_print_json(jsobj); + fnvlist_free(data); + } } else { zfs_handle_t *zhp; @@ -8811,8 +9080,39 @@ found:; static int zfs_do_version(int argc, char **argv) { - (void) argc, (void) argv; - return (zfs_version_print() != 0); + int c; + nvlist_t *jsobj = NULL, *zfs_ver = NULL; + boolean_t json = B_FALSE; + while ((c = getopt(argc, argv, "j")) != -1) { + switch (c) { + case 'j': + json = B_TRUE; + jsobj = zfs_json_schema(0, 1); + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + if (argc != 0) { + (void) fprintf(stderr, "too many arguments\n"); + usage(B_FALSE); + } + + if (json) { + zfs_ver = zfs_version_nvlist(); + if (zfs_ver) { + fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver); + zcmd_print_json(jsobj); + fnvlist_free(zfs_ver); + return (0); + } else + return (-1); + } else + return (zfs_version_print() != 0); } /* Display documentation */ diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c index 1d4f59c4b863..620746f8e7bb 100644 --- a/cmd/zpool/zpool_main.c +++ b/cmd/zpool/zpool_main.c @@ -66,7 +66,7 @@ #include #include #include - +#include #include #include @@ -139,7 +139,10 @@ enum zpool_options { ZPOOL_OPTION_POWER = 1024, ZPOOL_OPTION_ALLOW_INUSE, ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH, - ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH + ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH, + 
ZPOOL_OPTION_POOL_KEY_GUID, + ZPOOL_OPTION_JSON_NUMS_AS_INT, + ZPOOL_OPTION_JSON_FLAT_VDEVS }; /* @@ -274,6 +277,86 @@ static const char *vsx_type_to_nvlist[IOS_COUNT][15] = { NULL}, }; +static const char *pool_scan_func_str[] = { + "NONE", + "SCRUB", + "RESILVER", + "ERRORSCRUB" +}; + +static const char *pool_scan_state_str[] = { + "NONE", + "SCANNING", + "FINISHED", + "CANCELED", + "ERRORSCRUBBING" +}; + +static const char *vdev_rebuild_state_str[] = { + "NONE", + "ACTIVE", + "CANCELED", + "COMPLETE" +}; + +static const char *checkpoint_state_str[] = { + "NONE", + "EXISTS", + "DISCARDING" +}; + +static const char *vdev_state_str[] = { + "UNKNOWN", + "CLOSED", + "OFFLINE", + "REMOVED", + "CANT_OPEN", + "FAULTED", + "DEGRADED", + "ONLINE" +}; + +static const char *vdev_aux_str[] = { + "NONE", + "OPEN_FAILED", + "CORRUPT_DATA", + "NO_REPLICAS", + "BAD_GUID_SUM", + "TOO_SMALL", + "BAD_LABEL", + "VERSION_NEWER", + "VERSION_OLDER", + "UNSUP_FEAT", + "SPARED", + "ERR_EXCEEDED", + "IO_FAILURE", + "BAD_LOG", + "EXTERNAL", + "SPLIT_POOL", + "BAD_ASHIFT", + "EXTERNAL_PERSIST", + "ACTIVE", + "CHILDREN_OFFLINE", + "ASHIFT_TOO_BIG" +}; + +static const char *vdev_init_state_str[] = { + "NONE", + "ACTIVE", + "CANCELED", + "SUSPENDED", + "COMPLETE" +}; + +static const char *vdev_trim_state_str[] = { + "NONE", + "ACTIVE", + "CANCELED", + "SUSPENDED", + "COMPLETE" +}; + +#define ZFS_NICE_TIMESTAMP 100 /* * Given a cb->cb_flags with a histogram bit set, return the iostat_type. @@ -349,6 +432,8 @@ static zpool_command_t command_table[] = { #define VDEV_ALLOC_CLASS_LOGS "logs" +#define MAX_CMD_LEN 256 + static zpool_command_t *current_command; static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV); static char history_str[HIS_MAX_RECORD_LEN]; @@ -398,9 +483,9 @@ get_usage(zpool_help_t idx) case HELP_LABELCLEAR: return (gettext("\tlabelclear [-f] \n")); case HELP_LIST: - return (gettext("\tlist [-gHLpPv] [-o property[,...]] " - "[-T d|u] [pool] ... 
\n" - "\t [interval [count]]\n")); + return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j " + "[--json-int, --json-pool-key-guid]] ...\n" + "\t [-T d|u] [pool] [interval [count]]\n")); case HELP_PREFETCH: return (gettext("\tprefetch -t [] \n" "\t -t ddt \n")); @@ -428,9 +513,11 @@ get_usage(zpool_help_t idx) return (gettext("\ttrim [-dw] [-r ] [-c | -s] " "[ ...]\n")); case HELP_STATUS: - return (gettext("\tstatus [--power] [-c [script1,script2,...]] " - "[-DegiLpPstvx] [-T d|u] [pool] ...\n" - "\t [interval [count]]\n")); + return (gettext("\tstatus [--power] [-j [--json-int, " + "--json-flat-vdevs, ...\n" + "\t --json-pool-key-guid]] [-c [script1,script2,...]] " + "[-DegiLpPstvx] ...\n" + "\t [-T d|u] [pool] [interval [count]]\n")); case HELP_UPGRADE: return (gettext("\tupgrade\n" "\tupgrade -v\n" @@ -438,7 +525,9 @@ get_usage(zpool_help_t idx) case HELP_EVENTS: return (gettext("\tevents [-vHf [pool] | -c]\n")); case HELP_GET: - return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] " + return (gettext("\tget [-Hp] [-j [--json-int, " + "--json-pool-key-guid]] ...\n" + "\t [-o \"all\" | field[,...]] " "<\"all\" | property[,...]> ...\n")); case HELP_SET: return (gettext("\tset \n" @@ -452,7 +541,7 @@ get_usage(zpool_help_t idx) case HELP_SYNC: return (gettext("\tsync [pool] ...\n")); case HELP_VERSION: - return (gettext("\tversion\n")); + return (gettext("\tversion [-j]\n")); case HELP_WAIT: return (gettext("\twait [-Hp] [-T d|u] [-t [,...]] " " [interval]\n")); @@ -896,6 +985,264 @@ print_spare_list(nvlist_t *nv, int indent) } } +typedef struct spare_cbdata { + uint64_t cb_guid; + zpool_handle_t *cb_zhp; +} spare_cbdata_t; + +static boolean_t +find_vdev(nvlist_t *nv, uint64_t search) +{ + uint64_t guid; + nvlist_t **child; + uint_t c, children; + + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 && + search == guid) + return (B_TRUE); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0) { + for (c = 0; c < children; c++) + if (find_vdev(child[c], search)) + return (B_TRUE); + } + + return (B_FALSE); +} + +static int +find_spare(zpool_handle_t *zhp, void *data) +{ + spare_cbdata_t *cbp = data; + nvlist_t *config, *nvroot; + + config = zpool_get_config(zhp, NULL); + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + + if (find_vdev(nvroot, cbp->cb_guid)) { + cbp->cb_zhp = zhp; + return (1); + } + + zpool_close(zhp); + return (0); +} + +static void +nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value, + boolean_t literal, boolean_t as_int, int format) +{ + char buf[256]; + if (literal) { + if (!as_int) + snprintf(buf, 256, "%llu", (u_longlong_t)value); + } else { + switch (format) { + case ZFS_NICENUM_1024: + zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024); + break; + case ZFS_NICENUM_BYTES: + zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES); + break; + case ZFS_NICENUM_TIME: + zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME); + break; + case ZFS_NICE_TIMESTAMP: + format_timestamp(value, buf, 256); + break; + default: + fprintf(stderr, "Invalid number format"); + exit(1); + } + } + if (as_int) + fnvlist_add_uint64(item, key, value); + else + fnvlist_add_string(item, key, buf); +} + +/* + * Generates an nvlist with output version for every command based on params. + * Purpose of this is to add a version of JSON output, considering the schema + * format might be updated for each command in future. 
+ * + * Schema: + * + * "output_version": { + * "command": string, + * "vers_major": integer, + * "vers_minor": integer, + * } + */ +static nvlist_t * +zpool_json_schema(int maj_v, int min_v) +{ + char cmd[MAX_CMD_LEN]; + nvlist_t *sch = fnvlist_alloc(); + nvlist_t *ov = fnvlist_alloc(); + + snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name); + fnvlist_add_string(ov, "command", cmd); + fnvlist_add_uint32(ov, "vers_major", maj_v); + fnvlist_add_uint32(ov, "vers_minor", min_v); + fnvlist_add_nvlist(sch, "output_version", ov); + fnvlist_free(ov); + return (sch); +} + +static void +fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype, + boolean_t as_int) +{ + nvlist_t *config = zpool_get_config(zhp, NULL); + uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID); + uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG); + + fnvlist_add_string(list, "name", zpool_get_name(zhp)); + if (addtype) + fnvlist_add_string(list, "type", "POOL"); + fnvlist_add_string(list, "state", zpool_get_state_str(zhp)); + if (as_int) { + if (guid) + fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid); + if (txg) + fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg); + fnvlist_add_uint64(list, "spa_version", SPA_VERSION); + fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION); + } else { + char value[ZFS_MAXPROPLEN]; + if (guid) { + snprintf(value, ZFS_MAXPROPLEN, "%llu", + (u_longlong_t)guid); + fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value); + } + if (txg) { + snprintf(value, ZFS_MAXPROPLEN, "%llu", + (u_longlong_t)txg); + fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value); + } + fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING); + fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING); + } +} + +static void +used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list) +{ + spare_cbdata_t spare_cb; + verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID, + &spare_cb.cb_guid) == 0); + if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) { + if (strcmp(zpool_get_name(spare_cb.cb_zhp), + zpool_get_name(zhp)) != 0) { + fnvlist_add_string(list, "used_by", + zpool_get_name(spare_cb.cb_zhp)); + } + zpool_close(spare_cb.cb_zhp); + } +} + +static void +fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name, + boolean_t addtype, boolean_t as_int) +{ + boolean_t l2c = B_FALSE; + const char *path, *phys, *devid, *bias = NULL; + uint64_t hole = 0, log = 0, spare = 0; + vdev_stat_t *vs; + uint_t c; + nvlist_t *nvdev; + nvlist_t *nvdev_parent = NULL; + char *_name; + + if (strcmp(name, zpool_get_name(zhp)) != 0) + _name = name; + else + _name = (char *)"root-0"; + + nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL); + + fnvlist_add_string(list, "name", name); + if (addtype) + fnvlist_add_string(list, "type", "VDEV"); + if (nvdev) { + const char *type = fnvlist_lookup_string(nvdev, + ZPOOL_CONFIG_TYPE); + if (type) + fnvlist_add_string(list, "vdev_type", type); + uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID); + if (guid) { + if (as_int) { + fnvlist_add_uint64(list, "guid", guid); + } else { + char buf[ZFS_MAXPROPLEN]; + snprintf(buf, ZFS_MAXPROPLEN, "%llu", + (u_longlong_t)guid); + fnvlist_add_string(list, "guid", buf); + } + } + if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0) + fnvlist_add_string(list, "path", path); + if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH, + &phys) == 0) + fnvlist_add_string(list, "phys_path", phys); + if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID, + 
&devid) == 0) + fnvlist_add_string(list, "devid", devid); + (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log); + (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE, + &spare); + (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole); + if (hole) + fnvlist_add_string(list, "class", VDEV_TYPE_HOLE); + else if (l2c) + fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE); + else if (spare) + fnvlist_add_string(list, "class", VDEV_TYPE_SPARE); + else if (log) + fnvlist_add_string(list, "class", VDEV_TYPE_LOG); + else { + (void) nvlist_lookup_string(nvdev, + ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); + if (bias != NULL) + fnvlist_add_string(list, "class", bias); + else { + nvdev_parent = NULL; + nvdev_parent = zpool_find_parent_vdev(zhp, + _name, NULL, NULL, NULL); + + /* + * With a mirrored special device, the parent + * "mirror" vdev will have + * ZPOOL_CONFIG_ALLOCATION_BIAS set to "special" + * not the leaf vdevs. If we're a leaf vdev + * in that case we need to look at our parent + * to see if they're "special" to know if we + * are "special" too. + */ + if (nvdev_parent) { + (void) nvlist_lookup_string( + nvdev_parent, + ZPOOL_CONFIG_ALLOCATION_BIAS, + &bias); + } + if (bias != NULL) + fnvlist_add_string(list, "class", bias); + else + fnvlist_add_string(list, "class", + "normal"); + } + } + if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS, + (uint64_t **)&vs, &c) == 0) { + fnvlist_add_string(list, "state", + vdev_state_str[vs->vs_state]); + } + } +} + static boolean_t prop_list_contains_feature(nvlist_t *proplist) { @@ -2234,51 +2581,6 @@ max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max, return (max); } -typedef struct spare_cbdata { - uint64_t cb_guid; - zpool_handle_t *cb_zhp; -} spare_cbdata_t; - -static boolean_t -find_vdev(nvlist_t *nv, uint64_t search) -{ - uint64_t guid; - nvlist_t **child; - uint_t c, children; - - if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 && - search == guid) - return (B_TRUE); - - if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, - &child, &children) == 0) { - for (c = 0; c < children; c++) - if (find_vdev(child[c], search)) - return (B_TRUE); - } - - return (B_FALSE); -} - -static int -find_spare(zpool_handle_t *zhp, void *data) -{ - spare_cbdata_t *cbp = data; - nvlist_t *config, *nvroot; - - config = zpool_get_config(zhp, NULL); - verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, - &nvroot) == 0); - - if (find_vdev(nvroot, cbp->cb_guid)) { - cbp->cb_zhp = zhp; - return (1); - } - - zpool_close(zhp); - return (0); -} - typedef struct status_cbdata { int cb_count; int cb_name_flags; @@ -2296,6 +2598,11 @@ typedef struct status_cbdata { boolean_t cb_print_vdev_trim; vdev_cmd_data_list_t *vcdl; boolean_t cb_print_power; + boolean_t cb_json; + boolean_t cb_flat_vdevs; + nvlist_t *cb_jsobj; + boolean_t cb_json_as_int; + boolean_t cb_json_pool_key_guid; } status_cbdata_t; /* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. 
*/ @@ -2308,6 +2615,46 @@ is_blank_str(const char *str) return (B_TRUE); } +static void +zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path, + nvlist_t *item) +{ + vdev_cmd_data_t *data; + int i, j, k = 1; + char tmp[256]; + const char *val; + + for (i = 0; i < vcdl->count; i++) { + if ((strcmp(vcdl->data[i].path, path) != 0) || + (strcmp(vcdl->data[i].pool, pool) != 0)) + continue; + + data = &vcdl->data[i]; + for (j = 0; j < vcdl->uniq_cols_cnt; j++) { + val = NULL; + for (int k = 0; k < data->cols_cnt; k++) { + if (strcmp(data->cols[k], + vcdl->uniq_cols[j]) == 0) { + val = data->lines[k]; + break; + } + } + if (val == NULL || is_blank_str(val)) + val = "-"; + fnvlist_add_string(item, vcdl->uniq_cols[j], val); + } + + for (j = data->cols_cnt; j < data->lines_cnt; j++) { + if (data->lines[j]) { + snprintf(tmp, 256, "extra_%d", k++); + fnvlist_add_string(item, tmp, + data->lines[j]); + } + } + break; + } +} + /* Print command output lines for specific vdev in a specific pool */ static void zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path) @@ -3023,6 +3370,7 @@ show_import(nvlist_t *config, boolean_t report_error) uint_t vsc; const char *comment; const char *indent; + char buf[2048]; status_cbdata_t cb = { 0 }; verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, @@ -3128,7 +3476,8 @@ show_import(nvlist_t *config, boolean_t report_error) printf_color(ANSI_YELLOW, gettext("The pool uses the following " "feature(s) not supported on this system:\n")); color_start(ANSI_YELLOW); - zpool_print_unsup_feat(config); + zpool_collect_unsup_feat(config, buf, 2048); + (void) printf("%s", buf); color_end(); break; @@ -3140,7 +3489,8 @@ show_import(nvlist_t *config, boolean_t report_error) "\t%sfeature(s) not supported on this system:\n"), indent, indent); color_start(ANSI_YELLOW); - zpool_print_unsup_feat(config); + zpool_collect_unsup_feat(config, buf, 2048); + (void) printf("%s", buf); color_end(); break; @@ -6375,9 +6725,13 @@ typedef struct list_cbdata { boolean_t cb_verbose; int cb_name_flags; int cb_namewidth; + boolean_t cb_json; boolean_t cb_scripted; zprop_list_t *cb_proplist; boolean_t cb_literal; + nvlist_t *cb_jsobj; + boolean_t cb_json_as_int; + boolean_t cb_json_pool_key_guid; } list_cbdata_t; @@ -6438,7 +6792,7 @@ print_header(list_cbdata_t *cb) * to the described layout. Used by zpool_do_list(). 
*/ static void -print_pool(zpool_handle_t *zhp, list_cbdata_t *cb) +collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb) { zprop_list_t *pl = cb->cb_proplist; boolean_t first = B_TRUE; @@ -6446,6 +6800,20 @@ print_pool(zpool_handle_t *zhp, list_cbdata_t *cb) const char *propstr; boolean_t right_justify; size_t width; + zprop_source_t sourcetype = ZPROP_SRC_NONE; + nvlist_t *item, *d, *props; + item = d = props = NULL; + + if (cb->cb_json) { + item = fnvlist_alloc(); + props = fnvlist_alloc(); + d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools"); + if (d == NULL) { + fprintf(stderr, "pools obj not found.\n"); + exit(1); + } + fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int); + } for (; pl != NULL; pl = pl->pl_next) { @@ -6458,7 +6826,7 @@ print_pool(zpool_handle_t *zhp, list_cbdata_t *cb) width = cb->cb_namewidth; } - if (!first) { + if (!cb->cb_json && !first) { if (cb->cb_scripted) (void) fputc('\t', stdout); else @@ -6470,7 +6838,8 @@ print_pool(zpool_handle_t *zhp, list_cbdata_t *cb) right_justify = B_FALSE; if (pl->pl_prop != ZPROP_USERPROP) { if (zpool_get_prop(zhp, pl->pl_prop, property, - sizeof (property), NULL, cb->cb_literal) != 0) + sizeof (property), &sourcetype, + cb->cb_literal) != 0) propstr = "-"; else propstr = property; @@ -6481,33 +6850,61 @@ print_pool(zpool_handle_t *zhp, list_cbdata_t *cb) zpool_prop_get_feature(zhp, pl->pl_user_prop, property, sizeof (property)) == 0) { propstr = property; + sourcetype = ZPROP_SRC_LOCAL; } else if (zfs_prop_user(pl->pl_user_prop) && zpool_get_userprop(zhp, pl->pl_user_prop, property, - sizeof (property), NULL) == 0) { + sizeof (property), &sourcetype) == 0) { propstr = property; } else { propstr = "-"; } - /* - * If this is being called in scripted mode, or if this is the - * last column and it is left-justified, don't include a width - * format specifier. - */ - if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify)) - (void) fputs(propstr, stdout); - else if (right_justify) - (void) printf("%*s", (int)width, propstr); - else - (void) printf("%-*s", (int)width, propstr); + if (cb->cb_json) { + if (pl->pl_prop == ZPOOL_PROP_NAME) + continue; + (void) zprop_nvlist_one_property( + zpool_prop_to_name(pl->pl_prop), propstr, + sourcetype, NULL, NULL, props, cb->cb_json_as_int); + } else { + /* + * If this is being called in scripted mode, or if this + * is the last column and it is left-justified, don't + * include a width format specifier. 
+ */ + if (cb->cb_scripted || (pl->pl_next == NULL && + !right_justify)) + (void) fputs(propstr, stdout); + else if (right_justify) + (void) printf("%*s", (int)width, propstr); + else + (void) printf("%-*s", (int)width, propstr); + } } - (void) fputc('\n', stdout); + if (cb->cb_json) { + fnvlist_add_nvlist(item, "properties", props); + if (cb->cb_json_pool_key_guid) { + char pool_guid[256]; + uint64_t guid = fnvlist_lookup_uint64( + zpool_get_config(zhp, NULL), + ZPOOL_CONFIG_POOL_GUID); + snprintf(pool_guid, 256, "%llu", + (u_longlong_t)guid); + fnvlist_add_nvlist(d, pool_guid, item); + } else { + fnvlist_add_nvlist(d, zpool_get_name(zhp), + item); + } + fnvlist_free(props); + fnvlist_free(item); + } else + (void) fputc('\n', stdout); } static void -print_one_column(zpool_prop_t prop, uint64_t value, const char *str, - boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format) +collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str, + boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format, + boolean_t json, nvlist_t *nvl, boolean_t as_int) { char propval[64]; boolean_t fixed; @@ -6557,10 +6954,15 @@ print_one_column(zpool_prop_t prop, uint64_t value, const char *str, if (!valid) (void) strlcpy(propval, "-", sizeof (propval)); - if (scripted) - (void) printf("\t%s", propval); - else - (void) printf(" %*s", (int)width, propval); + if (json) { + zprop_nvlist_one_property(zpool_prop_to_name(prop), propval, + ZPROP_SRC_NONE, NULL, NULL, nvl, as_int); + } else { + if (scripted) + (void) printf("\t%s", propval); + else + (void) printf(" %*s", (int)width, propval); + } } /* @@ -6568,15 +6970,17 @@ print_one_column(zpool_prop_t prop, uint64_t value, const char *str, * not compatible with '-o' option */ static void -print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, - list_cbdata_t *cb, int depth, boolean_t isspare) +collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, + list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item) { nvlist_t **child; vdev_stat_t *vs; - uint_t c, children; + uint_t c, children = 0; char *vname; boolean_t scripted = cb->cb_scripted; uint64_t islog = B_FALSE; + nvlist_t *props, *ent, *ch, *obj, *l2c, *sp; + props = ent = ch = obj = sp = l2c = NULL; const char *dashes = "%-*s - - - - " "- - - - -\n"; @@ -6597,13 +7001,21 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, if (strcmp(name, VDEV_TYPE_INDIRECT) == 0) return; - if (scripted) - (void) printf("\t%s", name); - else if (strlen(name) + depth > cb->cb_namewidth) - (void) printf("%*s%s", depth, "", name); - else - (void) printf("%*s%s%*s", depth, "", name, - (int)(cb->cb_namewidth - strlen(name) - depth), ""); + if (cb->cb_json) { + props = fnvlist_alloc(); + ent = fnvlist_alloc(); + fill_vdev_info(ent, zhp, (char *)name, B_FALSE, + cb->cb_json_as_int); + } else { + if (scripted) + (void) printf("\t%s", name); + else if (strlen(name) + depth > cb->cb_namewidth) + (void) printf("%*s%s", depth, "", name); + else + (void) printf("%*s%s%*s", depth, "", name, + (int)(cb->cb_namewidth - strlen(name) - + depth), ""); + } /* * Print the properties for the individual vdevs. Some @@ -6611,30 +7023,39 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, * 'toplevel' boolean value is passed to the print_one_column() * to indicate that the value is valid. 
*/ - if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) - print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL, - scripted, B_TRUE, format); - else - print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL, - scripted, toplevel, format); - print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL, - scripted, toplevel, format); - print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc, - NULL, scripted, toplevel, format); - print_one_column(ZPOOL_PROP_CHECKPOINT, - vs->vs_checkpoint_space, NULL, scripted, toplevel, format); - print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL, - scripted, B_TRUE, format); - print_one_column(ZPOOL_PROP_FRAGMENTATION, + if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) { + collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL, + scripted, B_TRUE, format, cb->cb_json, props, + cb->cb_json_as_int); + } else { + collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_space, NULL, + scripted, toplevel, format, cb->cb_json, props, + cb->cb_json_as_int); + } + collect_vdev_prop(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL, + scripted, toplevel, format, cb->cb_json, props, + cb->cb_json_as_int); + collect_vdev_prop(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc, + NULL, scripted, toplevel, format, cb->cb_json, props, + cb->cb_json_as_int); + collect_vdev_prop(ZPOOL_PROP_CHECKPOINT, + vs->vs_checkpoint_space, NULL, scripted, toplevel, format, + cb->cb_json, props, cb->cb_json_as_int); + collect_vdev_prop(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL, + scripted, B_TRUE, format, cb->cb_json, props, + cb->cb_json_as_int); + collect_vdev_prop(ZPOOL_PROP_FRAGMENTATION, vs->vs_fragmentation, NULL, scripted, (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel), - format); + format, cb->cb_json, props, cb->cb_json_as_int); cap = (vs->vs_space == 0) ? 
0 : (vs->vs_alloc * 10000 / vs->vs_space); - print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL, - scripted, toplevel, format); - print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL, - scripted, toplevel, format); + collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap, NULL, + scripted, toplevel, format, cb->cb_json, props, + cb->cb_json_as_int); + collect_vdev_prop(ZPOOL_PROP_DEDUPRATIO, 0, NULL, + scripted, toplevel, format, cb->cb_json, props, + cb->cb_json_as_int); state = zpool_state_to_name(vs->vs_state, vs->vs_aux); if (isspare) { if (vs->vs_aux == VDEV_AUX_SPARED) @@ -6642,14 +7063,28 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, else if (vs->vs_state == VDEV_STATE_HEALTHY) state = "AVAIL"; } - print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted, - B_TRUE, format); - (void) fputc('\n', stdout); + collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state, scripted, + B_TRUE, format, cb->cb_json, props, cb->cb_json_as_int); + + if (cb->cb_json) { + fnvlist_add_nvlist(ent, "properties", props); + fnvlist_free(props); + } else + (void) fputc('\n', stdout); } if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, - &child, &children) != 0) + &child, &children) != 0) { + if (cb->cb_json) { + fnvlist_add_nvlist(item, name, ent); + fnvlist_free(ent); + } return; + } + + if (cb->cb_json) { + ch = fnvlist_alloc(); + } /* list the normal vdevs first */ for (c = 0; c < children; c++) { @@ -6668,14 +7103,28 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, vname = zpool_vdev_name(g_zfs, zhp, child[c], cb->cb_name_flags | VDEV_NAME_TYPE_ID); - print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE); + + if (name == NULL || cb->cb_json != B_TRUE) + collect_list_stats(zhp, vname, child[c], cb, depth + 2, + B_FALSE, item); + else if (cb->cb_json) { + collect_list_stats(zhp, vname, child[c], cb, depth + 2, + B_FALSE, ch); + } free(vname); } + if (cb->cb_json) { + if (!nvlist_empty(ch)) + fnvlist_add_nvlist(ent, "vdevs", ch); + fnvlist_free(ch); + } + /* list the classes: 'logs', 'dedup', and 'special' */ for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) { boolean_t printed = B_FALSE; - + if (cb->cb_json) + obj = fnvlist_alloc(); for (c = 0; c < children; c++) { const char *bias = NULL; const char *type = NULL; @@ -6694,7 +7143,7 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) continue; - if (!printed) { + if (!printed && !cb->cb_json) { /* LINTED E_SEC_PRINTF_VAR_FMT */ (void) printf(dashes, cb->cb_namewidth, class_name[n]); @@ -6702,36 +7151,64 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, } vname = zpool_vdev_name(g_zfs, zhp, child[c], cb->cb_name_flags | VDEV_NAME_TYPE_ID); - print_list_stats(zhp, vname, child[c], cb, depth + 2, - B_FALSE); + collect_list_stats(zhp, vname, child[c], cb, depth + 2, + B_FALSE, obj); free(vname); } + if (cb->cb_json) { + if (!nvlist_empty(obj)) + fnvlist_add_nvlist(item, class_name[n], obj); + fnvlist_free(obj); + } } if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, &child, &children) == 0 && children > 0) { - /* LINTED E_SEC_PRINTF_VAR_FMT */ - (void) printf(dashes, cb->cb_namewidth, "cache"); + if (cb->cb_json) { + l2c = fnvlist_alloc(); + } else { + /* LINTED E_SEC_PRINTF_VAR_FMT */ + (void) printf(dashes, cb->cb_namewidth, "cache"); + } for (c = 0; c < children; c++) { vname = zpool_vdev_name(g_zfs, zhp, child[c], cb->cb_name_flags); - print_list_stats(zhp, vname, child[c], cb, depth + 2, - B_FALSE); + 
collect_list_stats(zhp, vname, child[c], cb, depth + 2, + B_FALSE, l2c); free(vname); } + if (cb->cb_json) { + if (!nvlist_empty(l2c)) + fnvlist_add_nvlist(item, "l2cache", l2c); + fnvlist_free(l2c); + } } if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child, &children) == 0 && children > 0) { - /* LINTED E_SEC_PRINTF_VAR_FMT */ - (void) printf(dashes, cb->cb_namewidth, "spare"); + if (cb->cb_json) { + sp = fnvlist_alloc(); + } else { + /* LINTED E_SEC_PRINTF_VAR_FMT */ + (void) printf(dashes, cb->cb_namewidth, "spare"); + } for (c = 0; c < children; c++) { vname = zpool_vdev_name(g_zfs, zhp, child[c], cb->cb_name_flags); - print_list_stats(zhp, vname, child[c], cb, depth + 2, - B_TRUE); + collect_list_stats(zhp, vname, child[c], cb, depth + 2, + B_TRUE, sp); free(vname); } + if (cb->cb_json) { + if (!nvlist_empty(sp)) + fnvlist_add_nvlist(item, "spares", sp); + fnvlist_free(sp); + } + } + + if (name != NULL && cb->cb_json) { + fnvlist_add_nvlist(item, name, ent); + fnvlist_free(ent); } } @@ -6741,17 +7218,44 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, static int list_callback(zpool_handle_t *zhp, void *data) { + nvlist_t *p, *d, *nvdevs; + uint64_t guid; + char pool_guid[256]; + const char *pool_name = zpool_get_name(zhp); list_cbdata_t *cbp = data; + p = d = nvdevs = NULL; - print_pool(zhp, cbp); + collect_pool(zhp, cbp); if (cbp->cb_verbose) { nvlist_t *config, *nvroot; - config = zpool_get_config(zhp, NULL); verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); - print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE); + if (cbp->cb_json) { + d = fnvlist_lookup_nvlist(cbp->cb_jsobj, + "pools"); + if (cbp->cb_json_pool_key_guid) { + guid = fnvlist_lookup_uint64(config, + ZPOOL_CONFIG_POOL_GUID); + snprintf(pool_guid, 256, "%llu", + (u_longlong_t)guid); + p = fnvlist_lookup_nvlist(d, pool_guid); + } else { + p = fnvlist_lookup_nvlist(d, pool_name); + } + nvdevs = fnvlist_alloc(); + } + collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs); + if (cbp->cb_json) { + fnvlist_add_nvlist(p, "vdevs", nvdevs); + if (cbp->cb_json_pool_key_guid) + fnvlist_add_nvlist(d, pool_guid, p); + else + fnvlist_add_nvlist(d, pool_name, p); + fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d); + fnvlist_free(nvdevs); + } } return (0); @@ -6791,6 +7295,9 @@ get_namewidth_list(zpool_handle_t *zhp, void *data) * -p Display values in parsable (exact) format. * -P Display full path for vdev name. * -T Display a timestamp in date(1) or Unix format + * -j Display the output in JSON format + * --json-int Display the numbers as integer instead of strings. + * --json-pool-key-guid Set pool GUID as key for pool objects. * * List all pools in the system, whether or not they're healthy. Output space * statistics for each one, as well as health status summary. 
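As with zfs above, the list path builds the versioned envelope first and prints it only after the iteration completes successfully (see zpool_do_list() in the next hunk). A hand-written, abridged illustration of `zpool list -j` (pretty-printed; the "properties" layout is elided and the GUID value is illustrative):

    # zpool list -j
    {
      "output_version": {"command": "zpool list", "vers_major": 0, "vers_minor": 1},
      "pools": {
        "tank": {
          "name": "tank",
          "type": "POOL",
          "state": "ONLINE",
          "pool_guid": "7434183279293818622",
          "txg": "4",
          "spa_version": "5000",
          "zpl_version": "5",
          "properties": { ... }
        }
      }
    }

With --json-pool-key-guid the "tank" key is replaced by the pool GUID string, which stays stable across pool renames, and with -v each pool additionally carries a nested "vdevs" object assembled by collect_list_stats() above.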
@@ -6809,10 +7316,19 @@ zpool_do_list(int argc, char **argv) unsigned long count = 0; zpool_list_t *list; boolean_t first = B_TRUE; + nvlist_t *data = NULL; current_prop_type = ZFS_TYPE_POOL; + struct option long_options[] = { + {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT}, + {"json-pool-key-guid", no_argument, NULL, + ZPOOL_OPTION_POOL_KEY_GUID}, + {0, 0, 0, 0} + }; + /* check options */ - while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) { + while ((c = getopt_long(argc, argv, ":gjHLo:pPT:v", long_options, + NULL)) != -1) { switch (c) { case 'g': cb.cb_name_flags |= VDEV_NAME_GUID; @@ -6832,6 +7348,16 @@ zpool_do_list(int argc, char **argv) case 'p': cb.cb_literal = B_TRUE; break; + case 'j': + cb.cb_json = B_TRUE; + break; + case ZPOOL_OPTION_JSON_NUMS_AS_INT: + cb.cb_json_as_int = B_TRUE; + cb.cb_literal = B_TRUE; + break; + case ZPOOL_OPTION_POOL_KEY_GUID: + cb.cb_json_pool_key_guid = B_TRUE; + break; case 'T': get_timestamp_arg(*optarg); break; @@ -6854,6 +7380,18 @@ zpool_do_list(int argc, char **argv) argc -= optind; argv += optind; + if (!cb.cb_json && cb.cb_json_as_int) { + (void) fprintf(stderr, gettext("'--json-int' only works with" + " '-j' option\n")); + usage(B_FALSE); + } + + if (!cb.cb_json && cb.cb_json_pool_key_guid) { + (void) fprintf(stderr, gettext("'json-pool-key-guid' only" + " works with '-j' option\n")); + usage(B_FALSE); + } + get_interval_count(&argc, argv, &interval, &count); if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0) @@ -6867,18 +7405,43 @@ zpool_do_list(int argc, char **argv) if (pool_list_count(list) == 0) break; + if (cb.cb_json) { + cb.cb_jsobj = zpool_json_schema(0, 1); + data = fnvlist_alloc(); + fnvlist_add_nvlist(cb.cb_jsobj, "pools", data); + fnvlist_free(data); + } + cb.cb_namewidth = 0; (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb); - if (timestamp_fmt != NODATE) - print_timestamp(timestamp_fmt); + if (timestamp_fmt != NODATE) { + if (cb.cb_json) { + if (cb.cb_json_as_int) { + fnvlist_add_uint64(cb.cb_jsobj, "time", + time(NULL)); + } else { + char ts[128]; + get_timestamp(timestamp_fmt, ts, 128); + fnvlist_add_string(cb.cb_jsobj, "time", + ts); + } + } else + print_timestamp(timestamp_fmt); + } - if (!cb.cb_scripted && (first || cb.cb_verbose)) { + if (!cb.cb_scripted && (first || cb.cb_verbose) && + !cb.cb_json) { print_header(&cb); first = B_FALSE; } ret = pool_list_iter(list, B_TRUE, list_callback, &cb); + if (ret == 0 && cb.cb_json) + zcmd_print_json(cb.cb_jsobj); + else if (ret != 0 && cb.cb_json) + nvlist_free(cb.cb_jsobj); + if (interval == 0) break; @@ -6891,7 +7454,8 @@ zpool_do_list(int argc, char **argv) (void) fsleep(interval); } - if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) { + if (argc == 0 && !cb.cb_scripted && !cb.cb_json && + pool_list_count(list) == 0) { (void) printf(gettext("no pools available\n")); ret = 0; } @@ -8502,6 +9066,807 @@ check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time) return (rebuilding); } +static void +vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, + int depth, boolean_t isspare, char *parent, nvlist_t *item) +{ + nvlist_t *vds, **child, *ch = NULL; + uint_t vsc, children; + vdev_stat_t *vs; + char *vname; + uint64_t notpresent; + const char *type, *path; + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) + children = 0; + verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, + (uint64_t **)&vs, &vsc) == 0); + verify(nvlist_lookup_string(nv, 
ZPOOL_CONFIG_TYPE, &type) == 0); + if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) + return; + + if (cb->cb_print_unhealthy && depth > 0 && + for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) { + return; + } + vname = zpool_vdev_name(g_zfs, zhp, nv, + cb->cb_name_flags | VDEV_NAME_TYPE_ID); + vds = fnvlist_alloc(); + fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int); + if (cb->cb_flat_vdevs && parent != NULL) { + fnvlist_add_string(vds, "parent", parent); + } + + if (isspare) { + if (vs->vs_aux == VDEV_AUX_SPARED) { + fnvlist_add_string(vds, "state", "INUSE"); + used_by_other(zhp, nv, vds); + } else if (vs->vs_state == VDEV_STATE_HEALTHY) + fnvlist_add_string(vds, "state", "AVAIL"); + } else { + if (vs->vs_alloc) { + nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + } + if (vs->vs_space) { + nice_num_str_nvlist(vds, "total_space", vs->vs_space, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + } + if (vs->vs_dspace) { + nice_num_str_nvlist(vds, "def_space", vs->vs_dspace, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + } + if (vs->vs_rsize) { + nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + } + if (vs->vs_esize) { + nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + } + if (vs->vs_self_healed) { + nice_num_str_nvlist(vds, "self_healed", + vs->vs_self_healed, cb->cb_literal, + cb->cb_json_as_int, ZFS_NICENUM_BYTES); + } + if (vs->vs_pspace) { + nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + } + nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); + nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); + nice_num_str_nvlist(vds, "checksum_errors", + vs->vs_checksum_errors, cb->cb_literal, + cb->cb_json_as_int, ZFS_NICENUM_1024); + if (vs->vs_scan_processed) { + nice_num_str_nvlist(vds, "scan_processed", + vs->vs_scan_processed, cb->cb_literal, + cb->cb_json_as_int, ZFS_NICENUM_BYTES); + } + if (vs->vs_checkpoint_space) { + nice_num_str_nvlist(vds, "checkpoint_space", + vs->vs_checkpoint_space, cb->cb_literal, + cb->cb_json_as_int, ZFS_NICENUM_BYTES); + } + if (vs->vs_resilver_deferred) { + nice_num_str_nvlist(vds, "resilver_deferred", + vs->vs_resilver_deferred, B_TRUE, + cb->cb_json_as_int, ZFS_NICENUM_1024); + } + if (children == 0) { + nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_1024); + } + if (cb->cb_print_power) { + if (children == 0) { + /* Only leaf vdevs have physical slots */ + switch (zpool_power_current_state(zhp, (char *) + fnvlist_lookup_string(nv, + ZPOOL_CONFIG_PATH))) { + case 0: + fnvlist_add_string(vds, "power_state", + "off"); + break; + case 1: + fnvlist_add_string(vds, "power_state", + "on"); + break; + default: + fnvlist_add_string(vds, "power_state", + "-"); + } + } else { + fnvlist_add_string(vds, "power_state", "-"); + } + } + } + + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, + ¬present) == 0) { + nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT, + 1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + fnvlist_add_string(vds, "was", + fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH)); + } else if (vs->vs_aux != VDEV_AUX_NONE) { + fnvlist_add_string(vds, "aux", 
vdev_aux_str[vs->vs_aux]); + } else if (children == 0 && !isspare && + getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL && + VDEV_STAT_VALID(vs_physical_ashift, vsc) && + vs->vs_configured_ashift < vs->vs_physical_ashift) { + nice_num_str_nvlist(vds, "configured_ashift", + vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int, + ZFS_NICENUM_1024); + nice_num_str_nvlist(vds, "physical_ashift", + vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int, + ZFS_NICENUM_1024); + } + if (vs->vs_scan_removing != 0) { + nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing, + B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); + } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) { + nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc, + B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); + } + + if (cb->vcdl != NULL) { + if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { + zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp), + path, vds); + } + } + + if (children == 0) { + if (cb->cb_print_vdev_init) { + if (vs->vs_initialize_state != 0) { + uint64_t st = vs->vs_initialize_state; + fnvlist_add_string(vds, "init_state", + vdev_init_state_str[st]); + nice_num_str_nvlist(vds, "initialized", + vs->vs_initialize_bytes_done, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(vds, "to_initialize", + vs->vs_initialize_bytes_est, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(vds, "init_time", + vs->vs_initialize_action_time, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(vds, "init_errors", + vs->vs_initialize_errors, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_1024); + } else { + fnvlist_add_string(vds, "init_state", + "UNINITIALIZED"); + } + } + if (cb->cb_print_vdev_trim) { + if (vs->vs_trim_notsup == 0) { + if (vs->vs_trim_state != 0) { + uint64_t st = vs->vs_trim_state; + fnvlist_add_string(vds, "trim_state", + vdev_trim_state_str[st]); + nice_num_str_nvlist(vds, "trimmed", + vs->vs_trim_bytes_done, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(vds, "to_trim", + vs->vs_trim_bytes_est, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(vds, "trim_time", + vs->vs_trim_action_time, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(vds, "trim_errors", + vs->vs_trim_errors, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_1024); + } else + fnvlist_add_string(vds, "trim_state", + "UNTRIMMED"); + } + nice_num_str_nvlist(vds, "trim_notsup", + vs->vs_trim_notsup, B_TRUE, + cb->cb_json_as_int, ZFS_NICENUM_1024); + } + } else { + ch = fnvlist_alloc(); + } + + if (cb->cb_flat_vdevs && children == 0) { + fnvlist_add_nvlist(item, vname, vds); + } + + for (int c = 0; c < children; c++) { + uint64_t islog = B_FALSE, ishole = B_FALSE; + (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, + &islog); + (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, + &ishole); + if (islog || ishole) + continue; + if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) + continue; + if (cb->cb_flat_vdevs) { + vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare, + vname, item); + } + vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare, + vname, ch); + } + + if (ch != NULL) { + if (!nvlist_empty(ch)) + fnvlist_add_nvlist(vds, "vdevs", ch); + fnvlist_free(ch); + } + fnvlist_add_nvlist(item, vname, vds); + fnvlist_free(vds); + free(vname); +} + +static void 
+class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, + const char *class, nvlist_t *item) +{ + uint_t c, children; + nvlist_t **child; + nvlist_t *class_obj = NULL; + + if (!cb->cb_flat_vdevs) + class_obj = fnvlist_alloc(); + + assert(zhp != NULL || !cb->cb_verbose); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, + &children) != 0) + return; + + for (c = 0; c < children; c++) { + uint64_t is_log = B_FALSE; + const char *bias = NULL; + const char *type = NULL; + char *name = zpool_vdev_name(g_zfs, zhp, child[c], + cb->cb_name_flags | VDEV_NAME_TYPE_ID); + + (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, + &is_log); + + if (is_log) { + bias = (char *)VDEV_ALLOC_CLASS_LOGS; + } else { + (void) nvlist_lookup_string(child[c], + ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); + (void) nvlist_lookup_string(child[c], + ZPOOL_CONFIG_TYPE, &type); + } + + if (bias == NULL || strcmp(bias, class) != 0) + continue; + if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0) + continue; + + if (cb->cb_flat_vdevs) { + vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE, + NULL, item); + } else { + vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE, + NULL, class_obj); + } + free(name); + } + if (!cb->cb_flat_vdevs) { + if (!nvlist_empty(class_obj)) + fnvlist_add_nvlist(item, class, class_obj); + fnvlist_free(class_obj); + } +} + +static void +l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, + nvlist_t *item) +{ + nvlist_t *l2c = NULL, **l2cache; + uint_t nl2cache; + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, + &l2cache, &nl2cache) == 0) { + if (nl2cache == 0) + return; + if (!cb->cb_flat_vdevs) + l2c = fnvlist_alloc(); + for (int i = 0; i < nl2cache; i++) { + if (cb->cb_flat_vdevs) { + vdev_stats_nvlist(zhp, cb, l2cache[i], 2, + B_FALSE, NULL, item); + } else { + vdev_stats_nvlist(zhp, cb, l2cache[i], 2, + B_FALSE, NULL, l2c); + } + } + } + if (!cb->cb_flat_vdevs) { + if (!nvlist_empty(l2c)) + fnvlist_add_nvlist(item, "l2cache", l2c); + fnvlist_free(l2c); + } +} + +static void +spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, + nvlist_t *item) +{ + nvlist_t *sp = NULL, **spares; + uint_t nspares; + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, + &spares, &nspares) == 0) { + if (nspares == 0) + return; + if (!cb->cb_flat_vdevs) + sp = fnvlist_alloc(); + for (int i = 0; i < nspares; i++) { + if (cb->cb_flat_vdevs) { + vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE, + NULL, item); + } else { + vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE, + NULL, sp); + } + } + } + if (!cb->cb_flat_vdevs) { + if (!nvlist_empty(sp)) + fnvlist_add_nvlist(item, "spares", sp); + fnvlist_free(sp); + } +} + +static void +errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item) +{ + uint64_t nerr; + nvlist_t *config = zpool_get_config(zhp, NULL); + if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, + &nerr) == 0) { + nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); + if (nerr != 0 && cb->cb_verbose) { + nvlist_t *nverrlist = NULL; + if (zpool_get_errlog(zhp, &nverrlist) == 0) { + int i = 0; + int count = 0; + size_t len = MAXPATHLEN * 2; + nvpair_t *elem = NULL; + + for (nvpair_t *pair = + nvlist_next_nvpair(nverrlist, NULL); + pair != NULL; + pair = nvlist_next_nvpair(nverrlist, pair)) + count++; + char **errl = (char **)malloc( + count * sizeof (char *)); + + while ((elem = nvlist_next_nvpair(nverrlist, + elem)) != NULL) { + nvlist_t *nv; + 
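/* Each errlog entry pairs a dataset with an object number; + * zpool_obj_to_path() below resolves that pair into a readable + * path for the "errlist" string array. + */ +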
uint64_t dsobj, obj; + + verify(nvpair_value_nvlist(elem, + &nv) == 0); + verify(nvlist_lookup_uint64(nv, + ZPOOL_ERR_DATASET, &dsobj) == 0); + verify(nvlist_lookup_uint64(nv, + ZPOOL_ERR_OBJECT, &obj) == 0); + errl[i] = safe_malloc(len); + zpool_obj_to_path(zhp, dsobj, obj, + errl[i++], len); + } + nvlist_free(nverrlist); + fnvlist_add_string_array(item, "errlist", + (const char **)errl, count); + for (int i = 0; i < count; ++i) + free(errl[i]); + free(errl); + } else + fnvlist_add_string(item, "errlist", + strerror(errno)); + } + } +} + +static void +ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item) +{ + nice_num_str_nvlist(item, "blocks", dds->dds_blocks, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); + nice_num_str_nvlist(item, "logical_size", dds->dds_lsize, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(item, "physical_size", dds->dds_psize, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); + nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); +} + +static void +dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item) +{ + nvlist_t *config; + if (cb->cb_dedup_stats) { + ddt_histogram_t *ddh; + ddt_stat_t *dds; + ddt_object_t *ddo; + nvlist_t *ddt_stat, *ddt_obj, *dedup; + uint_t c; + uint64_t cspace_prop; + + config = zpool_get_config(zhp, NULL); + if (nvlist_lookup_uint64_array(config, + ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0) + return; + + dedup = fnvlist_alloc(); + ddt_obj = fnvlist_alloc(); + nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); + if (ddo->ddo_count == 0) { + fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS, + ddt_obj); + fnvlist_add_nvlist(item, "dedup_stats", dedup); + fnvlist_free(ddt_obj); + fnvlist_free(dedup); + return; + } else { + nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_1024); + nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_1024); + /* + * Squash cached size into in-core size to handle race. + * Only include cached size if it is available. 
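+ * The ZPOOL_PROP_DEDUPCACHED value comes from a separate property + * fetch and can briefly run ahead of the in-core size, so MIN() + * below clamps the reported "cspace" to ddo_mspace.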
+ */ + cspace_prop = zpool_get_prop_int(zhp, + ZPOOL_PROP_DEDUPCACHED, NULL); + cspace_prop = MIN(cspace_prop, ddo->ddo_mspace); + nice_num_str_nvlist(dedup, "cspace", cspace_prop, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_1024); + } + + ddt_stat = fnvlist_alloc(); + if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS, + (uint64_t **)&dds, &c) == 0) { + nvlist_t *total = fnvlist_alloc(); + if (dds->dds_blocks == 0) + fnvlist_add_string(total, "blocks", "0"); + else + ddt_stats_nvlist(dds, cb, total); + fnvlist_add_nvlist(ddt_stat, "total", total); + fnvlist_free(total); + } + if (nvlist_lookup_uint64_array(config, + ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) { + nvlist_t *hist = fnvlist_alloc(); + nvlist_t *entry = NULL; + char buf[16]; + for (int h = 0; h < 64; h++) { + if (ddh->ddh_stat[h].dds_blocks != 0) { + entry = fnvlist_alloc(); + ddt_stats_nvlist(&ddh->ddh_stat[h], cb, + entry); + snprintf(buf, 16, "%d", h); + fnvlist_add_nvlist(hist, buf, entry); + fnvlist_free(entry); + } + } + if (!nvlist_empty(hist)) + fnvlist_add_nvlist(ddt_stat, "histogram", hist); + fnvlist_free(hist); + } + + if (!nvlist_empty(ddt_obj)) { + fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS, + ddt_obj); + } + fnvlist_free(ddt_obj); + if (!nvlist_empty(ddt_stat)) { + fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS, + ddt_stat); + } + fnvlist_free(ddt_stat); + if (!nvlist_empty(dedup)) + fnvlist_add_nvlist(item, "dedup_stats", dedup); + fnvlist_free(dedup); + } +} + +static void +raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, + nvlist_t *nvroot, nvlist_t *item) +{ + uint_t c; + pool_raidz_expand_stat_t *pres = NULL; + if (nvlist_lookup_uint64_array(nvroot, + ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) { + nvlist_t **child; + uint_t children; + nvlist_t *nv = fnvlist_alloc(); + verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0); + assert(pres->pres_expanding_vdev < children); + char *name = + zpool_vdev_name(g_zfs, zhp, + child[pres->pres_expanding_vdev], 0); + fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int); + fnvlist_add_string(nv, "state", + pool_scan_state_str[pres->pres_state]); + nice_num_str_nvlist(nv, "expanding_vdev", + pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int, + ZFS_NICENUM_1024); + nice_num_str_nvlist(nv, "start_time", pres->pres_start_time, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(nv, "end_time", pres->pres_end_time, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(nv, "waiting_for_resilver", + pres->pres_waiting_for_resilver, B_TRUE, + cb->cb_json_as_int, ZFS_NICENUM_1024); + fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv); + fnvlist_free(nv); + free(name); + } +} + +static void +checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb, + nvlist_t *item) +{ + uint_t c; + pool_checkpoint_stat_t *pcs = NULL; + if (nvlist_lookup_uint64_array(nvroot, + ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) { + nvlist_t *nv = fnvlist_alloc(); + fnvlist_add_string(nv, "state", + checkpoint_state_str[pcs->pcs_state]); + nice_num_str_nvlist(nv, "start_time", + pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int, + 
ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(nv, "space", + pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv); + fnvlist_free(nv); + } +} + +static void +removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, + nvlist_t *nvroot, nvlist_t *item) +{ + uint_t c; + pool_removal_stat_t *prs = NULL; + if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS, + (uint64_t **)&prs, &c) == 0) { + if (prs->prs_state != DSS_NONE) { + nvlist_t **child; + uint_t children; + verify(nvlist_lookup_nvlist_array(nvroot, + ZPOOL_CONFIG_CHILDREN, &child, &children) == 0); + assert(prs->prs_removing_vdev < children); + char *vdev_name = zpool_vdev_name(g_zfs, zhp, + child[prs->prs_removing_vdev], B_TRUE); + nvlist_t *nv = fnvlist_alloc(); + fill_vdev_info(nv, zhp, vdev_name, B_FALSE, + cb->cb_json_as_int); + fnvlist_add_string(nv, "state", + pool_scan_state_str[prs->prs_state]); + nice_num_str_nvlist(nv, "removing_vdev", + prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int, + ZFS_NICENUM_1024); + nice_num_str_nvlist(nv, "start_time", + prs->prs_start_time, cb->cb_literal, + cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(nv, "end_time", prs->prs_end_time, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(nv, "copied", prs->prs_copied, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(nv, "mapping_memory", + prs->prs_mapping_memory, cb->cb_literal, + cb->cb_json_as_int, ZFS_NICENUM_BYTES); + fnvlist_add_nvlist(item, + ZPOOL_CONFIG_REMOVAL_STATS, nv); + fnvlist_free(nv); + free(vdev_name); + } + } +} + +static void +scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, + nvlist_t *nvroot, nvlist_t *item) +{ + pool_scan_stat_t *ps = NULL; + uint_t c; + nvlist_t *scan = fnvlist_alloc(); + nvlist_t **child; + uint_t children; + + if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS, + (uint64_t **)&ps, &c) == 0) { + fnvlist_add_string(scan, "function", + pool_scan_func_str[ps->pss_func]); + fnvlist_add_string(scan, "state", + pool_scan_state_str[ps->pss_state]); + nice_num_str_nvlist(scan, "start_time", ps->pss_start_time, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(scan, "end_time", ps->pss_end_time, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(scan, "examined", ps->pss_examined, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(scan, "skipped", ps->pss_skipped, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(scan, "processed", ps->pss_processed, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(scan, "errors", ps->pss_errors, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); + nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start, + B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); + nice_num_str_nvlist(scan, "scrub_pause", + ps->pss_pass_scrub_pause, cb->cb_literal, + cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(scan, "scrub_spent_paused", + ps->pss_pass_scrub_spent_paused, + 
B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); + nice_num_str_nvlist(scan, "issued_bytes_per_scan", + ps->pss_pass_issued, cb->cb_literal, + cb->cb_json_as_int, ZFS_NICENUM_BYTES); + nice_num_str_nvlist(scan, "issued", ps->pss_issued, + cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); + if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB && + ps->pss_error_scrub_start > ps->pss_start_time) { + fnvlist_add_string(scan, "err_scrub_func", + pool_scan_func_str[ps->pss_error_scrub_func]); + fnvlist_add_string(scan, "err_scrub_state", + pool_scan_state_str[ps->pss_error_scrub_state]); + nice_num_str_nvlist(scan, "err_scrub_start_time", + ps->pss_error_scrub_start, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(scan, "err_scrub_end_time", + ps->pss_error_scrub_end, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(scan, "err_scrub_examined", + ps->pss_error_scrub_examined, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_1024); + nice_num_str_nvlist(scan, "err_scrub_to_examine", + ps->pss_error_scrub_to_be_examined, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_1024); + nice_num_str_nvlist(scan, "err_scrub_pause", + ps->pss_pass_error_scrub_pause, + B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); + } + } + + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0) { + vdev_rebuild_stat_t *vrs; + uint_t i; + char *name; + nvlist_t *nv; + nvlist_t *rebuild = fnvlist_alloc(); + uint64_t st; + for (uint_t c = 0; c < children; c++) { + if (nvlist_lookup_uint64_array(child[c], + ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, + &i) == 0) { + if (vrs->vrs_state != VDEV_REBUILD_NONE) { + nv = fnvlist_alloc(); + name = zpool_vdev_name(g_zfs, zhp, + child[c], VDEV_NAME_TYPE_ID); + fill_vdev_info(nv, zhp, name, B_FALSE, + cb->cb_json_as_int); + st = vrs->vrs_state; + fnvlist_add_string(nv, "state", + vdev_rebuild_state_str[st]); + nice_num_str_nvlist(nv, "start_time", + vrs->vrs_start_time, cb->cb_literal, + cb->cb_json_as_int, + ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(nv, "end_time", + vrs->vrs_end_time, cb->cb_literal, + cb->cb_json_as_int, + ZFS_NICE_TIMESTAMP); + nice_num_str_nvlist(nv, "scan_time", + vrs->vrs_scan_time_ms * 1000000, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_TIME); + nice_num_str_nvlist(nv, "scanned", + vrs->vrs_bytes_scanned, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(nv, "issued", + vrs->vrs_bytes_issued, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(nv, "rebuilt", + vrs->vrs_bytes_rebuilt, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(nv, "to_scan", + vrs->vrs_bytes_est, cb->cb_literal, + cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(nv, "errors", + vrs->vrs_errors, cb->cb_literal, + cb->cb_json_as_int, + ZFS_NICENUM_1024); + nice_num_str_nvlist(nv, "pass_time", + vrs->vrs_pass_time_ms * 1000000, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_TIME); + nice_num_str_nvlist(nv, "pass_scanned", + vrs->vrs_pass_bytes_scanned, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(nv, "pass_issued", + vrs->vrs_pass_bytes_issued, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + nice_num_str_nvlist(nv, "pass_skipped", + vrs->vrs_pass_bytes_skipped, + cb->cb_literal, cb->cb_json_as_int, + ZFS_NICENUM_BYTES); + fnvlist_add_nvlist(rebuild, name, nv); + free(name); + } + } + } + if 
(!nvlist_empty(rebuild)) + fnvlist_add_nvlist(scan, "rebuild_stats", rebuild); + fnvlist_free(rebuild); + } + + if (!nvlist_empty(scan)) + fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan); + fnvlist_free(scan); +} + /* * Print the scan status. */ @@ -8911,295 +10276,199 @@ print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal) (void) printf(", %s cached (%.02f%%)", cspace, (double)cspace_prop / (double)ddo->ddo_mspace * 100.0); - } - (void) printf("\n"); - - verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS, - (uint64_t **)&dds, &c) == 0); - verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM, - (uint64_t **)&ddh, &c) == 0); - zpool_dump_ddt(dds, ddh); -} - -/* - * Display a summary of pool status. Displays a summary such as: - * - * pool: tank - * status: DEGRADED - * reason: One or more devices ... - * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01 - * config: - * mirror DEGRADED - * c1t0d0 OK - * c2t0d0 UNAVAIL - * - * When given the '-v' option, we print out the complete config. If the '-e' - * option is specified, then we print out error rate information as well. - */ -static int -status_callback(zpool_handle_t *zhp, void *data) -{ - status_cbdata_t *cbp = data; - nvlist_t *config, *nvroot; - const char *msgid; - zpool_status_t reason; - zpool_errata_t errata; - const char *health; - uint_t c; - vdev_stat_t *vs; - - /* If dedup stats were requested, also fetch dedupcached. */ - if (cbp->cb_dedup_stats > 1) - zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME); - - config = zpool_get_config(zhp, NULL); - reason = zpool_get_status(zhp, &msgid, &errata); - - cbp->cb_count++; - - /* - * If we were given 'zpool status -x', only report those pools with - * problems. - */ - if (cbp->cb_explain && - (reason == ZPOOL_STATUS_OK || - reason == ZPOOL_STATUS_VERSION_OLDER || - reason == ZPOOL_STATUS_FEAT_DISABLED || - reason == ZPOOL_STATUS_COMPATIBILITY_ERR || - reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) { - if (!cbp->cb_allpools) { - (void) printf(gettext("pool '%s' is healthy\n"), - zpool_get_name(zhp)); - if (cbp->cb_first) - cbp->cb_first = B_FALSE; - } - return (0); - } - - if (cbp->cb_first) - cbp->cb_first = B_FALSE; - else - (void) printf("\n"); - - nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); - verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, - (uint64_t **)&vs, &c) == 0); - - health = zpool_get_state_str(zhp); - - printf(" "); - printf_color(ANSI_BOLD, gettext("pool:")); - printf(" %s\n", zpool_get_name(zhp)); - fputc(' ', stdout); - printf_color(ANSI_BOLD, gettext("state: ")); + } + (void) printf("\n"); - printf_color(health_str_to_color(health), "%s", health); + verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS, + (uint64_t **)&dds, &c) == 0); + verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM, + (uint64_t **)&ddh, &c) == 0); + zpool_dump_ddt(dds, ddh); +} - fputc('\n', stdout); +#define ST_SIZE 4096 +#define AC_SIZE 2048 + +static void +print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp, + zpool_status_t reason, zpool_errata_t errata, nvlist_t *item) +{ + char status[ST_SIZE]; + char action[AC_SIZE]; + memset(status, 0, ST_SIZE); + memset(action, 0, AC_SIZE); switch (reason) { case ZPOOL_STATUS_MISSING_DEV_R: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices could " + snprintf(status, ST_SIZE, gettext("One or more devices could " "not be opened. 
Sufficient replicas exist for\n\tthe pool " "to continue functioning in a degraded state.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Attach the missing device " + snprintf(action, AC_SIZE, gettext("Attach the missing device " "and online it using 'zpool online'.\n")); break; case ZPOOL_STATUS_MISSING_DEV_NR: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices could " + snprintf(status, ST_SIZE, gettext("One or more devices could " "not be opened. There are insufficient\n\treplicas for the" " pool to continue functioning.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Attach the missing device " + snprintf(action, AC_SIZE, gettext("Attach the missing device " "and online it using 'zpool online'.\n")); break; case ZPOOL_STATUS_CORRUPT_LABEL_R: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices could " + snprintf(status, ST_SIZE, gettext("One or more devices could " "not be used because the label is missing or\n\tinvalid. " "Sufficient replicas exist for the pool to continue\n\t" "functioning in a degraded state.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Replace the device using " + snprintf(action, AC_SIZE, gettext("Replace the device using " "'zpool replace'.\n")); break; case ZPOOL_STATUS_CORRUPT_LABEL_NR: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices could " + snprintf(status, ST_SIZE, gettext("One or more devices could " "not be used because the label is missing \n\tor invalid. " "There are insufficient replicas for the pool to " "continue\n\tfunctioning.\n")); zpool_explain_recover(zpool_get_handle(zhp), - zpool_get_name(zhp), reason, config); + zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL), + action, AC_SIZE); break; case ZPOOL_STATUS_FAILING_DEV: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices has " + snprintf(status, ST_SIZE, gettext("One or more devices has " "experienced an unrecoverable error. An\n\tattempt was " "made to correct the error. 
Applications are " "unaffected.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Determine if the " + snprintf(action, AC_SIZE, gettext("Determine if the " "device needs to be replaced, and clear the errors\n\tusing" " 'zpool clear' or replace the device with 'zpool " "replace'.\n")); break; case ZPOOL_STATUS_OFFLINE_DEV: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices has " + snprintf(status, ST_SIZE, gettext("One or more devices has " "been taken offline by the administrator.\n\tSufficient " "replicas exist for the pool to continue functioning in " "a\n\tdegraded state.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Online the device " + snprintf(action, AC_SIZE, gettext("Online the device " "using 'zpool online' or replace the device with\n\t'zpool " "replace'.\n")); break; case ZPOOL_STATUS_REMOVED_DEV: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices has " + snprintf(status, ST_SIZE, gettext("One or more devices has " "been removed by the administrator.\n\tSufficient " "replicas exist for the pool to continue functioning in " "a\n\tdegraded state.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Online the device " + snprintf(action, AC_SIZE, gettext("Online the device " "using zpool online' or replace the device with\n\t'zpool " "replace'.\n")); break; case ZPOOL_STATUS_RESILVERING: case ZPOOL_STATUS_REBUILDING: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices is " + snprintf(status, ST_SIZE, gettext("One or more devices is " "currently being resilvered. The pool will\n\tcontinue " "to function, possibly in a degraded state.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Wait for the resilver to " + snprintf(action, AC_SIZE, gettext("Wait for the resilver to " "complete.\n")); break; case ZPOOL_STATUS_REBUILD_SCRUB: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices have " + snprintf(status, ST_SIZE, gettext("One or more devices have " "been sequentially resilvered, scrubbing\n\tthe pool " "is recommended.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to " + snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to " "verify all data checksums.\n")); break; case ZPOOL_STATUS_CORRUPT_DATA: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices has " + snprintf(status, ST_SIZE, gettext("One or more devices has " "experienced an error resulting in data\n\tcorruption. " "Applications may be affected.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Restore the file in question" + snprintf(action, AC_SIZE, gettext("Restore the file in question" " if possible. 
Otherwise restore the\n\tentire pool from " "backup.\n")); break; case ZPOOL_STATUS_CORRUPT_POOL: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("The pool metadata is " + snprintf(status, ST_SIZE, gettext("The pool metadata is " "corrupted and the pool cannot be opened.\n")); zpool_explain_recover(zpool_get_handle(zhp), - zpool_get_name(zhp), reason, config); + zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL), + action, AC_SIZE); break; case ZPOOL_STATUS_VERSION_OLDER: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("The pool is formatted using " + snprintf(status, ST_SIZE, gettext("The pool is formatted using " "a legacy on-disk format. The pool can\n\tstill be used, " "but some features are unavailable.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Upgrade the pool using " + snprintf(action, AC_SIZE, gettext("Upgrade the pool using " "'zpool upgrade'. Once this is done, the\n\tpool will no " "longer be accessible on software that does not support\n\t" "feature flags.\n")); break; case ZPOOL_STATUS_VERSION_NEWER: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("The pool has been upgraded " + snprintf(status, ST_SIZE, gettext("The pool has been upgraded " "to a newer, incompatible on-disk version.\n\tThe pool " "cannot be accessed on this system.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Access the pool from a " + snprintf(action, AC_SIZE, gettext("Access the pool from a " "system running more recent software, or\n\trestore the " "pool from backup.\n")); break; case ZPOOL_STATUS_FEAT_DISABLED: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("Some supported and " + snprintf(status, ST_SIZE, gettext("Some supported and " "requested features are not enabled on the pool.\n\t" "The pool can still be used, but some features are " "unavailable.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Enable all features using " + snprintf(action, AC_SIZE, gettext("Enable all features using " "'zpool upgrade'. Once this is done,\n\tthe pool may no " "longer be accessible by software that does not support\n\t" "the features. See zpool-features(7) for details.\n")); break; case ZPOOL_STATUS_COMPATIBILITY_ERR: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("This pool has a " + snprintf(status, ST_SIZE, gettext("This pool has a " "compatibility list specified, but it could not be\n\t" "read/parsed at this time. 
The pool can still be used, " "but this\n\tshould be investigated.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Check the value of the " + snprintf(action, AC_SIZE, gettext("Check the value of the " "'compatibility' property against the\n\t" "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n")); break; case ZPOOL_STATUS_INCOMPATIBLE_FEAT: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more features " + snprintf(status, ST_SIZE, gettext("One or more features " "are enabled on the pool despite not being\n\t" "requested by the 'compatibility' property.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Consider setting " + snprintf(action, AC_SIZE, gettext("Consider setting " "'compatibility' to an appropriate value, or\n\t" "adding needed features to the relevant file in\n\t" ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n")); break; case ZPOOL_STATUS_UNSUP_FEAT_READ: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed " + snprintf(status, ST_SIZE, gettext("The pool cannot be accessed " "on this system because it uses the\n\tfollowing feature(s)" " not supported on this system:\n")); - zpool_print_unsup_feat(config); - (void) printf("\n"); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Access the pool from a " + zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status, + 1024); + snprintf(action, AC_SIZE, gettext("Access the pool from a " "system that supports the required feature(s),\n\tor " "restore the pool from backup.\n")); break; case ZPOOL_STATUS_UNSUP_FEAT_WRITE: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("The pool can only be " + snprintf(status, ST_SIZE, gettext("The pool can only be " "accessed in read-only mode on this system. It\n\tcannot be" " accessed in read-write mode because it uses the " "following\n\tfeature(s) not supported on this system:\n")); - zpool_print_unsup_feat(config); - (void) printf("\n"); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed " + zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status, + 1024); + snprintf(action, AC_SIZE, gettext("The pool cannot be accessed " "in read-write mode. Import the pool with\n" "\t\"-o readonly=on\", access the pool from a system that " "supports the\n\trequired feature(s), or restore the " @@ -9207,106 +10476,90 @@ status_callback(zpool_handle_t *zhp, void *data) break; case ZPOOL_STATUS_FAULTED_DEV_R: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices are " + snprintf(status, ST_SIZE, gettext("One or more devices are " "faulted in response to persistent errors.\n\tSufficient " "replicas exist for the pool to continue functioning " "in a\n\tdegraded state.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Replace the faulted device, " + snprintf(action, AC_SIZE, gettext("Replace the faulted device, " "or use 'zpool clear' to mark the device\n\trepaired.\n")); break; case ZPOOL_STATUS_FAULTED_DEV_NR: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices are " + snprintf(status, ST_SIZE, gettext("One or more devices are " "faulted in response to persistent errors. 
There are " "insufficient replicas for the pool to\n\tcontinue " "functioning.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Destroy and re-create the " + snprintf(action, AC_SIZE, gettext("Destroy and re-create the " "pool from a backup source. Manually marking the device\n" "\trepaired using 'zpool clear' may allow some data " "to be recovered.\n")); break; case ZPOOL_STATUS_IO_FAILURE_MMP: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("The pool is suspended " + snprintf(status, ST_SIZE, gettext("The pool is suspended " "because multihost writes failed or were delayed;\n\t" "another system could import the pool undetected.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices" + snprintf(action, AC_SIZE, gettext("Make sure the pool's devices" " are connected, then reboot your system and\n\timport the " "pool or run 'zpool clear' to resume the pool.\n")); break; case ZPOOL_STATUS_IO_FAILURE_WAIT: case ZPOOL_STATUS_IO_FAILURE_CONTINUE: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("One or more devices are " + snprintf(status, ST_SIZE, gettext("One or more devices are " "faulted in response to IO failures.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Make sure the affected " + snprintf(action, AC_SIZE, gettext("Make sure the affected " "devices are connected, then run 'zpool clear'.\n")); break; case ZPOOL_STATUS_BAD_LOG: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("An intent log record " + snprintf(status, ST_SIZE, gettext("An intent log record " "could not be read.\n" "\tWaiting for administrator intervention to fix the " "faulted pool.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Either restore the affected " + snprintf(action, AC_SIZE, gettext("Either restore the affected " "device(s) and run 'zpool online',\n" "\tor ignore the intent log records by running " "'zpool clear'.\n")); break; case ZPOOL_STATUS_NON_NATIVE_ASHIFT: - (void) printf(gettext("status: One or more devices are " + snprintf(status, ST_SIZE, gettext("One or more devices are " "configured to use a non-native block size.\n" "\tExpect reduced performance.\n")); - (void) printf(gettext("action: Replace affected devices with " - "devices that support the\n\tconfigured block size, or " - "migrate data to a properly configured\n\tpool.\n")); + snprintf(action, AC_SIZE, gettext("Replace affected devices " + "with devices that support the\n\tconfigured block size, " + "or migrate data to a properly configured\n\tpool.\n")); break; case ZPOOL_STATUS_HOSTID_MISMATCH: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid" + snprintf(status, ST_SIZE, gettext("Mismatch between pool hostid" " and system hostid on imported pool.\n\tThis pool was " "previously imported into a system with a different " "hostid,\n\tand then was verbatim imported into this " "system.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("Export this pool on all " + snprintf(action, AC_SIZE, gettext("Export this pool on all " "systems on which it is imported.\n" "\tThen import it to correct the mismatch.\n")); break; case ZPOOL_STATUS_ERRATA: - printf_color(ANSI_BOLD, gettext("status: ")); - printf_color(ANSI_YELLOW, gettext("Errata #%d 
detected.\n"), + snprintf(status, ST_SIZE, gettext("Errata #%d detected.\n"), errata); - switch (errata) { case ZPOOL_ERRATA_NONE: break; case ZPOOL_ERRATA_ZOL_2094_SCRUB: - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("To correct the issue" + snprintf(action, AC_SIZE, gettext("To correct the issue" " run 'zpool scrub'.\n")); break; case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: - (void) printf(gettext("\tExisting encrypted datasets " - "contain an on-disk incompatibility\n\twhich " - "needs to be corrected.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("To correct the issue" + (void) strlcat(status, gettext("\tExisting encrypted " + "datasets contain an on-disk incompatibility\n\t " + "which needs to be corrected.\n"), ST_SIZE); + snprintf(action, AC_SIZE, gettext("To correct the issue" " backup existing encrypted datasets to new\n\t" "encrypted datasets and destroy the old ones. " "'zfs mount -o ro' can\n\tbe used to temporarily " @@ -9314,12 +10567,12 @@ status_callback(zpool_handle_t *zhp, void *data) break; case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: - (void) printf(gettext("\tExisting encrypted snapshots " - "and bookmarks contain an on-disk\n\tincompat" - "ibility. This may cause on-disk corruption if " - "they are used\n\twith 'zfs recv'.\n")); - printf_color(ANSI_BOLD, gettext("action: ")); - printf_color(ANSI_YELLOW, gettext("To correct the" + (void) strlcat(status, gettext("\tExisting encrypted " + "snapshots and bookmarks contain an on-disk\n\t" + "incompatibility. This may cause on-disk " + "corruption if they are used\n\twith " + "'zfs recv'.\n"), ST_SIZE); + snprintf(action, AC_SIZE, gettext("To correct the " "issue, enable the bookmark_v2 feature. No " "additional\n\taction is needed if there are no " "encrypted snapshots or bookmarks.\n\tIf preserving" @@ -9345,6 +10598,210 @@ status_callback(zpool_handle_t *zhp, void *data) assert(reason == ZPOOL_STATUS_OK); } + if (status[0] != 0) { + if (cbp->cb_json) + fnvlist_add_string(item, "status", status); + else { + printf_color(ANSI_BOLD, gettext("status: ")); + printf_color(ANSI_YELLOW, status); + } + } + + if (action[0] != 0) { + if (cbp->cb_json) + fnvlist_add_string(item, "action", action); + else { + printf_color(ANSI_BOLD, gettext("action: ")); + printf_color(ANSI_YELLOW, action); + } + } +} + +static int +status_callback_json(zpool_handle_t *zhp, void *data) +{ + status_cbdata_t *cbp = data; + nvlist_t *config, *nvroot; + const char *msgid; + char pool_guid[256]; + char msgbuf[256]; + uint64_t guid; + zpool_status_t reason; + zpool_errata_t errata; + uint_t c; + vdev_stat_t *vs; + nvlist_t *item, *d, *load_info, *vds; + item = d = NULL; + + /* If dedup stats were requested, also fetch dedupcached. */ + if (cbp->cb_dedup_stats > 1) + zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME); + reason = zpool_get_status(zhp, &msgid, &errata); + /* + * If we were given 'zpool status -x', only report those pools with + * problems.
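+ * Unlike the text path, the JSON path stays silent for the pools + * filtered out here; no per-pool "is healthy" message is emitted.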
+ */ + if (cbp->cb_explain && + (reason == ZPOOL_STATUS_OK || + reason == ZPOOL_STATUS_VERSION_OLDER || + reason == ZPOOL_STATUS_FEAT_DISABLED || + reason == ZPOOL_STATUS_COMPATIBILITY_ERR || + reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) { + return (0); + } + + d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools"); + item = fnvlist_alloc(); + vds = fnvlist_alloc(); + fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int); + config = zpool_get_config(zhp, NULL); + + if (config != NULL) { + nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); + verify(nvlist_lookup_uint64_array(nvroot, + ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0); + if (cbp->cb_json_pool_key_guid) { + guid = fnvlist_lookup_uint64(config, + ZPOOL_CONFIG_POOL_GUID); + snprintf(pool_guid, 256, "%llu", (u_longlong_t)guid); + } + cbp->cb_count++; + + print_status_reason(zhp, cbp, reason, errata, item); + if (msgid != NULL) { + snprintf(msgbuf, 256, + "https://openzfs.github.io/openzfs-docs/msg/%s", + msgid); + fnvlist_add_string(item, "msgid", msgid); + fnvlist_add_string(item, "moreinfo", msgbuf); + } + + if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, + &load_info) == 0) { + fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO, + load_info); + } + + scan_status_nvlist(zhp, cbp, nvroot, item); + removal_status_nvlist(zhp, cbp, nvroot, item); + checkpoint_status_nvlist(nvroot, cbp, item); + raidz_expand_status_nvlist(zhp, cbp, nvroot, item); + vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds); + if (cbp->cb_flat_vdevs) { + class_vdevs_nvlist(zhp, cbp, nvroot, + VDEV_ALLOC_BIAS_DEDUP, vds); + class_vdevs_nvlist(zhp, cbp, nvroot, + VDEV_ALLOC_BIAS_SPECIAL, vds); + class_vdevs_nvlist(zhp, cbp, nvroot, + VDEV_ALLOC_CLASS_LOGS, vds); + l2cache_nvlist(zhp, cbp, nvroot, vds); + spares_nvlist(zhp, cbp, nvroot, vds); + + fnvlist_add_nvlist(item, "vdevs", vds); + fnvlist_free(vds); + } else { + fnvlist_add_nvlist(item, "vdevs", vds); + fnvlist_free(vds); + + class_vdevs_nvlist(zhp, cbp, nvroot, + VDEV_ALLOC_BIAS_DEDUP, item); + class_vdevs_nvlist(zhp, cbp, nvroot, + VDEV_ALLOC_BIAS_SPECIAL, item); + class_vdevs_nvlist(zhp, cbp, nvroot, + VDEV_ALLOC_CLASS_LOGS, item); + l2cache_nvlist(zhp, cbp, nvroot, item); + spares_nvlist(zhp, cbp, nvroot, item); + } + dedup_stats_nvlist(zhp, cbp, item); + errors_nvlist(zhp, cbp, item); + } + if (cbp->cb_json_pool_key_guid) { + fnvlist_add_nvlist(d, pool_guid, item); + } else { + fnvlist_add_nvlist(d, zpool_get_name(zhp), + item); + } + fnvlist_free(item); + return (0); +} + +/* + * Display a summary of pool status. Displays a summary such as: + * + * pool: tank + * status: DEGRADED + * reason: One or more devices ... + * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01 + * config: + * mirror DEGRADED + * c1t0d0 OK + * c2t0d0 UNAVAIL + * + * When given the '-v' option, we print out the complete config. If the '-e' + * option is specified, then we print out error rate information as well. + */ +static int +status_callback(zpool_handle_t *zhp, void *data) +{ + status_cbdata_t *cbp = data; + nvlist_t *config, *nvroot; + const char *msgid; + zpool_status_t reason; + zpool_errata_t errata; + const char *health; + uint_t c; + vdev_stat_t *vs; + + /* If dedup stats were requested, also fetch dedupcached. 
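+ * ZPOOL_DEDUPCACHED_PROP_NAME backs the "cspace" figure reported + * with the dedup statistics below.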
*/ + if (cbp->cb_dedup_stats > 1) + zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME); + + config = zpool_get_config(zhp, NULL); + reason = zpool_get_status(zhp, &msgid, &errata); + + cbp->cb_count++; + + /* + * If we were given 'zpool status -x', only report those pools with + * problems. + */ + if (cbp->cb_explain && + (reason == ZPOOL_STATUS_OK || + reason == ZPOOL_STATUS_VERSION_OLDER || + reason == ZPOOL_STATUS_FEAT_DISABLED || + reason == ZPOOL_STATUS_COMPATIBILITY_ERR || + reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) { + if (!cbp->cb_allpools) { + (void) printf(gettext("pool '%s' is healthy\n"), + zpool_get_name(zhp)); + if (cbp->cb_first) + cbp->cb_first = B_FALSE; + } + return (0); + } + + if (cbp->cb_first) + cbp->cb_first = B_FALSE; + else + (void) printf("\n"); + + nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); + verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, + (uint64_t **)&vs, &c) == 0); + + health = zpool_get_state_str(zhp); + + printf(" "); + printf_color(ANSI_BOLD, gettext("pool:")); + printf(" %s\n", zpool_get_name(zhp)); + fputc(' ', stdout); + printf_color(ANSI_BOLD, gettext("state: ")); + + printf_color(health_str_to_color(health), "%s", health); + + fputc('\n', stdout); + print_status_reason(zhp, cbp, reason, errata, NULL); + if (msgid != NULL) { printf(" "); printf_color(ANSI_BOLD, gettext("see:")); @@ -9459,7 +10916,11 @@ status_callback(zpool_handle_t *zhp, void *data) * -T Display a timestamp in date(1) or Unix format * -v Display complete error logs * -x Display only pools with potential problems + * -j Display output in JSON format * --power Display vdev enclosure slot power status + * --json-int Display numbers in integer format instead of strings + * --json-flat-vdevs Display vdevs in flat hierarchy + * --json-pool-key-guid Use pool GUID as key for pool objects * * Describes the health status of all pools or some subset.
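+ * + * Example (illustrative): 'zpool status -j --json-int --json-flat-vdevs' + * emits a single JSON object with pools keyed by name (or by GUID when + * --json-pool-key-guid is given) and every vdev collected into one flat + * "vdevs" object instead of a nested tree.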
*/ @@ -9471,15 +10932,21 @@ zpool_do_status(int argc, char **argv) float interval = 0; unsigned long count = 0; status_cbdata_t cb = { 0 }; + nvlist_t *data; char *cmd = NULL; struct option long_options[] = { {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, + {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT}, + {"json-flat-vdevs", no_argument, NULL, + ZPOOL_OPTION_JSON_FLAT_VDEVS}, + {"json-pool-key-guid", no_argument, NULL, + ZPOOL_OPTION_POOL_KEY_GUID}, {0, 0, 0, 0} }; /* check options */ - while ((c = getopt_long(argc, argv, "c:DegiLpPstT:vx", long_options, + while ((c = getopt_long(argc, argv, "c:jDegiLpPstT:vx", long_options, NULL)) != -1) { switch (c) { case 'c': @@ -9540,12 +11007,25 @@ zpool_do_status(int argc, char **argv) case 'v': cb.cb_verbose = B_TRUE; break; + case 'j': + cb.cb_json = B_TRUE; + break; case 'x': cb.cb_explain = B_TRUE; break; case ZPOOL_OPTION_POWER: cb.cb_print_power = B_TRUE; break; + case ZPOOL_OPTION_JSON_FLAT_VDEVS: + cb.cb_flat_vdevs = B_TRUE; + break; + case ZPOOL_OPTION_JSON_NUMS_AS_INT: + cb.cb_json_as_int = B_TRUE; + cb.cb_literal = B_TRUE; + break; + case ZPOOL_OPTION_POOL_KEY_GUID: + cb.cb_json_pool_key_guid = B_TRUE; + break; case '?': if (optopt == 'c') { print_zpool_script_list("status"); @@ -9569,23 +11049,79 @@ zpool_do_status(int argc, char **argv) cb.cb_first = B_TRUE; cb.cb_print_status = B_TRUE; + if (cb.cb_flat_vdevs && !cb.cb_json) { + fprintf(stderr, gettext("'--json-flat-vdevs' only works with" + " '-j' option\n")); + usage(B_FALSE); + } + + if (cb.cb_json_as_int && !cb.cb_json) { + (void) fprintf(stderr, gettext("'--json-int' only works with" + " '-j' option\n")); + usage(B_FALSE); + } + + if (!cb.cb_json && cb.cb_json_pool_key_guid) { + (void) fprintf(stderr, gettext("'json-pool-key-guid' only" + " works with '-j' option\n")); + usage(B_FALSE); + } + for (;;) { - if (timestamp_fmt != NODATE) - print_timestamp(timestamp_fmt); + if (cb.cb_json) { + cb.cb_jsobj = zpool_json_schema(0, 1); + data = fnvlist_alloc(); + fnvlist_add_nvlist(cb.cb_jsobj, "pools", data); + fnvlist_free(data); + } + + if (timestamp_fmt != NODATE) { + if (cb.cb_json) { + if (cb.cb_json_as_int) { + fnvlist_add_uint64(cb.cb_jsobj, "time", + time(NULL)); + } else { + char ts[128]; + get_timestamp(timestamp_fmt, ts, 128); + fnvlist_add_string(cb.cb_jsobj, "time", + ts); + } + } else + print_timestamp(timestamp_fmt); + } if (cmd != NULL) cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd, NULL, NULL, 0, 0); - ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, - cb.cb_literal, status_callback, &cb); + if (cb.cb_json) { + ret = for_each_pool(argc, argv, B_TRUE, NULL, + ZFS_TYPE_POOL, cb.cb_literal, + status_callback_json, &cb); + } else { + ret = for_each_pool(argc, argv, B_TRUE, NULL, + ZFS_TYPE_POOL, cb.cb_literal, + status_callback, &cb); + } if (cb.vcdl != NULL) free_vdev_cmd_data_list(cb.vcdl); - if (argc == 0 && cb.cb_count == 0) - (void) fprintf(stderr, gettext("no pools available\n")); - else if (cb.cb_explain && cb.cb_first && cb.cb_allpools) - (void) printf(gettext("all pools are healthy\n")); + + if (cb.cb_json) { + if (ret == 0) + zcmd_print_json(cb.cb_jsobj); + else + nvlist_free(cb.cb_jsobj); + } else { + if (argc == 0 && cb.cb_count == 0) { + (void) fprintf(stderr, "%s", + gettext("no pools available\n")); + } else if (cb.cb_explain && cb.cb_first && + cb.cb_allpools) { + (void) printf("%s", + gettext("all pools are healthy\n")); + } + } if (ret != 0) return (ret); @@ -10738,6 +12274,17 @@ get_callback_vdev(zpool_handle_t *zhp, 
char *vdevname, void *data) zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; char value[ZFS_MAXPROPLEN]; zprop_source_t srctype; + nvlist_t *props, *item, *d; + props = item = d = NULL; + + if (cbp->cb_json) { + d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs"); + if (d == NULL) { + fprintf(stderr, "vdevs obj not found.\n"); + exit(1); + } + props = fnvlist_alloc(); + } for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { @@ -10759,9 +12306,22 @@ get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data) if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop, prop_name, value, sizeof (value), &srctype, cbp->cb_literal) == 0) { - zprop_print_one_property(vdevname, cbp, prop_name, - value, srctype, NULL, NULL); + zprop_collect_property(vdevname, cbp, prop_name, + value, srctype, NULL, NULL, props); + } + } + + if (cbp->cb_json) { + if (!nvlist_empty(props)) { + item = fnvlist_alloc(); + fill_vdev_info(item, zhp, vdevname, B_TRUE, + cbp->cb_json_as_int); + fnvlist_add_nvlist(item, "properties", props); + fnvlist_add_nvlist(d, vdevname, item); + fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d); + fnvlist_free(item); } + fnvlist_free(props); } return (0); @@ -10805,8 +12365,18 @@ get_callback(zpool_handle_t *zhp, void *data) zprop_source_t srctype; zprop_list_t *pl; int vid; + int err = 0; + nvlist_t *props, *item, *d; + props = item = d = NULL; if (cbp->cb_type == ZFS_TYPE_VDEV) { + if (cbp->cb_json) { + nvlist_t *pool = fnvlist_alloc(); + fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int); + fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool); + fnvlist_free(pool); + } + if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) { for_each_vdev(zhp, get_callback_vdev_cb, data); } else { @@ -10826,6 +12396,14 @@ get_callback(zpool_handle_t *zhp, void *data) } } else { assert(cbp->cb_type == ZFS_TYPE_POOL); + if (cbp->cb_json) { + d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools"); + if (d == NULL) { + fprintf(stderr, "pools obj not found.\n"); + exit(1); + } + props = fnvlist_alloc(); + } for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { /* * Skip the special fake placeholder. 
This will also @@ -10843,9 +12421,9 @@ get_callback(zpool_handle_t *zhp, void *data) value, sizeof (value), &srctype) != 0) continue; - zprop_print_one_property(zpool_get_name(zhp), - cbp, pl->pl_user_prop, value, srctype, - NULL, NULL); + err = zprop_collect_property( + zpool_get_name(zhp), cbp, pl->pl_user_prop, + value, srctype, NULL, NULL, props); } else if (pl->pl_prop == ZPROP_INVAL && (zpool_prop_feature(pl->pl_user_prop) || zpool_prop_unsupported(pl->pl_user_prop))) { @@ -10854,10 +12432,10 @@ get_callback(zpool_handle_t *zhp, void *data) if (zpool_prop_get_feature(zhp, pl->pl_user_prop, value, sizeof (value)) == 0) { - zprop_print_one_property( + err = zprop_collect_property( zpool_get_name(zhp), cbp, pl->pl_user_prop, value, srctype, - NULL, NULL); + NULL, NULL, props); } } else { if (zpool_get_prop(zhp, pl->pl_prop, value, @@ -10865,10 +12443,37 @@ get_callback(zpool_handle_t *zhp, void *data) cbp->cb_literal) != 0) continue; - zprop_print_one_property(zpool_get_name(zhp), - cbp, zpool_prop_to_name(pl->pl_prop), - value, srctype, NULL, NULL); + err = zprop_collect_property( + zpool_get_name(zhp), cbp, + zpool_prop_to_name(pl->pl_prop), + value, srctype, NULL, NULL, props); + } + if (err != 0) + return (err); + } + + if (cbp->cb_json) { + if (!nvlist_empty(props)) { + item = fnvlist_alloc(); + fill_pool_info(item, zhp, B_TRUE, + cbp->cb_json_as_int); + fnvlist_add_nvlist(item, "properties", props); + if (cbp->cb_json_pool_key_guid) { + char buf[256]; + uint64_t guid = fnvlist_lookup_uint64( + zpool_get_config(zhp, NULL), + ZPOOL_CONFIG_POOL_GUID); + snprintf(buf, 256, "%llu", + (u_longlong_t)guid); + fnvlist_add_nvlist(d, buf, item); + } else { + const char *name = zpool_get_name(zhp); + fnvlist_add_nvlist(d, name, item); + } + fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d); + fnvlist_free(item); } + fnvlist_free(props); } } @@ -10883,6 +12488,9 @@ get_callback(zpool_handle_t *zhp, void *data) * -o List of columns to display. Defaults to * "name,property,value,source". * -p Display values in parsable (exact) format. + * -j Display output in JSON format. + * --json-int Display numbers as integers instead of strings. + * --json-pool-key-guid Set pool GUID as key for pool objects. * * Get properties of pools in the system. Output space statistics * for each one as well as other attributes. 
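+ * + * Example (illustrative): 'zpool get -j --json-int size tank' reports the + * requested property under the pool's "properties" object, with numeric + * values emitted as JSON integers rather than strings.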
@@ -10896,6 +12504,7 @@ zpool_do_get(int argc, char **argv) int c, i; char *propstr = NULL; char *vdev = NULL; + nvlist_t *data = NULL; cb.cb_first = B_TRUE; @@ -10911,8 +12520,16 @@ zpool_do_get(int argc, char **argv) cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; current_prop_type = cb.cb_type; + struct option long_options[] = { + {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT}, + {"json-pool-key-guid", no_argument, NULL, + ZPOOL_OPTION_POOL_KEY_GUID}, + {0, 0, 0, 0} + }; + /* check options */ - while ((c = getopt(argc, argv, ":Hpo:")) != -1) { + while ((c = getopt_long(argc, argv, ":jHpo:", long_options, + NULL)) != -1) { switch (c) { case 'p': cb.cb_literal = B_TRUE; @@ -10920,6 +12537,18 @@ zpool_do_get(int argc, char **argv) case 'H': cb.cb_scripted = B_TRUE; break; + case 'j': + cb.cb_json = B_TRUE; + cb.cb_jsobj = zpool_json_schema(0, 1); + data = fnvlist_alloc(); + break; + case ZPOOL_OPTION_POOL_KEY_GUID: + cb.cb_json_pool_key_guid = B_TRUE; + break; + case ZPOOL_OPTION_JSON_NUMS_AS_INT: + cb.cb_json_as_int = B_TRUE; + cb.cb_literal = B_TRUE; + break; case 'o': memset(&cb.cb_columns, 0, sizeof (cb.cb_columns)); i = 0; @@ -10974,6 +12603,18 @@ zpool_do_get(int argc, char **argv) argc -= optind; argv += optind; + if (!cb.cb_json && cb.cb_json_as_int) { + (void) fprintf(stderr, gettext("'--json-int' only works with" + " '-j' option\n")); + usage(B_FALSE); + } + + if (!cb.cb_json && cb.cb_json_pool_key_guid) { + (void) fprintf(stderr, gettext("'json-pool-key-guid' only" + " works with '-j' option\n")); + usage(B_FALSE); + } + if (argc < 1) { (void) fprintf(stderr, gettext("missing property " "argument\n")); @@ -11008,6 +12649,10 @@ zpool_do_get(int argc, char **argv) cb.cb_type = ZFS_TYPE_VDEV; argc = 1; /* One pool to process */ } else { + if (cb.cb_json) { + nvlist_free(cb.cb_jsobj); + nvlist_free(data); + } fprintf(stderr, gettext("Expected a list of vdevs in" " \"%s\", but got:\n"), argv[0]); error_list_unresolved_vdevs(argc - 1, argv + 1, @@ -11017,6 +12662,10 @@ zpool_do_get(int argc, char **argv) return (1); } } else { + if (cb.cb_json) { + nvlist_free(cb.cb_jsobj); + nvlist_free(data); + } /* * The first arg isn't the name of a valid pool. 
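+ * When -j was given, the partially built JSON schema object and + * its data nvlist were freed just above, before the usage error + * is reported.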
 		 */
@@ -11039,9 +12688,22 @@ zpool_do_get(int argc, char **argv)
 		cb.cb_proplist = &fake_name;
 	}
 
+	if (cb.cb_json) {
+		if (cb.cb_type == ZFS_TYPE_VDEV)
+			fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data);
+		else
+			fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
+		fnvlist_free(data);
+	}
+
 	ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
 	    cb.cb_literal, get_callback, &cb);
 
+	if (ret == 0 && cb.cb_json)
+		zcmd_print_json(cb.cb_jsobj);
+	else if (ret != 0 && cb.cb_json)
+		nvlist_free(cb.cb_jsobj);
+
 	if (cb.cb_proplist == &fake_name)
 		zprop_free_list(fake_name.pl_next);
 	else
@@ -11688,8 +13350,39 @@ find_command_idx(const char *command, int *idx)
 static int
 zpool_do_version(int argc, char **argv)
 {
-	(void) argc, (void) argv;
-	return (zfs_version_print() != 0);
+	int c;
+	nvlist_t *jsobj = NULL, *zfs_ver = NULL;
+	boolean_t json = B_FALSE;
+	while ((c = getopt(argc, argv, "j")) != -1) {
+		switch (c) {
+		case 'j':
+			json = B_TRUE;
+			jsobj = zpool_json_schema(0, 1);
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	if (argc != 0) {
+		(void) fprintf(stderr, "too many arguments\n");
+		usage(B_FALSE);
+	}
+
+	if (json) {
+		zfs_ver = zfs_version_nvlist();
+		if (zfs_ver) {
+			fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver);
+			zcmd_print_json(jsobj);
+			fnvlist_free(zfs_ver);
+			return (0);
+		} else
+			return (-1);
+	} else
+		return (zfs_version_print() != 0);
 }
 
 /* Display documentation */
diff --git a/include/libzfs.h b/include/libzfs.h
index 979b919ce2fa..d3663435c29d 100644
--- a/include/libzfs.h
+++ b/include/libzfs.h
@@ -327,6 +327,8 @@ _LIBZFS_H int zpool_vdev_clear(zpool_handle_t *, uint64_t);
 
 _LIBZFS_H nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *,
     boolean_t *, boolean_t *);
+_LIBZFS_H nvlist_t *zpool_find_parent_vdev(zpool_handle_t *, const char *,
+    boolean_t *, boolean_t *, boolean_t *);
 _LIBZFS_H nvlist_t *zpool_find_vdev_by_physpath(zpool_handle_t *, const char *,
     boolean_t *, boolean_t *, boolean_t *);
 _LIBZFS_H int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *,
@@ -469,7 +471,8 @@ _LIBZFS_H int zpool_import(libzfs_handle_t *, nvlist_t *, const char *,
     char *altroot);
 _LIBZFS_H int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *,
     nvlist_t *, int);
-_LIBZFS_H void zpool_print_unsup_feat(nvlist_t *config);
+_LIBZFS_H void zpool_collect_unsup_feat(nvlist_t *config, char *buf,
+    size_t size);
 
 /*
  * Miscellaneous pool functions
@@ -500,7 +503,7 @@ _LIBZFS_H void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *,
     size_t);
 _LIBZFS_H int zfs_ioctl(libzfs_handle_t *, int, struct zfs_cmd *);
 _LIBZFS_H void zpool_explain_recover(libzfs_handle_t *, const char *, int,
-    nvlist_t *);
+    nvlist_t *, char *, size_t);
 _LIBZFS_H int zpool_checkpoint(zpool_handle_t *);
 _LIBZFS_H int zpool_discard_checkpoint(zpool_handle_t *);
 _LIBZFS_H boolean_t zpool_is_draid_spare(const char *);
@@ -631,6 +634,8 @@ _LIBZFS_H int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **,
     zfs_type_t);
 _LIBZFS_H void zprop_free_list(zprop_list_t *);
 
+_LIBZFS_H void zcmd_print_json(nvlist_t *);
+
 #define	ZFS_GET_NCOLS	5
 
 typedef enum {
@@ -658,9 +663,13 @@ typedef struct zprop_get_cbdata {
 	boolean_t cb_scripted;
 	boolean_t cb_literal;
 	boolean_t cb_first;
+	boolean_t cb_json;
 	zprop_list_t *cb_proplist;
 	zfs_type_t cb_type;
 	vdev_cbdata_t cb_vdevs;
+	nvlist_t *cb_jsobj;
+	boolean_t cb_json_as_int;
+	boolean_t cb_json_pool_key_guid;
 } zprop_get_cbdata_t;
 
 #define	ZFS_SET_NOMOUNT	1
@@ -674,6 +683,13 @@ _LIBZFS_H void zprop_print_one_property(const char *, zprop_get_cbdata_t *,
     const char *, const char *, zprop_source_t, const char *,
     const char *);
 
+_LIBZFS_H int zprop_nvlist_one_property(const char *, const char *,
+    zprop_source_t, const char *, const char *, nvlist_t *, boolean_t);
+
+_LIBZFS_H int zprop_collect_property(const char *, zprop_get_cbdata_t *,
+    const char *, const char *, zprop_source_t, const char *,
+    const char *, nvlist_t *);
+
 /*
  * Iterator functions.
  */
@@ -979,6 +995,7 @@ _LIBZFS_H boolean_t libzfs_envvar_is_set(const char *);
 _LIBZFS_H const char *zfs_version_userland(void);
 _LIBZFS_H char *zfs_version_kernel(void);
 _LIBZFS_H int zfs_version_print(void);
+_LIBZFS_H nvlist_t *zfs_version_nvlist(void);
 
 /*
  * Given a device or file, determine if it is part of a pool.
diff --git a/lib/libspl/include/statcommon.h b/lib/libspl/include/statcommon.h
index 971997a447a5..6b7cd0c105e1 100644
--- a/lib/libspl/include/statcommon.h
+++ b/lib/libspl/include/statcommon.h
@@ -37,5 +37,9 @@
 
 /* Print a timestamp in either Unix or standard format. */
 void print_timestamp(uint_t);
+/* Return timestamp in either Unix or standard format in provided buffer */
+void get_timestamp(uint_t, char *, int);
+/* Convert time_t to standard format */
+void format_timestamp(time_t, char *, int);
 
 #endif /* _STATCOMMON_H */
diff --git a/lib/libspl/timestamp.c b/lib/libspl/timestamp.c
index 9b435221f5fb..efe21fc1c0e5 100644
--- a/lib/libspl/timestamp.c
+++ b/lib/libspl/timestamp.c
@@ -62,3 +62,45 @@ print_timestamp(uint_t timestamp_fmt)
 		(void) printf("%s\n", dstr);
 	}
 }
+
+/*
+ * Return timestamp as decimal representation (in string) of time_t
+ * value (-T u was specified) or in date(1) format (-T d was specified).
+ */
+void
+get_timestamp(uint_t timestamp_fmt, char *buf, int len)
+{
+	time_t t = time(NULL);
+	static const char *fmt = NULL;
+
+	/* We only need to retrieve this once per invocation */
+	if (fmt == NULL)
+		fmt = nl_langinfo(_DATE_FMT);
+
+	if (timestamp_fmt == UDATE) {
+		(void) snprintf(buf, len, "%lld", (longlong_t)t);
+	} else if (timestamp_fmt == DDATE) {
+		struct tm tm;
+		strftime(buf, len, fmt, localtime_r(&t, &tm));
+	}
+}
+
+/*
+ * Format the provided time stamp to a human-readable format
+ */
+void
+format_timestamp(time_t t, char *buf, int len)
+{
+	struct tm tm;
+	static const char *fmt = NULL;
+
+	if (t == 0) {
+		snprintf(buf, len, "-");
+		return;
+	}
+
+	/* We only need to retrieve this once per invocation */
+	if (fmt == NULL)
+		fmt = nl_langinfo(_DATE_FMT);
+	strftime(buf, len, fmt, localtime_r(&t, &tm));
+}
diff --git a/lib/libuutil/libuutil.abi b/lib/libuutil/libuutil.abi
index 2ed2fb2e41e6..1ad837b0edf8 100644
--- a/lib/libuutil/libuutil.abi
+++ b/lib/libuutil/libuutil.abi
[Machine-generated ABI XML hunks at @@ -143,7 +143,9 @@ and
@@ -1151,6 +1153,18 @@: the element content did not survive extraction
and is omitted here.]
diff --git a/lib/libzfs/libzfs.abi b/lib/libzfs/libzfs.abi
index 4394c94208f4..51c8dc9647ee 100644
--- a/lib/libzfs/libzfs.abi
+++ b/lib/libzfs/libzfs.abi
[Machine-generated ABI XML hunks: the element content did not survive
extraction and is omitted here.]
diff --git a/lib/libzfs/libzfs_pool.c b/lib/libzfs/libzfs_pool.c
index b787d91a20d2..b9ddd75f0dfc 100644
--- a/lib/libzfs/libzfs_pool.c
+++ b/lib/libzfs/libzfs_pool.c
@@ -1990,23 +1990,18 @@ zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
 
 void
 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
-    nvlist_t *config)
+    nvlist_t *config, char *buf, size_t size)
 {
 	nvlist_t *nv = NULL;
 	int64_t loss = -1;
 	uint64_t edata = UINT64_MAX;
 	uint64_t rewindto;
 	struct tm t;
-	char timestr[128];
+	char timestr[128], temp[1024];
 
 	if (!hdl->libzfs_printerr)
 		return;
 
-	if (reason >= 0)
-		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
-	else
-		(void) printf(dgettext(TEXT_DOMAIN, "\t"));
-
 	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
 	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
@@ -2017,56 +2012,61 @@ zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
 	    &edata);
 
-	(void) printf(dgettext(TEXT_DOMAIN,
+	(void) snprintf(buf, size, dgettext(TEXT_DOMAIN,
 	    "Recovery is possible, but will result in some data loss.\n"));
 
 	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
 	    ctime_r((time_t *)&rewindto, timestr) != NULL) {
 		timestr[24] = 0;
-		(void) printf(dgettext(TEXT_DOMAIN,
+		(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
 		    "\tReturning the pool to its state as of %s\n"
-		    "\tshould correct the problem. "),
-		    timestr);
+		    "\tshould correct the problem. "), timestr);
+		(void) strlcat(buf, temp, size);
 	} else {
-		(void) printf(dgettext(TEXT_DOMAIN,
+		(void) strlcat(buf, dgettext(TEXT_DOMAIN,
 		    "\tReverting the pool to an earlier state "
-		    "should correct the problem.\n\t"));
+		    "should correct the problem.\n\t"), size);
 	}
 
 	if (loss > 120) {
-		(void) printf(dgettext(TEXT_DOMAIN,
+		(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
 		    "Approximately %lld minutes of data\n"
 		    "\tmust be discarded, irreversibly. "),
 		    ((longlong_t)loss + 30) / 60);
+		(void) strlcat(buf, temp, size);
 	} else if (loss > 0) {
-		(void) printf(dgettext(TEXT_DOMAIN,
+		(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
 		    "Approximately %lld seconds of data\n"
 		    "\tmust be discarded, irreversibly. "),
 		    (longlong_t)loss);
+		(void) strlcat(buf, temp, size);
 	}
 
 	if (edata != 0 && edata != UINT64_MAX) {
 		if (edata == 1) {
-			(void) printf(dgettext(TEXT_DOMAIN,
+			(void) strlcat(buf, dgettext(TEXT_DOMAIN,
 			    "After rewind, at least\n"
-			    "\tone persistent user-data error will remain. "));
+			    "\tone persistent user-data error will remain. "),
+			    size);
 		} else {
-			(void) printf(dgettext(TEXT_DOMAIN,
+			(void) strlcat(buf, dgettext(TEXT_DOMAIN,
 			    "After rewind, several\n"
-			    "\tpersistent user-data errors will remain. "));
+			    "\tpersistent user-data errors will remain. "),
+			    size);
 		}
 	}
 
-	(void) printf(dgettext(TEXT_DOMAIN,
+	(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
 	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
 	    reason >= 0 ? "clear" : "import", name);
+	(void) strlcat(buf, temp, size);
 
-	(void) printf(dgettext(TEXT_DOMAIN,
+	(void) strlcat(buf, dgettext(TEXT_DOMAIN,
 	    "A scrub of the pool\n"
-	    "\tis strongly recommended after recovery.\n"));
+	    "\tis strongly recommended after recovery.\n"), size);
 	return;
 
 no_info:
-	(void) printf(dgettext(TEXT_DOMAIN,
-	    "Destroy and re-create the pool from\n\ta backup source.\n"));
+	(void) strlcat(buf, dgettext(TEXT_DOMAIN,
+	    "Destroy and re-create the pool from\n\ta backup source.\n"), size);
 }
 
 /*
@@ -2135,9 +2135,10 @@ print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
 }
 
 void
-zpool_print_unsup_feat(nvlist_t *config)
+zpool_collect_unsup_feat(nvlist_t *config, char *buf, size_t size)
 {
 	nvlist_t *nvinfo, *unsup_feat;
+	char temp[512];
 
 	nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
 	unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);
@@ -2145,10 +2146,14 @@ zpool_print_unsup_feat(nvlist_t *config)
 	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
 	    nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
 		const char *desc = fnvpair_value_string(nvp);
-		if (strlen(desc) > 0)
-			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
-		else
-			(void) printf("\t%s\n", nvpair_name(nvp));
+		if (strlen(desc) > 0) {
+			(void) snprintf(temp, 512, "\t%s (%s)\n",
+			    nvpair_name(nvp), desc);
+			(void) strlcat(buf, temp, size);
+		} else {
+			(void) snprintf(temp, 512, "\t%s\n", nvpair_name(nvp));
+			(void) strlcat(buf, temp, size);
+		}
 	}
 }
 
@@ -2171,6 +2176,7 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
 	const char *origname;
 	int ret;
 	int error = 0;
+	char buf[2048];
 	char errbuf[ERRBUFLEN];
 
 	origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
@@ -2253,7 +2259,9 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
 			(void) printf(dgettext(TEXT_DOMAIN, "This "
 			    "pool uses the following feature(s) not "
 			    "supported by this system:\n"));
-			zpool_print_unsup_feat(nv);
+			memset(buf, 0, 2048);
+			zpool_collect_unsup_feat(nv, buf, 2048);
+			(void) printf("%s", buf);
 			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY)) {
 				(void) printf(dgettext(TEXT_DOMAIN,
@@ -2352,8 +2360,11 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
 			break;
 		default:
 			(void) zpool_standard_error(hdl, error, desc);
+			memset(buf, 0, 2048);
 			zpool_explain_recover(hdl,
-			    newname ? origname : thename, -error, nv);
+			    newname ? origname : thename, -error, nv,
+			    buf, 2048);
+			(void) printf("\t%s", buf);
 			break;
 		}
 
@@ -2852,10 +2863,13 @@ zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
  * the nvpair name to determine how we should look for the device.
  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
  * spare; but FALSE if its an INUSE spare.
+ *
+ * If 'return_parent' is set, then return the *parent* of the vdev you're
+ * searching for rather than the vdev itself.
  */
 static nvlist_t *
 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
-    boolean_t *l2cache, boolean_t *log)
+    boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
 {
 	uint_t c, children;
 	nvlist_t **child;
@@ -2863,6 +2877,8 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
 	uint64_t is_log;
 	const char *srchkey;
 	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
+	const char *tmp = NULL;
+	boolean_t is_root;
 
 	/* Nothing to look for */
 	if (search == NULL || pair == NULL)
@@ -2871,6 +2887,12 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
 	/* Obtain the key we will use to search */
 	srchkey = nvpair_name(pair);
 
+	nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &tmp);
+	if (tmp != NULL && strcmp(tmp, "root") == 0)
+		is_root = B_TRUE;
+	else
+		is_root = B_FALSE;
+
 	switch (nvpair_type(pair)) {
 	case DATA_TYPE_UINT64:
 		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
@@ -3001,7 +3023,7 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
 
 	for (c = 0; c < children; c++) {
 		if ((ret = vdev_to_nvlist_iter(child[c], search,
-		    avail_spare, l2cache, NULL)) != NULL) {
+		    avail_spare, l2cache, NULL, return_parent)) != NULL) {
 			/*
 			 * The 'is_log' value is only set for the toplevel
 			 * vdev, not the leaf vdevs.  So we always lookup the
@@ -3014,7 +3036,7 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
 			    is_log) {
 				*log = B_TRUE;
 			}
-			return (ret);
+			return (ret && return_parent && !is_root ? nv : ret);
 		}
 	}
 
@@ -3022,9 +3044,11 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
 	    &child, &children) == 0) {
 		for (c = 0; c < children; c++) {
 			if ((ret = vdev_to_nvlist_iter(child[c], search,
-			    avail_spare, l2cache, NULL)) != NULL) {
+			    avail_spare, l2cache, NULL, return_parent))
+			    != NULL) {
 				*avail_spare = B_TRUE;
-				return (ret);
+				return (ret && return_parent &&
+				    !is_root ? nv : ret);
 			}
 		}
 	}
@@ -3033,9 +3057,11 @@ vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
 	    &child, &children) == 0) {
 		for (c = 0; c < children; c++) {
 			if ((ret = vdev_to_nvlist_iter(child[c], search,
-			    avail_spare, l2cache, NULL)) != NULL) {
+			    avail_spare, l2cache, NULL, return_parent))
+			    != NULL) {
 				*l2cache = B_TRUE;
-				return (ret);
+				return (ret && return_parent &&
+				    !is_root ? nv : ret);
 			}
 		}
 	}
@@ -3070,7 +3096,8 @@ zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
 		*l2cache = B_FALSE;
 	if (log != NULL)
 		*log = B_FALSE;
-	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
+	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
+	    B_FALSE);
 	fnvlist_free(search);
 
 	return (ret);
@@ -3098,11 +3125,12 @@ zpool_vdev_is_interior(const char *name)
 }
 
 /*
- * Lookup the nvlist for a given vdev.
+ * Lookup the nvlist for a given vdev or vdev's parent (depending on
+ * whether 'return_parent' is set).
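+ *
+ * Note that the root vdev itself is never returned: if the match sits
+ * directly below the root, the match's own nvlist is returned even when
+ * the parent is requested (see the '!is_root' checks above).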
  */
-nvlist_t *
-zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
-    boolean_t *l2cache, boolean_t *log)
+static nvlist_t *
+__zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
+    boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
 {
 	char *end;
 	nvlist_t *nvroot, *search, *ret;
@@ -3139,12 +3167,30 @@ zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
 		*l2cache = B_FALSE;
 	if (log != NULL)
 		*log = B_FALSE;
-	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
+	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
+	    return_parent);
 	fnvlist_free(search);
 
 	return (ret);
 }
 
+nvlist_t *
+zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
+    boolean_t *l2cache, boolean_t *log)
+{
+	return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
+	    B_FALSE));
+}
+
+/* Given a vdev path, return its parent's nvlist */
+nvlist_t *
+zpool_find_parent_vdev(zpool_handle_t *zhp, const char *path,
+    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
+{
+	return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
+	    B_TRUE));
+}
+
 /*
  * Convert a vdev path to a GUID.  Returns GUID or 0 on error.
  *
diff --git a/lib/libzfs/libzfs_util.c b/lib/libzfs/libzfs_util.c
index b865af71a1dc..cba071a1a900 100644
--- a/lib/libzfs/libzfs_util.c
+++ b/lib/libzfs/libzfs_util.c
@@ -68,6 +68,7 @@
  * as necessary.
  */
 #define	URI_REGEX	"^\\([A-Za-z][A-Za-z0-9+.\\-]*\\):"
+#define	STR_NUMS	"0123456789"
 
 int
 libzfs_errno(libzfs_handle_t *hdl)
@@ -1267,6 +1268,14 @@ zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
  * ================================================================
  */
 
+void
+zcmd_print_json(nvlist_t *nvl)
+{
+	nvlist_print_json(stdout, nvl);
+	(void) putchar('\n');
+	nvlist_free(nvl);
+}
+
 static void
 zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
 {
@@ -1393,6 +1402,103 @@ zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
 	(void) printf("\n");
 }
 
+/*
+ * Add property value and source to provided nvlist, according to
+ * settings in cb structure. Later to be printed in JSON format.
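+ *
+ * The entry added under 'propname' has this shape (illustrative):
+ *
+ *	"<propname>": {
+ *	    "value": <string, or uint64 if 'as_int' and all-numeric>,
+ *	    "source": { "type": "LOCAL", "data": "-" }
+ *	}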
+ */
+int
+zprop_nvlist_one_property(const char *propname,
+    const char *value, zprop_source_t sourcetype, const char *source,
+    const char *recvd_value, nvlist_t *nvl, boolean_t as_int)
+{
+	int ret = 0;
+	nvlist_t *src_nv, *prop;
+	boolean_t all_numeric = strspn(value, STR_NUMS) == strlen(value);
+	src_nv = prop = NULL;
+
+	if ((nvlist_alloc(&prop, NV_UNIQUE_NAME, 0) != 0) ||
+	    (nvlist_alloc(&src_nv, NV_UNIQUE_NAME, 0) != 0)) {
+		ret = -1;
+		goto err;
+	}
+
+	if (as_int && all_numeric) {
+		uint64_t val;
+		sscanf(value, "%llu", (u_longlong_t *)&val);
+		if (nvlist_add_uint64(prop, "value", val) != 0) {
+			ret = -1;
+			goto err;
+		}
+	} else {
+		if (nvlist_add_string(prop, "value", value) != 0) {
+			ret = -1;
+			goto err;
+		}
+	}
+
+	switch (sourcetype) {
+	case ZPROP_SRC_NONE:
+		if (nvlist_add_string(src_nv, "type", "NONE") != 0 ||
+		    (nvlist_add_string(src_nv, "data", "-") != 0)) {
+			ret = -1;
+			goto err;
+		}
+		break;
+	case ZPROP_SRC_DEFAULT:
+		if (nvlist_add_string(src_nv, "type", "DEFAULT") != 0 ||
+		    (nvlist_add_string(src_nv, "data", "-") != 0)) {
+			ret = -1;
+			goto err;
+		}
+		break;
+	case ZPROP_SRC_LOCAL:
+		if (nvlist_add_string(src_nv, "type", "LOCAL") != 0 ||
+		    (nvlist_add_string(src_nv, "data", "-") != 0)) {
+			ret = -1;
+			goto err;
+		}
+		break;
+	case ZPROP_SRC_TEMPORARY:
+		if (nvlist_add_string(src_nv, "type", "TEMPORARY") != 0 ||
+		    (nvlist_add_string(src_nv, "data", "-") != 0)) {
+			ret = -1;
+			goto err;
+		}
+		break;
+	case ZPROP_SRC_INHERITED:
+		if (nvlist_add_string(src_nv, "type", "INHERITED") != 0 ||
+		    (nvlist_add_string(src_nv, "data", source) != 0)) {
+			ret = -1;
+			goto err;
+		}
+		break;
+	case ZPROP_SRC_RECEIVED:
+		if (nvlist_add_string(src_nv, "type", "RECEIVED") != 0 ||
+		    (nvlist_add_string(src_nv, "data",
+		    (recvd_value == NULL ? "-" : recvd_value)) != 0)) {
+			ret = -1;
+			goto err;
+		}
+		break;
+	default:
+		assert(!"unhandled zprop_source_t");
+		if (nvlist_add_string(src_nv, "type",
+		    "unhandled zprop_source_t") != 0) {
+			ret = -1;
+			goto err;
+		}
+	}
+	if ((nvlist_add_nvlist(prop, "source", src_nv) != 0) ||
+	    (nvlist_add_nvlist(nvl, propname, prop)) != 0) {
+		ret = -1;
+		goto err;
+	}
+err:
+	nvlist_free(src_nv);
+	nvlist_free(prop);
+	return (ret);
+}
+
 /*
  * Display a single line of output, according to the settings in the callback
  * structure.
@@ -1484,6 +1590,26 @@ zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp,
 	(void) printf("\n");
 }
 
+int
+zprop_collect_property(const char *name, zprop_get_cbdata_t *cbp,
+    const char *propname, const char *value, zprop_source_t sourcetype,
+    const char *source, const char *recvd_value, nvlist_t *nvl)
+{
+	if (cbp->cb_json) {
+		if ((sourcetype & cbp->cb_sources) == 0)
+			return (0);
+		else {
+			return (zprop_nvlist_one_property(propname, value,
+			    sourcetype, source, recvd_value, nvl,
+			    cbp->cb_json_as_int));
+		}
+	} else {
+		zprop_print_one_property(name, cbp,
+		    propname, value, sourcetype, source, recvd_value);
+		return (0);
+	}
+}
+
 /*
  * Given a numeric suffix, convert the value into a number of bits that the
  * resulting value must be shifted.
@@ -1999,6 +2125,34 @@ zfs_version_print(void)
 	return (0);
 }
 
+/*
+ * Returns an nvlist with both zfs userland and kernel versions.
+ * Returns NULL on error.
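+ * The list carries "userland" and "kernel" string entries. The caller
+ * owns the result and must free it with nvlist_free() (or hand it to
+ * zcmd_print_json(), which frees its argument).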
+ */
+nvlist_t *
+zfs_version_nvlist(void)
+{
+	nvlist_t *nvl;
+	char kmod_ver[64];
+	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
+		return (NULL);
+	if (nvlist_add_string(nvl, "userland", ZFS_META_ALIAS) != 0)
+		goto err;
+	char *kver = zfs_version_kernel();
+	if (kver == NULL) {
+		fprintf(stderr, "zfs_version_kernel() failed: %s\n",
+		    zfs_strerror(errno));
+		goto err;
+	}
+	(void) snprintf(kmod_ver, 64, "zfs-kmod-%s", kver);
+	if (nvlist_add_string(nvl, "kernel", kmod_ver) != 0)
+		goto err;
+	return (nvl);
+err:
+	nvlist_free(nvl);
+	return (NULL);
+}
+
 /*
  * Return 1 if the user requested ANSI color output, and our terminal supports
  * it.  Return 0 for no color.
diff --git a/lib/libzfs_core/libzfs_core.abi b/lib/libzfs_core/libzfs_core.abi
index 02b2dc7182b1..1062a6b52dff 100644
--- a/lib/libzfs_core/libzfs_core.abi
+++ b/lib/libzfs_core/libzfs_core.abi
[Machine-generated ABI XML hunks at @@ -126,7 +126,9 @@ and
@@ -1110,6 +1112,18 @@: the element content did not survive extraction
and is omitted here.]
diff --git a/man/man8/zfs-list.8 b/man/man8/zfs-list.8
index 85bd3fbafced..b49def08b72b 100644
--- a/man/man8/zfs-list.8
+++ b/man/man8/zfs-list.8
@@ -41,6 +41,7 @@
 .Cm list
 .Op Fl r Ns | Ns Fl d Ar depth
 .Op Fl Hp
+.Op Fl j Op Ar --json-int
 .Oo Fl o Ar property Ns Oo , Ns Ar property Oc Ns … Oc
 .Oo Fl s Ar property Oc Ns …
 .Oo Fl S Ar property Oc Ns …
@@ -70,6 +71,11 @@ The following fields are displayed:
 Used for scripting mode.
 Do not print headers and separate fields by a single tab instead of arbitrary
 white space.
+.It Fl j Op Ar --json-int
+Print the output in JSON format.
+Specify
+.Sy --json-int
+to print the numbers in integer format instead of strings in JSON output.
 .It Fl d Ar depth
 Recursively display any children of the dataset, limiting the recursion to
 .Ar depth .
@@ -186,6 +192,161 @@
 pool/home            315K   457G    21K  /export/home
 pool/home/anne        18K   457G    18K  /export/home/anne
 pool/home/bob        276K   457G   276K  /export/home/bob
 .Ed
+.Ss Example 2 : No Listing ZFS filesystems and snapshots in JSON format
+.Bd -literal -compact -offset Ds
+.No # Nm zfs Cm list Fl j Fl t Ar filesystem,snapshot | Cm jq
+{
+  "output_version": {
+    "command": "zfs list",
+    "vers_major": 0,
+    "vers_minor": 1
+  },
+  "datasets": {
+    "pool": {
+      "name": "pool",
+      "type": "FILESYSTEM",
+      "pool": "pool",
+      "properties": {
+        "used": {
+          "value": "290K",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "available": {
+          "value": "30.5G",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "referenced": {
+          "value": "24K",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "mountpoint": {
+          "value": "/pool",
+          "source": {
+            "type": "DEFAULT",
+            "data": "-"
+          }
+        }
+      }
+    },
+    "pool/home": {
+      "name": "pool/home",
+      "type": "FILESYSTEM",
+      "pool": "pool",
+      "properties": {
+        "used": {
+          "value": "48K",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "available": {
+          "value": "30.5G",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "referenced": {
+          "value": "24K",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "mountpoint": {
+          "value": "/mnt/home",
+          "source": {
+            "type": "LOCAL",
+            "data": "-"
+          }
+        }
+      }
+    },
+    "pool/home/bob": {
+      "name": "pool/home/bob",
+      "type": "FILESYSTEM",
+      "pool": "pool",
+      "properties": {
+        "used": {
+          "value": "24K",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "available": {
+          "value": "30.5G",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "referenced": {
+          "value": "24K",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "mountpoint": {
+          "value": "/mnt/home/bob",
+          "source": {
+            "type": "INHERITED",
+            "data": "pool/home"
+          }
+        }
+      }
+    },
+    "pool/home/bob@v1": {
+      "name": "pool/home/bob@v1",
+      "type": "SNAPSHOT",
+      "pool": "pool",
+      "dataset": "pool/home/bob",
+      "snapshot_name": "v1",
+      "properties": {
+        "used": {
+          "value": "0B",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "available": {
+          "value": "-",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "referenced": {
+          "value": "24K",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "mountpoint": {
+          "value": "-",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        }
+      }
+    }
+  }
+}
+.Ed
 .
 .Sh SEE ALSO
 .Xr zfsprops 7 ,
diff --git a/man/man8/zfs-mount.8 b/man/man8/zfs-mount.8
index 20dbe4d0e648..6116fbaab77f 100644
--- a/man/man8/zfs-mount.8
+++ b/man/man8/zfs-mount.8
@@ -39,6 +39,7 @@
 .Sh SYNOPSIS
 .Nm zfs
 .Cm mount
+.Op Fl j
 .Nm zfs
 .Cm mount
 .Op Fl Oflv
@@ -54,8 +55,13 @@
 .It Xo
 .Nm zfs
 .Cm mount
+.Op Fl j
 .Xc
 Displays all ZFS file systems currently mounted.
+.Bl -tag -width "-j"
+.It Fl j
+Displays all mounted file systems in JSON format.
+.El
 .It Xo
 .Nm zfs
 .Cm mount
diff --git a/man/man8/zfs-set.8 b/man/man8/zfs-set.8
index 8cc19caf3f00..204450d72ec9 100644
--- a/man/man8/zfs-set.8
+++ b/man/man8/zfs-set.8
@@ -46,6 +46,7 @@
 .Cm get
 .Op Fl r Ns | Ns Fl d Ar depth
 .Op Fl Hp
+.Op Fl j Op Ar --json-int
 .Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns … Oc
 .Oo Fl s Ar source Ns Oo , Ns Ar source Oc Ns … Oc
 .Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns … Oc
@@ -91,6 +92,7 @@ dataset.
 .Cm get
 .Op Fl r Ns | Ns Fl d Ar depth
 .Op Fl Hp
+.Op Fl j Op Ar --json-int
 .Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns … Oc
 .Oo Fl s Ar source Ns Oo , Ns Ar source Oc Ns … Oc
 .Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns … Oc
@@ -128,6 +130,11 @@ The value
 can be used to display all properties that apply to the given dataset's type
 .Pq Sy filesystem , volume , snapshot , No or Sy bookmark .
 .Bl -tag -width "-s source"
+.It Fl j Op Ar --json-int
+Display the output in JSON format.
+Specify
+.Sy --json-int
+to display numbers in integer format instead of strings for JSON output.
 .It Fl H
 Display output in a form more easily parsed by scripts.
 Any headers are omitted, and fields are explicitly separated by a single tab
@@ -283,6 +290,50 @@ The following command gets a single property value:
 on
 .Ed
 .Pp
+The following command gets a single property value recursively in JSON format:
+.Bd -literal -compact -offset Ds
+.No # Nm zfs Cm get Fl j Fl r Sy mountpoint Ar pool/home | Nm jq
+{
+  "output_version": {
+    "command": "zfs get",
+    "vers_major": 0,
+    "vers_minor": 1
+  },
+  "datasets": {
+    "pool/home": {
+      "name": "pool/home",
+      "type": "FILESYSTEM",
+      "pool": "pool",
+      "createtxg": "10",
+      "properties": {
+        "mountpoint": {
+          "value": "/pool/home",
+          "source": {
+            "type": "DEFAULT",
+            "data": "-"
+          }
+        }
+      }
+    },
+    "pool/home/bob": {
+      "name": "pool/home/bob",
+      "type": "FILESYSTEM",
+      "pool": "pool",
+      "createtxg": "1176",
+      "properties": {
+        "mountpoint": {
+          "value": "/pool/home/bob",
+          "source": {
+            "type": "DEFAULT",
+            "data": "-"
+          }
+        }
+      }
+    }
+  }
+}
+.Ed
+.Pp
 The following command lists all properties with local settings for
 .Ar pool/home/bob :
 .Bd -literal -compact -offset Ds
diff --git a/man/man8/zfs.8 b/man/man8/zfs.8
index dd578cb74aac..2ee15ab21806 100644
--- a/man/man8/zfs.8
+++ b/man/man8/zfs.8
@@ -48,6 +48,7 @@
 .Fl ?V
 .Nm
 .Cm version
+.Op Fl j
 .Nm
 .Cm subcommand
 .Op Ar arguments
@@ -153,10 +154,14 @@ Displays a help message.
 .It Xo
 .Nm
 .Cm version
+.Op Fl j
 .Xc
 Displays the software version of the
 .Nm
 userland utility and the zfs kernel module.
+Use the
+.Fl j
+option to output in JSON format.
 .El
 .
 .Ss Dataset Management
diff --git a/man/man8/zpool-get.8 b/man/man8/zpool-get.8
index 78a39b07d749..5384906f17f2 100644
--- a/man/man8/zpool-get.8
+++ b/man/man8/zpool-get.8
@@ -37,6 +37,7 @@
 .Nm zpool
 .Cm get
 .Op Fl Hp
+.Op Fl j Op Ar --json-int, --json-pool-key-guid
 .Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns …
 .Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns …
 .Oo Ar pool Oc Ns …
@@ -44,6 +45,7 @@
 .Nm zpool
 .Cm get
 .Op Fl Hp
+.Op Fl j Op Ar --json-int
 .Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns …
 .Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns …
 .Ar pool
@@ -67,6 +69,7 @@
 .Nm zpool
 .Cm get
 .Op Fl Hp
+.Op Fl j Op Ar --json-int, --json-pool-key-guid
 .Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns …
 .Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns …
 .Oo Ar pool Oc Ns …
@@ -95,6 +98,14 @@ See the
 .Xr zpoolprops 7
 manual page for more information on the available pool properties.
 .Bl -tag -compact -offset Ds -width "-o field"
+.It Fl j Op Ar --json-int, --json-pool-key-guid
+Display the list of properties in JSON format.
+Specify
+.Sy --json-int
+to display the numbers in integer format instead of strings in JSON output.
+Specify
+.Sy --json-pool-key-guid
+to set pool GUID as key for pool objects instead of pool name.
 .It Fl H
 Scripted mode.
 Do not display headers, and separate fields by a single tab instead of
 arbitrary
@@ -108,6 +119,7 @@
 Display numbers in parsable (exact) values.
 .It Xo
 .Nm zpool
 .Cm get
+.Op Fl j Op Ar --json-int
 .Op Fl Hp
 .Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns …
 .Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns …
@@ -145,6 +157,11 @@ See the
 .Xr vdevprops 7
 manual page for more information on the available pool properties.
 .Bl -tag -compact -offset Ds -width "-o field"
+.It Fl j Op Ar --json-int
+Display the list of properties in JSON format.
+Specify
+.Sy --json-int
+to display the numbers in integer format instead of strings in JSON output.
 .It Fl H
 Scripted mode.
 Do not display headers, and separate fields by a single tab instead of
 arbitrary
diff --git a/man/man8/zpool-list.8 b/man/man8/zpool-list.8
index c60c47f5eb3d..b0ee659701d4 100644
--- a/man/man8/zpool-list.8
+++ b/man/man8/zpool-list.8
@@ -37,6 +37,7 @@
 .Nm zpool
 .Cm list
 .Op Fl HgLpPv
+.Op Fl j Op Ar --json-int, --json-pool-key-guid
 .Op Fl o Ar property Ns Oo , Ns Ar property Oc Ns …
 .Op Fl T Sy u Ns | Ns Sy d
 .Oo Ar pool Oc Ns …
@@ -58,6 +59,14 @@ is specified, the command exits after
 .Ar count
 reports are printed.
 .Bl -tag -width Ds
+.It Fl j Op Ar --json-int, --json-pool-key-guid
+Display the list of pools in JSON format.
+Specify
+.Sy --json-int
+to display the numbers in integer format instead of strings.
+Specify
+.Sy --json-pool-key-guid
+to set pool GUID as key for pool objects instead of pool names.
 .It Fl g
 Display vdev GUIDs instead of the normal device names.
 These GUIDs can be used in place of device names for the zpool
@@ -139,6 +148,104 @@
 data  23.9G  14.6G  9.30G      -    48%    61%  1.00x  ONLINE  -
   sda      -      -      -      -      -
   sdb      -      -      -    10G      -
   sdc      -      -      -      -      -
 .Ed
 .
+.Ss Example 3 : No Listing the available pools in JSON format
+The following command lists all available pools on the system in JSON
+format.
+.Bd -literal -compact -offset Ds
+.No # Nm zpool Cm list Fl j | Nm jq
+{
+  "output_version": {
+    "command": "zpool list",
+    "vers_major": 0,
+    "vers_minor": 1
+  },
+  "pools": {
+    "tank": {
+      "name": "tank",
+      "type": "POOL",
+      "state": "ONLINE",
+      "guid": "15220353080205405147",
+      "txg": "2671",
+      "spa_version": "5000",
+      "zpl_version": "5",
+      "properties": {
+        "size": {
+          "value": "111G",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "allocated": {
+          "value": "30.8G",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "free": {
+          "value": "80.2G",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "checkpoint": {
+          "value": "-",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "expandsize": {
+          "value": "-",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "fragmentation": {
+          "value": "0%",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "capacity": {
+          "value": "27%",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "dedupratio": {
+          "value": "1.00x",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "health": {
+          "value": "ONLINE",
+          "source": {
+            "type": "NONE",
+            "data": "-"
+          }
+        },
+        "altroot": {
+          "value": "-",
+          "source": {
+            "type": "DEFAULT",
+            "data": "-"
+          }
+        }
+      }
+    }
+  }
+}
+
+.Ed
 .
 .Sh SEE ALSO
diff --git a/man/man8/zpool-status.8 b/man/man8/zpool-status.8
index d570c852d787..b40faeb9977f 100644
--- a/man/man8/zpool-status.8
+++ b/man/man8/zpool-status.8
@@ -41,6 +41,7 @@
 .Op Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns …
 .Oo Ar pool Oc Ns …
 .Op Ar interval Op Ar count
+.Op Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
 .
 .Sh DESCRIPTION
 Displays the detailed health status for the given pools.
@@ -69,6 +70,17 @@ See the
 option of
 .Nm zpool Cm iostat
 for complete details.
+.It Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
+Display the status for ZFS pools in JSON format.
+Specify
+.Sy --json-int
+to display numbers in integer format instead of strings.
+Specify
+.Sy --json-flat-vdevs
+to display vdevs in flat hierarchy instead of nested vdev objects.
+Specify
+.Sy --json-pool-key-guid
+to set pool GUID as key for pool objects instead of pool names.
 .It Fl D
 Display a histogram of deduplication statistics, showing the allocated
 .Pq physically present on disk
@@ -161,6 +173,175 @@
 rpool       14.6G  54.9G      4     55   250K  2.69M
 ----------  -----  -----  -----  -----  -----  -----  ----
 .Ed
 .
+.Ss Example 2 : No Displaying the status output in JSON format
+.Nm zpool Cm status No can output in JSON format if
+.Fl j
+is specified.
+.Fl c
+can be used to run a script on each VDEV.
+.Bd -literal -compact -offset Ds
+.No # Nm zpool Cm status Fl j Fl c Pa vendor , Ns Pa model , Ns Pa size | Nm jq
+{
+  "output_version": {
+    "command": "zpool status",
+    "vers_major": 0,
+    "vers_minor": 1
+  },
+  "pools": {
+    "tank": {
+      "name": "tank",
+      "state": "ONLINE",
+      "guid": "3920273586464696295",
+      "txg": "16597",
+      "spa_version": "5000",
+      "zpl_version": "5",
+      "status": "OK",
+      "vdevs": {
+        "tank": {
+          "name": "tank",
+          "alloc_space": "62.6G",
+          "total_space": "15.0T",
+          "def_space": "11.3T",
+          "read_errors": "0",
+          "write_errors": "0",
+          "checksum_errors": "0",
+          "vdevs": {
+            "raidz1-0": {
+              "name": "raidz1-0",
+              "vdev_type": "raidz",
+              "guid": "763132626387621737",
+              "state": "HEALTHY",
+              "alloc_space": "62.5G",
+              "total_space": "10.9T",
+              "def_space": "7.26T",
+              "rep_dev_size": "10.9T",
+              "read_errors": "0",
+              "write_errors": "0",
+              "checksum_errors": "0",
+              "vdevs": {
+                "ca1eb824-c371-491d-ac13-37637e35c683": {
+                  "name": "ca1eb824-c371-491d-ac13-37637e35c683",
+                  "vdev_type": "disk",
+                  "guid": "12841765308123764671",
+                  "path": "/dev/disk/by-partuuid/ca1eb824-c371-491d-ac13-37637e35c683",
+                  "state": "HEALTHY",
+                  "rep_dev_size": "3.64T",
+                  "phys_space": "3.64T",
+                  "read_errors": "0",
+                  "write_errors": "0",
+                  "checksum_errors": "0",
+                  "vendor": "ATA",
+                  "model": "WDC WD40EFZX-68AWUN0",
+                  "size": "3.6T"
+                },
+                "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7": {
+                  "name": "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
+                  "vdev_type": "disk",
+                  "guid": "1527839927278881561",
+                  "path": "/dev/disk/by-partuuid/97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
+                  "state": "HEALTHY",
+                  "rep_dev_size": "3.64T",
+                  "phys_space": "3.64T",
+                  "read_errors": "0",
+                  "write_errors": "0",
+                  "checksum_errors": "0",
+                  "vendor": "ATA",
+                  "model": "WDC WD40EFZX-68AWUN0",
+                  "size": "3.6T"
+                },
+                "e9ddba5f-f948-4734-a472-cb8aa5f0ff65": {
+                  "name": "e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
+                  "vdev_type": "disk",
+                  "guid": "6982750226085199860",
+                  "path": "/dev/disk/by-partuuid/e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
+                  "state": "HEALTHY",
+                  "rep_dev_size": "3.64T",
+                  "phys_space": "3.64T",
+                  "read_errors": "0",
+                  "write_errors": "0",
+                  "checksum_errors": "0",
+                  "vendor": "ATA",
+                  "model": "WDC WD40EFZX-68AWUN0",
+                  "size": "3.6T"
+                }
+              }
+            }
+          }
+        }
+      },
+      "dedup": {
+        "mirror-2": {
+          "name": "mirror-2",
+          "vdev_type": "mirror",
+          "guid": "2227766268377771003",
+          "state": "HEALTHY",
+          "alloc_space": "89.1M",
+          "total_space": "3.62T",
+          "def_space": "3.62T",
+          "rep_dev_size": "3.62T",
+          "read_errors": "0",
+          "write_errors": "0",
+          "checksum_errors": "0",
+          "vdevs": {
+            "db017360-d8e9-4163-961b-144ca75293a3": {
+              "name": "db017360-d8e9-4163-961b-144ca75293a3",
+              "vdev_type": "disk",
+              "guid": "17880913061695450307",
+              "path": "/dev/disk/by-partuuid/db017360-d8e9-4163-961b-144ca75293a3",
+              "state": "HEALTHY",
+              "rep_dev_size": "3.63T",
+              "phys_space": "3.64T",
+              "read_errors": "0",
+              "write_errors": "0",
+              "checksum_errors": "0",
+              "vendor": "ATA",
+              "model": "WDC WD40EFZX-68AWUN0",
+              "size": "3.6T"
+            },
+            "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f": {
+              "name": "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
+              "vdev_type": "disk",
+              "guid": "10276374011610020557",
+              "path": "/dev/disk/by-partuuid/952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
+              "state": "HEALTHY",
+              "rep_dev_size": "3.63T",
+              "phys_space": "3.64T",
+              "read_errors": "0",
+              "write_errors": "0",
+              "checksum_errors": "0",
+              "vendor": "ATA",
+              "model": "WDC WD40EFZX-68AWUN0",
+              "size": "3.6T"
+            }
+          }
+        }
+      },
+      "special": {
+        "25d418f8-92bd-4327-b59f-7ef5d5f50d81": {
+          "name": "25d418f8-92bd-4327-b59f-7ef5d5f50d81",
+          "vdev_type": "disk",
+          "guid": "3935742873387713123",
+          "path": "/dev/disk/by-partuuid/25d418f8-92bd-4327-b59f-7ef5d5f50d81",
+          "state": "HEALTHY",
+          "alloc_space": "37.4M",
+          "total_space": "444G",
+          "def_space": "444G",
+          "rep_dev_size": "444G",
+          "phys_space": "447G",
+          "read_errors": "0",
+          "write_errors": "0",
+          "checksum_errors": "0",
+          "vendor": "ATA",
+          "model": "Micron_5300_MTFDDAK480TDS",
+          "size": "447.1G"
+        }
+      },
+      "error_count": "0"
+    }
+  }
+}
+.Ed
+.
 .Sh SEE ALSO
 .Xr zpool-events 8 ,
 .Xr zpool-history 8 ,
diff --git a/man/man8/zpool.8 b/man/man8/zpool.8
index 2b966b72bf4c..c55644d9ecea 100644
--- a/man/man8/zpool.8
+++ b/man/man8/zpool.8
@@ -38,6 +38,7 @@
 .Fl ?V
 .Nm
 .Cm version
+.Op Fl j
 .Nm
 .Cm subcommand
 .Op Ar arguments
@@ -79,10 +80,14 @@ Displays a help message.
 .It Xo
 .Nm
 .Cm version
+.Op Fl j
 .Xc
 Displays the software version of the
 .Nm
 userland utility and the ZFS kernel module.
+Use the
+.Fl j
+option to output in JSON format.
 .El
 .
 .Ss Creation
diff --git a/tests/runfiles/common.run b/tests/runfiles/common.run
index 51a38d70bc66..81ebb6a10cca 100644
--- a/tests/runfiles/common.run
+++ b/tests/runfiles/common.run
@@ -153,6 +153,10 @@ tests = ['clean_mirror_001_pos', 'clean_mirror_002_pos',
     'clean_mirror_003_pos', 'clean_mirror_004_pos']
 tags = ['functional', 'clean_mirror']
 
+[tests/functional/cli_root/json]
+tests = ['json_sanity']
+tags = ['functional', 'cli_root', 'json']
+
 [tests/functional/cli_root/zinject]
 tests = ['zinject_args']
 pre =
diff --git a/tests/zfs-tests/include/commands.cfg b/tests/zfs-tests/include/commands.cfg
index daa794551682..19770138bf14 100644
--- a/tests/zfs-tests/include/commands.cfg
+++ b/tests/zfs-tests/include/commands.cfg
@@ -46,6 +46,7 @@ export SYSTEM_FILES_COMMON='awk
     hostname
     id
     iostat
+    jq
     kill
     ksh
     ldd
diff --git a/tests/zfs-tests/tests/Makefile.am b/tests/zfs-tests/tests/Makefile.am
index 73a6f00b5006..0223f2721ba7 100644
--- a/tests/zfs-tests/tests/Makefile.am
+++ b/tests/zfs-tests/tests/Makefile.am
@@ -606,6 +606,9 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
 	functional/clean_mirror/clean_mirror_004_pos.ksh \
 	functional/clean_mirror/cleanup.ksh \
 	functional/clean_mirror/setup.ksh \
+	functional/cli_root/json/cleanup.ksh \
+	functional/cli_root/json/setup.ksh \
+	functional/cli_root/json/json_sanity.ksh \
 	functional/cli_root/zinject/zinject_args.ksh \
 	functional/cli_root/zdb/zdb_002_pos.ksh \
 	functional/cli_root/zdb/zdb_003_pos.ksh \
diff --git a/tests/zfs-tests/tests/functional/cli_root/json/cleanup.ksh b/tests/zfs-tests/tests/functional/cli_root/json/cleanup.ksh
new file mode 100755
index 000000000000..f82a90962292
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/cli_root/json/cleanup.ksh
@@ -0,0 +1,31 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2024 by Lawrence Livermore National Security, LLC.
+
+. $STF_SUITE/include/libtest.shlib
+
+zpool destroy testpool1
+zpool destroy testpool2
+
+rm $TESTDIR/file{1..28}
+rmdir $TESTDIR
diff --git a/tests/zfs-tests/tests/functional/cli_root/json/json_sanity.ksh b/tests/zfs-tests/tests/functional/cli_root/json/json_sanity.ksh
new file mode 100755
index 000000000000..e598dd57181e
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/cli_root/json/json_sanity.ksh
@@ -0,0 +1,57 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+
+# Copyright (c) 2024 by Lawrence Livermore National Security, LLC.
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+#	Basic sanity check for valid JSON from zfs & zpool commands.
+#
+# STRATEGY:
+#	1. Run different zfs/zpool -j commands and check for valid JSON
+#
+
+list=(
+	"zpool status -j -g --json-int --json-flat-vdevs --json-pool-key-guid"
+	"zpool status -p -j -g --json-int --json-flat-vdevs --json-pool-key-guid"
+	"zpool status -j -c upath"
+	"zpool status -j"
+	"zpool status -j testpool1"
+	"zpool list -j"
+	"zpool list -j -g"
+	"zpool list -j -o fragmentation"
+	"zpool get -j size"
+	"zpool get -j all"
+	"zpool version -j"
+	"zfs list -j"
+	"zfs list -j testpool1"
+	"zfs get -j all"
+	"zfs get -j available"
+	"zfs mount -j"
+	"zfs version -j"
+)
+
+for cmd in "${list[@]}" ; do
+	log_must eval "$cmd | jq > /dev/null"
+done
+
+log_pass "zpool and zfs commands produced valid JSON"
diff --git a/tests/zfs-tests/tests/functional/cli_root/json/setup.ksh b/tests/zfs-tests/tests/functional/cli_root/json/setup.ksh
new file mode 100755
index 000000000000..f94dc5697423
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/cli_root/json/setup.ksh
@@ -0,0 +1,50 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2024 by Lawrence Livermore National Security, LLC.
+
+. $STF_SUITE/include/libtest.shlib
+
+# Sanity check that 'testpool1' or 'testpool2' don't exist
+log_mustnot eval "zpool status -j | \
+	jq -e '.pools | has(\"testpool1\") or has(\"testpool2\")'" &> /dev/null
+
+mkdir -p $TESTDIR
+truncate -s 80M $TESTDIR/file{1..28}
+
+DISK=${DISKS%% *}
+
+# Create complex pool configs to exercise JSON
+zpool create -f testpool1 draid $TESTDIR/file{1..10} \
+	special $DISK \
+	dedup $TESTDIR/file11 \
+	special $TESTDIR/file12 \
+	cache $TESTDIR/file13 \
+	log $TESTDIR/file14
+
+zpool create -f testpool2 mirror $TESTDIR/file{15,16} \
+	raidz1 $TESTDIR/file{17,18,19} \
+	cache $TESTDIR/file20 \
+	log $TESTDIR/file21 \
+	special mirror $TESTDIR/file{22,23} \
+	dedup mirror $TESTDIR/file{24,25} \
+	spare $TESTDIR/file{26,27,28}
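
Reviewer's note (not part of the patch): below is a minimal, self-contained
sketch of how the new collection helpers compose. It is written under stated
assumptions: the program is linked against the modified libzfs and libnvpair,
and the property name and value are made up for illustration.

#include <libzfs.h>
#include <libnvpair.h>

int
main(void)
{
	/* Container that becomes one JSON object of properties. */
	nvlist_t *props = fnvlist_alloc();

	/*
	 * Record one property as strings (as_int == B_FALSE); a DEFAULT
	 * source stores "-" as the source data, matching the man page
	 * examples above.
	 */
	if (zprop_nvlist_one_property("mountpoint", "/pool/home",
	    ZPROP_SRC_DEFAULT, NULL, NULL, props, B_FALSE) != 0)
		return (1);

	/* Print the nvlist as JSON on stdout; this also frees 'props'. */
	zcmd_print_json(props);
	return (0);
}

Expected output (one line), the same per-property shape that the zfs get and
zpool get examples show:
{"mountpoint":{"value":"/pool/home","source":{"type":"DEFAULT","data":"-"}}}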