mirror of
https://github.com/openzfs/zfs.git
synced 2025-10-01 02:46:29 +00:00

When running zpool iostat in interval mode, it would not notice any new
pools created or imported, and would forget any destroyed or exported,
so would not notice if they came back. This leads to outputting "no
pools available" every interval until killed.
It looks like this was at least intended to work; the comment above
zpool_do_iostat() indicates that it is expected to "deal with pool
creation/destruction" and that pool_list_update() would detect new
pools. That call however was removed in commit 3e43edd2c5, though it's
unclear if that broke this behaviour and it wasn't noticed, or if it never
worked, or if something later broke it. That said, the lack of
pool_list_update() is only part of the reason it doesn't work properly.
The fundamental problem is that the various things involved in
refreshing or updating the list of pools would aggressively ignore,
remove, skip or fail on pools that stop existing, or that already exist.
Mostly this meant that once a pool is removed from the list, it will
never be seen again. Restoring pool_list_update() to the
zpool_do_iostat() loop only partially fixes this - it would find "new"
pools again, but only in the "all pools" (no args) mode, and because its
iterator callback add_pool() would abort the iterator if it already has
a pool listed, it would only add pools if there weren't any already.
So, this commit reworks the structure somewhat. pool_list_update()
becomes pool_list_refresh(), and will ensure the state of all pools in
the list are updated. In the "all pools" mode, it will also add new
pools and remove pools that disappear, but when a fixed list of pools is
used, the list doesn't change, only the state of the pools within it.
The rest of the commit is adjusting things for this much simpler
structure. Regardless of the mode in use, pool_list_refresh() will
always do the right thing, so the driver code can just get on with the
display.
Now that pools can appear and disappear, I've made it so the header (if
enabled) is re-printed when the list changes, so that it's easier to see
what's happening if the column widths change.
Since this is all rather complicated, I've included tests for the "all
pools" and "set of pools" modes.
Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Igor Kozhukhov <igor@dilos.org>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17786
769 lines
18 KiB
C
769 lines
18 KiB
C
// SPDX-License-Identifier: CDDL-1.0
|
|
/*
|
|
* CDDL HEADER START
|
|
*
|
|
* The contents of this file are subject to the terms of the
|
|
* Common Development and Distribution License (the "License").
|
|
* You may not use this file except in compliance with the License.
|
|
*
|
|
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
|
* or https://opensource.org/licenses/CDDL-1.0.
|
|
* See the License for the specific language governing permissions
|
|
* and limitations under the License.
|
|
*
|
|
* When distributing Covered Code, include this CDDL HEADER in each
|
|
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
|
* If applicable, add the following below this CDDL HEADER, with the
|
|
* fields enclosed by brackets "[]" replaced with your own identifying
|
|
* information: Portions Copyright [yyyy] [name of copyright owner]
|
|
*
|
|
* CDDL HEADER END
|
|
*/
|
|
/*
|
|
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
|
|
* Use is subject to license terms.
|
|
*/
|
|
|
|
/*
|
|
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
|
|
* Copyright (c) 2025, Klara, Inc.
|
|
*/
|
|
|
|
#include <libintl.h>
|
|
#include <libuutil.h>
|
|
#include <stddef.h>
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
#include <thread_pool.h>
|
|
|
|
#include <libzfs.h>
|
|
#include <libzutil.h>
|
|
#include <sys/zfs_context.h>
|
|
#include <sys/wait.h>
|
|
|
|
#include "zpool_util.h"
|
|
|
|
/*
|
|
* Private interface for iterating over pools specified on the command line.
|
|
* Most consumers will call for_each_pool, but in order to support iostat, we
|
|
 * allow fine-grained control through the zpool_list_t interface.
|
|
*/
|
|
|
|
/*
 * A single pool being tracked. One of these exists for every pool on the
 * list, linked into the zpool_list's AVL tree (sorted by pool name).
 */
typedef struct zpool_node {
	zpool_handle_t *zn_handle;	/* open libzfs handle for this pool */
	uu_avl_node_t zn_avlnode;	/* linkage into zpool_list's zl_avl */
	hrtime_t zn_last_refresh;	/* zl_last_refresh value when this */
					/* node was last seen/updated */
} zpool_node_t;
|
|
|
|
/*
 * The list of pools being iterated over. Created by pool_list_get() and
 * kept up to date by pool_list_refresh().
 */
struct zpool_list {
	boolean_t zl_findall;	/* B_TRUE when no pools were named on the */
				/* command line, so refresh may add/remove */
	boolean_t zl_literal;	/* pass literal values when expanding props */
	uu_avl_t *zl_avl;	/* tree of zpool_node_t, sorted by name */
	uu_avl_pool_t *zl_pool;	/* AVL pool backing zl_avl */
	zprop_list_t **zl_proplist;	/* property list expanded per pool */
	zfs_type_t zl_type;	/* type passed to zpool_expand_proplist() */
	hrtime_t zl_last_refresh;	/* timestamp of last refresh pass */
};
|
|
|
|
static int
|
|
zpool_compare(const void *larg, const void *rarg, void *unused)
|
|
{
|
|
(void) unused;
|
|
zpool_handle_t *l = ((zpool_node_t *)larg)->zn_handle;
|
|
zpool_handle_t *r = ((zpool_node_t *)rarg)->zn_handle;
|
|
const char *lname = zpool_get_name(l);
|
|
const char *rname = zpool_get_name(r);
|
|
|
|
return (strcmp(lname, rname));
|
|
}
|
|
|
|
/*
|
|
* Callback function for pool_list_get(). Adds the given pool to the AVL tree
|
|
* of known pools.
|
|
*/
|
|
static int
|
|
add_pool(zpool_handle_t *zhp, zpool_list_t *zlp)
|
|
{
|
|
zpool_node_t *node, *new = safe_malloc(sizeof (zpool_node_t));
|
|
uu_avl_index_t idx;
|
|
|
|
new->zn_handle = zhp;
|
|
uu_avl_node_init(new, &new->zn_avlnode, zlp->zl_pool);
|
|
|
|
node = uu_avl_find(zlp->zl_avl, new, NULL, &idx);
|
|
if (node == NULL) {
|
|
if (zlp->zl_proplist &&
|
|
zpool_expand_proplist(zhp, zlp->zl_proplist,
|
|
zlp->zl_type, zlp->zl_literal) != 0) {
|
|
zpool_close(zhp);
|
|
free(new);
|
|
return (-1);
|
|
}
|
|
new->zn_last_refresh = zlp->zl_last_refresh;
|
|
uu_avl_insert(zlp->zl_avl, new, idx);
|
|
} else {
|
|
node->zn_last_refresh = zlp->zl_last_refresh;
|
|
zpool_close(zhp);
|
|
free(new);
|
|
return (-1);
|
|
}
|
|
|
|
return (0);
|
|
}
|
|
|
|
/*
|
|
* add_pool(), but always returns 0. This allows zpool_iter() to continue
|
|
* even if a pool exists in the tree, or we fail to get the properties for
|
|
* a new one.
|
|
*/
|
|
static int
|
|
add_pool_cb(zpool_handle_t *zhp, void *data)
|
|
{
|
|
(void) add_pool(zhp, data);
|
|
return (0);
|
|
}
|
|
|
|
/*
|
|
* Create a list of pools based on the given arguments. If we're given no
|
|
* arguments, then iterate over all pools in the system and add them to the AVL
|
|
* tree. Otherwise, add only those pool explicitly specified on the command
|
|
* line.
|
|
*/
|
|
zpool_list_t *
|
|
pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,
|
|
boolean_t literal, int *err)
|
|
{
|
|
zpool_list_t *zlp;
|
|
|
|
zlp = safe_malloc(sizeof (zpool_list_t));
|
|
|
|
zlp->zl_pool = uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t),
|
|
offsetof(zpool_node_t, zn_avlnode), zpool_compare, UU_DEFAULT);
|
|
|
|
if (zlp->zl_pool == NULL)
|
|
zpool_no_memory();
|
|
|
|
if ((zlp->zl_avl = uu_avl_create(zlp->zl_pool, NULL,
|
|
UU_DEFAULT)) == NULL)
|
|
zpool_no_memory();
|
|
|
|
zlp->zl_proplist = proplist;
|
|
zlp->zl_type = type;
|
|
|
|
zlp->zl_literal = literal;
|
|
zlp->zl_last_refresh = gethrtime();
|
|
|
|
if (argc == 0) {
|
|
(void) zpool_iter(g_zfs, add_pool_cb, zlp);
|
|
zlp->zl_findall = B_TRUE;
|
|
} else {
|
|
int i;
|
|
|
|
for (i = 0; i < argc; i++) {
|
|
zpool_handle_t *zhp;
|
|
|
|
if ((zhp = zpool_open_canfail(g_zfs, argv[i])) !=
|
|
NULL) {
|
|
if (add_pool(zhp, zlp) != 0)
|
|
*err = B_TRUE;
|
|
} else {
|
|
*err = B_TRUE;
|
|
}
|
|
}
|
|
}
|
|
|
|
return (zlp);
|
|
}
|
|
|
|
/*
 * Refresh the state of all pools on the list. Additionally, if no options were
 * given on the command line, add any new pools and remove any that are no
 * longer available.
 *
 * Returns the number of available pools (fixed-list mode) or the number of
 * pools remaining on the list (find-all mode).
 */
int
pool_list_refresh(zpool_list_t *zlp)
{
	/* Stamp this pass; nodes seen during it get this timestamp. */
	zlp->zl_last_refresh = gethrtime();

	if (!zlp->zl_findall) {
		/*
		 * This list is a fixed list of pools, so we must not add
		 * or remove any. Just walk over them and refresh their
		 * state.
		 */
		int navail = 0;
		for (zpool_node_t *node = uu_avl_first(zlp->zl_avl);
		    node != NULL; node = uu_avl_next(zlp->zl_avl, node)) {
			boolean_t missing;
			zpool_refresh_stats(node->zn_handle, &missing);
			/* Count only pools that still exist. */
			navail += !missing;
			node->zn_last_refresh = zlp->zl_last_refresh;
		}
		return (navail);
	}

	/*
	 * Search for any new pools and add them to the list. zpool_iter()
	 * will call zpool_refresh_stats() as part of its work, so this has
	 * the side effect of updating all active handles.
	 */
	(void) zpool_iter(g_zfs, add_pool_cb, zlp);

	/*
	 * Walk the list for any that weren't refreshed, and update and remove
	 * them. It's not enough to just skip available ones, as zpool_iter()
	 * won't update them, so they'll still appear active in our list.
	 */
	zpool_node_t *node, *next;
	for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next) {
		/* Save next before any potential removal of node. */
		next = uu_avl_next(zlp->zl_avl, node);

		/*
		 * Skip any that were refreshed and are online; they're already
		 * handled.
		 */
		if (node->zn_last_refresh == zlp->zl_last_refresh &&
		    zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL)
			continue;

		/* Do the refresh ourselves, just in case. */
		boolean_t missing;
		zpool_refresh_stats(node->zn_handle, &missing);
		if (missing) {
			/* Pool has gone away; drop it from the list. */
			uu_avl_remove(zlp->zl_avl, node);
			zpool_close(node->zn_handle);
			free(node);
		} else {
			node->zn_last_refresh = zlp->zl_last_refresh;
		}
	}

	return (uu_avl_numnodes(zlp->zl_avl));
}
|
|
|
|
/*
|
|
* Iterate over all pools in the list, executing the callback for each
|
|
*/
|
|
int
|
|
pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
|
|
void *data)
|
|
{
|
|
zpool_node_t *node, *next_node;
|
|
int ret = 0;
|
|
|
|
for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next_node) {
|
|
next_node = uu_avl_next(zlp->zl_avl, node);
|
|
if (zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL ||
|
|
unavail)
|
|
ret |= func(node->zn_handle, data);
|
|
}
|
|
|
|
return (ret);
|
|
}
|
|
|
|
/*
|
|
* Free all the handles associated with this list.
|
|
*/
|
|
void
|
|
pool_list_free(zpool_list_t *zlp)
|
|
{
|
|
uu_avl_walk_t *walk;
|
|
zpool_node_t *node;
|
|
|
|
if ((walk = uu_avl_walk_start(zlp->zl_avl, UU_WALK_ROBUST)) == NULL) {
|
|
(void) fprintf(stderr,
|
|
gettext("internal error: out of memory"));
|
|
exit(1);
|
|
}
|
|
|
|
while ((node = uu_avl_walk_next(walk)) != NULL) {
|
|
uu_avl_remove(zlp->zl_avl, node);
|
|
zpool_close(node->zn_handle);
|
|
free(node);
|
|
}
|
|
|
|
uu_avl_walk_end(walk);
|
|
uu_avl_destroy(zlp->zl_avl);
|
|
uu_avl_pool_destroy(zlp->zl_pool);
|
|
|
|
free(zlp);
|
|
}
|
|
|
|
/*
|
|
* Returns the number of elements in the pool list.
|
|
*/
|
|
int
|
|
pool_list_count(zpool_list_t *zlp)
|
|
{
|
|
return (uu_avl_numnodes(zlp->zl_avl));
|
|
}
|
|
|
|
/*
|
|
* High level function which iterates over all pools given on the command line,
|
|
* using the pool_list_* interfaces.
|
|
*/
|
|
int
|
|
for_each_pool(int argc, char **argv, boolean_t unavail,
|
|
zprop_list_t **proplist, zfs_type_t type, boolean_t literal,
|
|
zpool_iter_f func, void *data)
|
|
{
|
|
zpool_list_t *list;
|
|
int ret = 0;
|
|
|
|
if ((list = pool_list_get(argc, argv, proplist, type, literal,
|
|
&ret)) == NULL)
|
|
return (1);
|
|
|
|
if (pool_list_iter(list, unavail, func, data) != 0)
|
|
ret = 1;
|
|
|
|
pool_list_free(list);
|
|
|
|
return (ret);
|
|
}
|
|
|
|
/*
|
|
* This is the equivalent of for_each_pool() for vdevs. It iterates thorough
|
|
* all vdevs in the pool, ignoring root vdevs and holes, calling func() on
|
|
* each one.
|
|
*
|
|
* @zhp: Zpool handle
|
|
* @func: Function to call on each vdev
|
|
* @data: Custom data to pass to the function
|
|
*/
|
|
int
|
|
for_each_vdev(zpool_handle_t *zhp, pool_vdev_iter_f func, void *data)
|
|
{
|
|
nvlist_t *config, *nvroot = NULL;
|
|
|
|
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
|
|
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
|
|
&nvroot) == 0);
|
|
}
|
|
return (for_each_vdev_cb((void *) zhp, nvroot, func, data));
|
|
}
|
|
|
|
/*
 * Process the vcdl->vdev_cmd_data[] array to figure out all the unique column
 * names and their widths. When this function is done, vcdl->uniq_cols,
 * vcdl->uniq_cols_cnt, and vcdl->uniq_cols_width will be filled in.
 *
 * Note: uniq_cols[] holds borrowed pointers into data->cols[]; only the
 * array itself is allocated here (and freed in free_vdev_cmd_data_list()).
 */
static void
process_unique_cmd_columns(vdev_cmd_data_list_t *vcdl)
{
	char **uniq_cols = NULL, **tmp = NULL;
	int *uniq_cols_width;
	vdev_cmd_data_t *data;
	int cnt = 0;
	int k;

	/* For each vdev */
	for (int i = 0; i < vcdl->count; i++) {
		data = &vcdl->data[i];
		/* For each column the vdev reported */
		for (int j = 0; j < data->cols_cnt; j++) {
			/* Is this column in our list of unique column names? */
			for (k = 0; k < cnt; k++) {
				if (strcmp(data->cols[j], uniq_cols[k]) == 0)
					break; /* yes it is */
			}
			if (k == cnt) {
				/* No entry for column, add to list */
				tmp = realloc(uniq_cols, sizeof (*uniq_cols) *
				    (cnt + 1));
				if (tmp == NULL)
					break; /* Nothing we can do... */
				uniq_cols = tmp;
				/* Borrow the string; no copy is made. */
				uniq_cols[cnt] = data->cols[j];
				cnt++;
			}
		}
	}

	/*
	 * We now have a list of all the unique column names. Figure out the
	 * max width of each column by looking at the column name and all its
	 * values.
	 */
	uniq_cols_width = safe_malloc(sizeof (*uniq_cols_width) * cnt);
	for (int i = 0; i < cnt; i++) {
		/* Start off with the column title's width */
		uniq_cols_width[i] = strlen(uniq_cols[i]);
		/* For each vdev */
		for (int j = 0; j < vcdl->count; j++) {
			/* For each of the vdev's values in a column */
			data = &vcdl->data[j];
			for (k = 0; k < data->cols_cnt; k++) {
				/* Does this vdev have a value for this col? */
				if (strcmp(data->cols[k], uniq_cols[i]) == 0) {
					/* Is the value width larger? */
					uniq_cols_width[i] =
					    MAX(uniq_cols_width[i],
					    strlen(data->lines[k]));
				}
			}
		}
	}

	vcdl->uniq_cols = uniq_cols;
	vcdl->uniq_cols_cnt = cnt;
	vcdl->uniq_cols_width = uniq_cols_width;
}
|
|
|
|
|
|
/*
|
|
* Process a line of command output
|
|
*
|
|
* When running 'zpool iostat|status -c' the lines of output can either be
|
|
* in the form of:
|
|
*
|
|
* column_name=value
|
|
*
|
|
* Or just:
|
|
*
|
|
* value
|
|
*
|
|
* Process the column_name (if any) and value.
|
|
*
|
|
* Returns 0 if line was processed, and there are more lines can still be
|
|
* processed.
|
|
*
|
|
* Returns 1 if this was the last line to process, or error.
|
|
*/
|
|
static int
|
|
vdev_process_cmd_output(vdev_cmd_data_t *data, char *line)
|
|
{
|
|
char *col;
|
|
char *val;
|
|
char *equals;
|
|
char **tmp;
|
|
|
|
if (line == NULL)
|
|
return (1);
|
|
|
|
equals = strchr(line, '=');
|
|
if (equals != NULL) {
|
|
/*
|
|
* We have a 'column=value' type line. Split it into the
|
|
* column and value strings by turning the '=' into a '\0'.
|
|
*/
|
|
*equals = '\0';
|
|
col = line;
|
|
val = equals + 1;
|
|
} else {
|
|
col = NULL;
|
|
val = line;
|
|
}
|
|
|
|
/* Do we already have a column by this name? If so, skip it. */
|
|
if (col != NULL) {
|
|
for (int i = 0; i < data->cols_cnt; i++) {
|
|
if (strcmp(col, data->cols[i]) == 0)
|
|
return (0); /* Duplicate, skip */
|
|
}
|
|
}
|
|
|
|
if (val != NULL) {
|
|
tmp = realloc(data->lines,
|
|
(data->lines_cnt + 1) * sizeof (*data->lines));
|
|
if (tmp == NULL)
|
|
return (1);
|
|
|
|
data->lines = tmp;
|
|
data->lines[data->lines_cnt] = strdup(val);
|
|
data->lines_cnt++;
|
|
}
|
|
|
|
if (col != NULL) {
|
|
tmp = realloc(data->cols,
|
|
(data->cols_cnt + 1) * sizeof (*data->cols));
|
|
if (tmp == NULL)
|
|
return (1);
|
|
|
|
data->cols = tmp;
|
|
data->cols[data->cols_cnt] = strdup(col);
|
|
data->cols_cnt++;
|
|
}
|
|
|
|
if (val != NULL && col == NULL)
|
|
return (1);
|
|
|
|
return (0);
|
|
}
|
|
|
|
/*
|
|
* Run the cmd and store results in *data.
|
|
*/
|
|
static void
|
|
vdev_run_cmd(vdev_cmd_data_t *data, char *cmd)
|
|
{
|
|
int rc;
|
|
char *argv[2] = {cmd};
|
|
char **env;
|
|
char **lines = NULL;
|
|
int lines_cnt = 0;
|
|
int i;
|
|
|
|
env = zpool_vdev_script_alloc_env(data->pool, data->path, data->upath,
|
|
data->vdev_enc_sysfs_path, NULL, NULL);
|
|
if (env == NULL)
|
|
goto out;
|
|
|
|
/* Run the command */
|
|
rc = libzfs_run_process_get_stdout_nopath(cmd, argv, env, &lines,
|
|
&lines_cnt);
|
|
|
|
zpool_vdev_script_free_env(env);
|
|
|
|
if (rc != 0)
|
|
goto out;
|
|
|
|
/* Process the output we got */
|
|
for (i = 0; i < lines_cnt; i++)
|
|
if (vdev_process_cmd_output(data, lines[i]) != 0)
|
|
break;
|
|
|
|
out:
|
|
if (lines != NULL)
|
|
libzfs_free_str_array(lines, lines_cnt);
|
|
}
|
|
|
|
/*
|
|
* Generate the search path for zpool iostat/status -c scripts.
|
|
* The string returned must be freed.
|
|
*/
|
|
char *
|
|
zpool_get_cmd_search_path(void)
|
|
{
|
|
const char *env;
|
|
char *sp = NULL;
|
|
|
|
env = getenv("ZPOOL_SCRIPTS_PATH");
|
|
if (env != NULL)
|
|
return (strdup(env));
|
|
|
|
env = getenv("HOME");
|
|
if (env != NULL) {
|
|
if (asprintf(&sp, "%s/.zpool.d:%s",
|
|
env, ZPOOL_SCRIPTS_DIR) != -1) {
|
|
return (sp);
|
|
}
|
|
}
|
|
|
|
if (asprintf(&sp, "%s", ZPOOL_SCRIPTS_DIR) != -1)
|
|
return (sp);
|
|
|
|
return (NULL);
|
|
}
|
|
|
|
/* Thread function run for each vdev */
|
|
static void
|
|
vdev_run_cmd_thread(void *cb_cmd_data)
|
|
{
|
|
vdev_cmd_data_t *data = cb_cmd_data;
|
|
char *cmd = NULL, *cmddup, *cmdrest;
|
|
|
|
cmddup = strdup(data->cmd);
|
|
if (cmddup == NULL)
|
|
return;
|
|
|
|
cmdrest = cmddup;
|
|
while ((cmd = strtok_r(cmdrest, ",", &cmdrest))) {
|
|
char *dir = NULL, *sp, *sprest;
|
|
char fullpath[MAXPATHLEN];
|
|
|
|
if (strchr(cmd, '/') != NULL)
|
|
continue;
|
|
|
|
sp = zpool_get_cmd_search_path();
|
|
if (sp == NULL)
|
|
continue;
|
|
|
|
sprest = sp;
|
|
while ((dir = strtok_r(sprest, ":", &sprest))) {
|
|
if (snprintf(fullpath, sizeof (fullpath),
|
|
"%s/%s", dir, cmd) == -1)
|
|
continue;
|
|
|
|
if (access(fullpath, X_OK) == 0) {
|
|
vdev_run_cmd(data, fullpath);
|
|
break;
|
|
}
|
|
}
|
|
free(sp);
|
|
}
|
|
free(cmddup);
|
|
}
|
|
|
|
/* For each vdev in the pool run a command */
|
|
static int
|
|
for_each_vdev_run_cb(void *zhp_data, nvlist_t *nv, void *cb_vcdl)
|
|
{
|
|
vdev_cmd_data_list_t *vcdl = cb_vcdl;
|
|
vdev_cmd_data_t *data;
|
|
const char *path = NULL;
|
|
char *vname = NULL;
|
|
const char *vdev_enc_sysfs_path = NULL;
|
|
int i, match = 0;
|
|
zpool_handle_t *zhp = zhp_data;
|
|
|
|
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
|
|
return (1);
|
|
|
|
/* Make sure we're getting the updated enclosure sysfs path */
|
|
update_vdev_config_dev_sysfs_path(nv, path,
|
|
ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
|
|
|
|
nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
|
|
&vdev_enc_sysfs_path);
|
|
|
|
/* Spares show more than once if they're in use, so skip if exists */
|
|
for (i = 0; i < vcdl->count; i++) {
|
|
if ((strcmp(vcdl->data[i].path, path) == 0) &&
|
|
(strcmp(vcdl->data[i].pool, zpool_get_name(zhp)) == 0)) {
|
|
/* vdev already exists, skip it */
|
|
return (0);
|
|
}
|
|
}
|
|
|
|
/* Check for selected vdevs here, if any */
|
|
for (i = 0; i < vcdl->vdev_names_count; i++) {
|
|
vname = zpool_vdev_name(g_zfs, zhp, nv, vcdl->cb_name_flags);
|
|
if (strcmp(vcdl->vdev_names[i], vname) == 0) {
|
|
free(vname);
|
|
match = 1;
|
|
break; /* match */
|
|
}
|
|
free(vname);
|
|
}
|
|
|
|
/* If we selected vdevs, and this isn't one of them, then bail out */
|
|
if (!match && vcdl->vdev_names_count)
|
|
return (0);
|
|
|
|
/*
|
|
* Resize our array and add in the new element.
|
|
*/
|
|
if (!(vcdl->data = realloc(vcdl->data,
|
|
sizeof (*vcdl->data) * (vcdl->count + 1))))
|
|
return (ENOMEM); /* couldn't realloc */
|
|
|
|
data = &vcdl->data[vcdl->count];
|
|
|
|
data->pool = strdup(zpool_get_name(zhp));
|
|
data->path = strdup(path);
|
|
data->upath = zfs_get_underlying_path(path);
|
|
data->cmd = vcdl->cmd;
|
|
data->lines = data->cols = NULL;
|
|
data->lines_cnt = data->cols_cnt = 0;
|
|
if (vdev_enc_sysfs_path)
|
|
data->vdev_enc_sysfs_path = strdup(vdev_enc_sysfs_path);
|
|
else
|
|
data->vdev_enc_sysfs_path = NULL;
|
|
|
|
vcdl->count++;
|
|
|
|
return (0);
|
|
}
|
|
|
|
/*
 * Get the names and count of the vdevs: per-pool callback that walks the
 * pool's vdev tree, accumulating an entry per vdev into the vcdl list.
 */
static int
all_pools_for_each_vdev_gather_cb(zpool_handle_t *zhp, void *cb_vcdl)
{
	return (for_each_vdev(zhp, for_each_vdev_run_cb, cb_vcdl));
}
|
|
|
|
/*
|
|
* Now that vcdl is populated with our complete list of vdevs, spawn
|
|
* off the commands.
|
|
*/
|
|
static void
|
|
all_pools_for_each_vdev_run_vcdl(vdev_cmd_data_list_t *vcdl)
|
|
{
|
|
tpool_t *t;
|
|
|
|
t = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);
|
|
if (t == NULL)
|
|
return;
|
|
|
|
/* Spawn off the command for each vdev */
|
|
for (int i = 0; i < vcdl->count; i++) {
|
|
(void) tpool_dispatch(t, vdev_run_cmd_thread,
|
|
(void *) &vcdl->data[i]);
|
|
}
|
|
|
|
/* Wait for threads to finish */
|
|
tpool_wait(t);
|
|
tpool_destroy(t);
|
|
}
|
|
|
|
/*
|
|
* Run command 'cmd' on all vdevs in all pools in argv. Saves the first line of
|
|
* output from the command in vcdk->data[].line for all vdevs. If you want
|
|
* to run the command on only certain vdevs, fill in g_zfs, vdev_names,
|
|
* vdev_names_count, and cb_name_flags. Otherwise leave them as zero.
|
|
*
|
|
* Returns a vdev_cmd_data_list_t that must be freed with
|
|
* free_vdev_cmd_data_list();
|
|
*/
|
|
vdev_cmd_data_list_t *
|
|
all_pools_for_each_vdev_run(int argc, char **argv, char *cmd,
|
|
libzfs_handle_t *g_zfs, char **vdev_names, int vdev_names_count,
|
|
int cb_name_flags)
|
|
{
|
|
vdev_cmd_data_list_t *vcdl;
|
|
vcdl = safe_malloc(sizeof (vdev_cmd_data_list_t));
|
|
vcdl->cmd = cmd;
|
|
|
|
vcdl->vdev_names = vdev_names;
|
|
vcdl->vdev_names_count = vdev_names_count;
|
|
vcdl->cb_name_flags = cb_name_flags;
|
|
vcdl->g_zfs = g_zfs;
|
|
|
|
/* Gather our list of all vdevs in all pools */
|
|
for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
|
|
B_FALSE, all_pools_for_each_vdev_gather_cb, vcdl);
|
|
|
|
/* Run command on all vdevs in all pools */
|
|
all_pools_for_each_vdev_run_vcdl(vcdl);
|
|
|
|
/*
|
|
* vcdl->data[] now contains all the column names and values for each
|
|
* vdev. We need to process that into a master list of unique column
|
|
* names, and figure out the width of each column.
|
|
*/
|
|
process_unique_cmd_columns(vcdl);
|
|
|
|
return (vcdl);
|
|
}
|
|
|
|
/*
|
|
* Free the vdev_cmd_data_list_t created by all_pools_for_each_vdev_run()
|
|
*/
|
|
void
|
|
free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl)
|
|
{
|
|
free(vcdl->uniq_cols);
|
|
free(vcdl->uniq_cols_width);
|
|
|
|
for (int i = 0; i < vcdl->count; i++) {
|
|
free(vcdl->data[i].path);
|
|
free(vcdl->data[i].pool);
|
|
free(vcdl->data[i].upath);
|
|
|
|
for (int j = 0; j < vcdl->data[i].lines_cnt; j++)
|
|
free(vcdl->data[i].lines[j]);
|
|
|
|
free(vcdl->data[i].lines);
|
|
|
|
for (int j = 0; j < vcdl->data[i].cols_cnt; j++)
|
|
free(vcdl->data[i].cols[j]);
|
|
|
|
free(vcdl->data[i].cols);
|
|
free(vcdl->data[i].vdev_enc_sysfs_path);
|
|
}
|
|
free(vcdl->data);
|
|
free(vcdl);
|
|
}
|