Add ability to scrub from last scrubbed txg

Some users might want to scrub only new data because they would like
to know whether their recent writes were corrupted.  This PR adds the
ability to scrub only newly written data.

This introduces new `last_scrubbed_txg` property, indicating the
transaction group (TXG) up to which the most recent scrub operation
has checked and repaired the dataset, so users can run scrub only
from the last saved point. We use a scn_max_txg and scn_min_txg
which are already built into scrub, to accomplish that.

Reviewed-by: Allan Jude <allan@klarasystems.com>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Mariusz Zaborski <mariusz.zaborski@klarasystems.com>
Sponsored-By: Wasabi Technology, Inc.
Sponsored-By: Klara Inc.
Closes #16301
This commit is contained in:
Mariusz Zaborski 2024-12-04 20:21:45 +01:00 committed by Brian Behlendorf
parent 5988de77b0
commit 3b0c1131ef
19 changed files with 264 additions and 35 deletions

View File

@ -512,7 +512,8 @@ get_usage(zpool_help_t idx)
return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
"[<device> ...]\n"));
case HELP_SCRUB:
return (gettext("\tscrub [-s | -p] [-w] [-e] <pool> ...\n"));
return (gettext("\tscrub [-e | -s | -p | -C] [-w] "
"<pool> ...\n"));
case HELP_RESILVER:
return (gettext("\tresilver <pool> ...\n"));
case HELP_TRIM:
@ -8429,12 +8430,13 @@ wait_callback(zpool_handle_t *zhp, void *data)
}
/*
* zpool scrub [-s | -p] [-w] [-e] <pool> ...
* zpool scrub [-e | -s | -p | -C] [-w] <pool> ...
*
* -e Only scrub blocks in the error log.
* -s Stop. Stops any in-progress scrub.
* -p Pause. Pause in-progress scrub.
* -w Wait. Blocks until scrub has completed.
* -C Scrub from last saved txg.
*/
int
zpool_do_scrub(int argc, char **argv)
@ -8450,9 +8452,10 @@ zpool_do_scrub(int argc, char **argv)
boolean_t is_error_scrub = B_FALSE;
boolean_t is_pause = B_FALSE;
boolean_t is_stop = B_FALSE;
boolean_t is_txg_continue = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "spwe")) != -1) {
while ((c = getopt(argc, argv, "spweC")) != -1) {
switch (c) {
case 'e':
is_error_scrub = B_TRUE;
@ -8466,6 +8469,9 @@ zpool_do_scrub(int argc, char **argv)
case 'w':
wait = B_TRUE;
break;
case 'C':
is_txg_continue = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
@ -8477,6 +8483,18 @@ zpool_do_scrub(int argc, char **argv)
(void) fprintf(stderr, gettext("invalid option "
"combination :-s and -p are mutually exclusive\n"));
usage(B_FALSE);
} else if (is_pause && is_txg_continue) {
(void) fprintf(stderr, gettext("invalid option "
"combination :-p and -C are mutually exclusive\n"));
usage(B_FALSE);
} else if (is_stop && is_txg_continue) {
(void) fprintf(stderr, gettext("invalid option "
"combination :-s and -C are mutually exclusive\n"));
usage(B_FALSE);
} else if (is_error_scrub && is_txg_continue) {
(void) fprintf(stderr, gettext("invalid option "
"combination :-e and -C are mutually exclusive\n"));
usage(B_FALSE);
} else {
if (is_error_scrub)
cb.cb_type = POOL_SCAN_ERRORSCRUB;
@ -8485,6 +8503,8 @@ zpool_do_scrub(int argc, char **argv)
cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
} else if (is_stop) {
cb.cb_type = POOL_SCAN_NONE;
} else if (is_txg_continue) {
cb.cb_scrub_cmd = POOL_SCRUB_FROM_LAST_TXG;
} else {
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
}

View File

@ -381,6 +381,7 @@ typedef struct dmu_buf {
#define DMU_POOL_CREATION_VERSION "creation_version"
#define DMU_POOL_SCAN "scan"
#define DMU_POOL_ERRORSCRUB "error_scrub"
#define DMU_POOL_LAST_SCRUBBED_TXG "last_scrubbed_txg"
#define DMU_POOL_FREE_BPOBJ "free_bpobj"
#define DMU_POOL_BPTREE_OBJ "bptree_obj"
#define DMU_POOL_EMPTY_BPOBJ "empty_bpobj"

View File

@ -179,6 +179,12 @@ typedef struct dsl_scan {
dsl_errorscrub_phys_t errorscrub_phys;
} dsl_scan_t;
/*
 * Argument block for dsl_scan_setup_sync(): the scan function to run plus
 * an optional txg range.  A txgend of 0 means "scan up to the current
 * syncing txg" (see dsl_scan_setup_sync, which substitutes tx->tx_txg).
 */
typedef struct {
	pool_scan_func_t func;	/* scan type (e.g. POOL_SCAN_SCRUB) */
	uint64_t txgstart;	/* first txg to scan; 0 = from the beginning */
	uint64_t txgend;	/* last txg to scan; 0 = current syncing txg */
} setup_sync_arg_t;
typedef struct dsl_scan_io_queue dsl_scan_io_queue_t;
void scan_init(void);
@ -189,7 +195,8 @@ void dsl_scan_setup_sync(void *, dmu_tx_t *);
void dsl_scan_fini(struct dsl_pool *dp);
void dsl_scan_sync(struct dsl_pool *, dmu_tx_t *);
int dsl_scan_cancel(struct dsl_pool *);
int dsl_scan(struct dsl_pool *, pool_scan_func_t);
int dsl_scan(struct dsl_pool *, pool_scan_func_t, uint64_t starttxg,
uint64_t txgend);
void dsl_scan_assess_vdev(struct dsl_pool *dp, vdev_t *vd);
boolean_t dsl_scan_scrubbing(const struct dsl_pool *dp);
boolean_t dsl_errorscrubbing(const struct dsl_pool *dp);

View File

@ -265,6 +265,7 @@ typedef enum {
ZPOOL_PROP_DEDUP_TABLE_SIZE,
ZPOOL_PROP_DEDUP_TABLE_QUOTA,
ZPOOL_PROP_DEDUPCACHED,
ZPOOL_PROP_LAST_SCRUBBED_TXG,
ZPOOL_NUM_PROPS
} zpool_prop_t;
@ -1088,6 +1089,7 @@ typedef enum pool_scan_func {
typedef enum pool_scrub_cmd {
POOL_SCRUB_NORMAL = 0,
POOL_SCRUB_PAUSE,
POOL_SCRUB_FROM_LAST_TXG,
POOL_SCRUB_FLAGS_END
} pool_scrub_cmd_t;

View File

@ -822,6 +822,8 @@ extern void spa_l2cache_drop(spa_t *spa);
/* scanning */
extern int spa_scan(spa_t *spa, pool_scan_func_t func);
extern int spa_scan_range(spa_t *spa, pool_scan_func_t func, uint64_t txgstart,
uint64_t txgend);
extern int spa_scan_stop(spa_t *spa);
extern int spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t flag);
@ -1080,6 +1082,7 @@ extern uint64_t spa_get_deadman_failmode(spa_t *spa);
extern void spa_set_deadman_failmode(spa_t *spa, const char *failmode);
extern boolean_t spa_suspended(spa_t *spa);
extern uint64_t spa_bootfs(spa_t *spa);
extern uint64_t spa_get_last_scrubbed_txg(spa_t *spa);
extern uint64_t spa_delegation(spa_t *spa);
extern objset_t *spa_meta_objset(spa_t *spa);
extern space_map_t *spa_syncing_log_sm(spa_t *spa);

View File

@ -318,6 +318,7 @@ struct spa {
uint64_t spa_scan_pass_scrub_spent_paused; /* total paused */
uint64_t spa_scan_pass_exam; /* examined bytes per pass */
uint64_t spa_scan_pass_issued; /* issued bytes per pass */
uint64_t spa_scrubbed_last_txg; /* last txg scrubbed */
/* error scrub pause time in milliseconds */
uint64_t spa_scan_pass_errorscrub_pause;

View File

@ -3132,7 +3132,8 @@
<enumerator name='ZPOOL_PROP_DEDUP_TABLE_SIZE' value='36'/>
<enumerator name='ZPOOL_PROP_DEDUP_TABLE_QUOTA' value='37'/>
<enumerator name='ZPOOL_PROP_DEDUPCACHED' value='38'/>
<enumerator name='ZPOOL_NUM_PROPS' value='39'/>
<enumerator name='ZPOOL_PROP_LAST_SCRUBBED_TXG' value='39'/>
<enumerator name='ZPOOL_NUM_PROPS' value='40'/>
</enum-decl>
<typedef-decl name='zpool_prop_t' type-id='af1ba157' id='5d0c23fb'/>
<typedef-decl name='regoff_t' type-id='95e97e5e' id='54a2a2a8'/>
@ -5984,7 +5985,8 @@
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_SCRUB_NORMAL' value='0'/>
<enumerator name='POOL_SCRUB_PAUSE' value='1'/>
<enumerator name='POOL_SCRUB_FLAGS_END' value='2'/>
<enumerator name='POOL_SCRUB_FROM_LAST_TXG' value='2'/>
<enumerator name='POOL_SCRUB_FLAGS_END' value='3'/>
</enum-decl>
<typedef-decl name='pool_scrub_cmd_t' type-id='a1474cbd' id='b51cf3c2'/>
<enum-decl name='zpool_errata' id='d9abbf54'>

View File

@ -28,7 +28,7 @@
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\" Copyright (c) 2023, Klara Inc.
.\"
.Dd July 29, 2024
.Dd November 18, 2024
.Dt ZPOOLPROPS 7
.Os
.
@ -135,6 +135,19 @@ A unique identifier for the pool.
The current health of the pool.
Health can be one of
.Sy ONLINE , DEGRADED , FAULTED , OFFLINE, REMOVED , UNAVAIL .
.It Sy last_scrubbed_txg
Indicates the transaction group (TXG) up to which the most recent scrub
operation has checked and repaired the dataset.
This provides insight into the data integrity status of the pool at
a specific point in time.
.Xr zpool-scrub 8
can utilize this property to scan only data that has changed since the last
scrub completed, when given the
.Fl C
flag.
This property is not updated when performing an error scrub with the
.Fl e
flag.
.It Sy leaked
Space not released while
.Sy freeing

View File

@ -26,7 +26,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd June 22, 2023
.Dd November 18, 2024
.Dt ZPOOL-SCRUB 8
.Os
.
@ -36,9 +36,8 @@
.Sh SYNOPSIS
.Nm zpool
.Cm scrub
.Op Fl s Ns | Ns Fl p
.Op Ns Fl e | Ns Fl p | Fl s Ns | Fl C Ns
.Op Fl w
.Op Fl e
.Ar pool Ns
.
.Sh DESCRIPTION
@ -114,6 +113,10 @@ The pool must have been scrubbed at least once with the
feature enabled to use this option.
Error scrubbing cannot be run simultaneously with regular scrubbing or
resilvering, nor can it be run when a regular scrub is paused.
.It Fl C
Continue the scrub from the last saved txg (see the zpool
.Sy last_scrubbed_txg
property).
.El
.Sh EXAMPLES
.Ss Example 1

View File

@ -128,6 +128,9 @@ zpool_prop_init(void)
zprop_register_number(ZPOOL_PROP_DEDUP_TABLE_SIZE, "dedup_table_size",
0, PROP_READONLY, ZFS_TYPE_POOL, "<size>", "DDTSIZE", B_FALSE,
sfeatures);
zprop_register_number(ZPOOL_PROP_LAST_SCRUBBED_TXG,
"last_scrubbed_txg", 0, PROP_READONLY, ZFS_TYPE_POOL, "<txg>",
"LAST_SCRUBBED_TXG", B_FALSE, sfeatures);
/* default number properties */
zprop_register_number(ZPOOL_PROP_VERSION, "version", SPA_VERSION,

View File

@ -231,6 +231,9 @@ static uint_t zfs_resilver_defer_percent = 10;
((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
#define DSL_SCAN_IS_SCRUB(scn) \
((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB)
/*
* Enable/disable the processing of the free_bpobj object.
*/
@ -855,15 +858,15 @@ dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
setup_sync_arg_t *setup_sync_arg = (setup_sync_arg_t *)arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
pool_scan_func_t *funcp = arg;
dmu_object_type_t ot = 0;
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
ASSERT(!dsl_scan_is_running(scn));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
ASSERT3U(setup_sync_arg->func, >, POOL_SCAN_NONE);
ASSERT3U(setup_sync_arg->func, <, POOL_SCAN_FUNCS);
memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));
/*
@ -873,10 +876,14 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
dsl_errorscrub_sync_state(scn, tx);
scn->scn_phys.scn_func = *funcp;
scn->scn_phys.scn_func = setup_sync_arg->func;
scn->scn_phys.scn_state = DSS_SCANNING;
scn->scn_phys.scn_min_txg = 0;
scn->scn_phys.scn_max_txg = tx->tx_txg;
scn->scn_phys.scn_min_txg = setup_sync_arg->txgstart;
if (setup_sync_arg->txgend == 0) {
scn->scn_phys.scn_max_txg = tx->tx_txg;
} else {
scn->scn_phys.scn_max_txg = setup_sync_arg->txgend;
}
scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
scn->scn_phys.scn_start_time = gethrestime_sec();
scn->scn_phys.scn_errors = 0;
@ -963,7 +970,7 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
spa_history_log_internal(spa, "scan setup", tx,
"func=%u mintxg=%llu maxtxg=%llu",
*funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
setup_sync_arg->func, (u_longlong_t)scn->scn_phys.scn_min_txg,
(u_longlong_t)scn->scn_phys.scn_max_txg);
}
@ -973,10 +980,16 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
* error scrub.
*/
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func, uint64_t txgstart,
uint64_t txgend)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
setup_sync_arg_t setup_sync_arg;
if (func != POOL_SCAN_SCRUB && (txgstart != 0 || txgend != 0)) {
return (EINVAL);
}
/*
* Purge all vdev caches and probe all devices. We do this here
@ -1027,8 +1040,13 @@ dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
return (SET_ERROR(err));
}
setup_sync_arg.func = func;
setup_sync_arg.txgstart = txgstart;
setup_sync_arg.txgend = txgend;
return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
dsl_scan_setup_sync, &setup_sync_arg, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static void
@ -1116,15 +1134,24 @@ dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
spa_notify_waiters(spa);
if (dsl_scan_restarting(scn, tx))
if (dsl_scan_restarting(scn, tx)) {
spa_history_log_internal(spa, "scan aborted, restarting", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
else if (!complete)
} else if (!complete) {
spa_history_log_internal(spa, "scan cancelled", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
else
} else {
spa_history_log_internal(spa, "scan done", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
if (DSL_SCAN_IS_SCRUB(scn)) {
VERIFY0(zap_update(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LAST_SCRUBBED_TXG,
sizeof (uint64_t), 1,
&scn->scn_phys.scn_max_txg, tx));
spa->spa_scrubbed_last_txg = scn->scn_phys.scn_max_txg;
}
}
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
spa->spa_scrub_active = B_FALSE;
@ -4330,14 +4357,18 @@ dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
* current scan progress is below zfs_resilver_defer_percent.
*/
if (dsl_scan_restarting(scn, tx) || restart_early) {
pool_scan_func_t func = POOL_SCAN_SCRUB;
setup_sync_arg_t setup_sync_arg = {
.func = POOL_SCAN_SCRUB,
.txgstart = 0,
.txgend = 0,
};
dsl_scan_done(scn, B_FALSE, tx);
if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
func = POOL_SCAN_RESILVER;
setup_sync_arg.func = POOL_SCAN_RESILVER;
zfs_dbgmsg("restarting scan func=%u on %s txg=%llu early=%d",
func, dp->dp_spa->spa_name, (longlong_t)tx->tx_txg,
restart_early);
dsl_scan_setup_sync(&func, tx);
setup_sync_arg.func, dp->dp_spa->spa_name,
(longlong_t)tx->tx_txg, restart_early);
dsl_scan_setup_sync(&setup_sync_arg, tx);
}
/*

View File

@ -451,9 +451,10 @@ spa_prop_get_config(spa_t *spa, nvlist_t *nv)
spa_prop_add_list(nv, ZPOOL_PROP_DEDUP_TABLE_SIZE, NULL,
ddt_get_ddt_dsize(spa), src);
spa_prop_add_list(nv, ZPOOL_PROP_HEALTH, NULL,
rvd->vdev_state, src);
spa_prop_add_list(nv, ZPOOL_PROP_LAST_SCRUBBED_TXG, NULL,
spa_get_last_scrubbed_txg(spa), src);
version = spa_version(spa);
if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
@ -4727,6 +4728,12 @@ spa_ld_get_props(spa_t *spa)
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/* Load the last scrubbed txg. */
error = spa_dir_prop(spa, DMU_POOL_LAST_SCRUBBED_TXG,
&spa->spa_scrubbed_last_txg, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the livelist deletion field. If a livelist is queued for
* deletion, indicate that in the spa
@ -8869,6 +8876,13 @@ spa_scan_stop(spa_t *spa)
int
spa_scan(spa_t *spa, pool_scan_func_t func)
{
return (spa_scan_range(spa, func, 0, 0));
}
int
spa_scan_range(spa_t *spa, pool_scan_func_t func, uint64_t txgstart,
uint64_t txgend)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
@ -8879,6 +8893,9 @@ spa_scan(spa_t *spa, pool_scan_func_t func)
!spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
return (SET_ERROR(ENOTSUP));
if (func != POOL_SCAN_SCRUB && (txgstart != 0 || txgend != 0))
return (SET_ERROR(ENOTSUP));
/*
* If a resilver was requested, but there is no DTL on a
* writeable leaf device, we have nothing to do.
@ -8893,7 +8910,7 @@ spa_scan(spa_t *spa, pool_scan_func_t func)
!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG))
return (SET_ERROR(ENOTSUP));
return (dsl_scan(spa->spa_dsl_pool, func));
return (dsl_scan(spa->spa_dsl_pool, func, txgstart, txgend));
}
/*
@ -10976,6 +10993,7 @@ EXPORT_SYMBOL(spa_l2cache_drop);
/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_range);
EXPORT_SYMBOL(spa_scan_stop);
/* spa syncing */

View File

@ -2676,6 +2676,12 @@ spa_mode(spa_t *spa)
return (spa->spa_mode);
}
/*
 * Return the txg up to which this pool was last fully scrubbed.
 * spa_scrubbed_last_txg is loaded from DMU_POOL_LAST_SCRUBBED_TXG at
 * import time and updated when a (non-error) scrub completes; it is 0
 * if the pool has never completed a scrub.
 */
uint64_t
spa_get_last_scrubbed_txg(spa_t *spa)
{
	return (spa->spa_scrubbed_last_txg);
}
uint64_t
spa_bootfs(spa_t *spa)
{

View File

@ -3811,9 +3811,15 @@ raidz_reflow_complete_sync(void *arg, dmu_tx_t *tx)
* setup a scrub. All the data has been successfully copied
* but we have not validated any checksums.
*/
pool_scan_func_t func = POOL_SCAN_SCRUB;
if (zfs_scrub_after_expand && dsl_scan_setup_check(&func, tx) == 0)
dsl_scan_setup_sync(&func, tx);
setup_sync_arg_t setup_sync_arg = {
.func = POOL_SCAN_SCRUB,
.txgstart = 0,
.txgend = 0,
};
if (zfs_scrub_after_expand &&
dsl_scan_setup_check(&setup_sync_arg.func, tx) == 0) {
dsl_scan_setup_sync(&setup_sync_arg, tx);
}
}
/*

View File

@ -345,10 +345,14 @@ vdev_rebuild_complete_sync(void *arg, dmu_tx_t *tx)
* While we're in syncing context take the opportunity to
* setup the scrub when there are no more active rebuilds.
*/
pool_scan_func_t func = POOL_SCAN_SCRUB;
if (dsl_scan_setup_check(&func, tx) == 0 &&
setup_sync_arg_t setup_sync_arg = {
.func = POOL_SCAN_SCRUB,
.txgstart = 0,
.txgend = 0,
};
if (dsl_scan_setup_check(&setup_sync_arg.func, tx) == 0 &&
zfs_rebuild_scrub_enabled) {
dsl_scan_setup_sync(&func, tx);
dsl_scan_setup_sync(&setup_sync_arg, tx);
}
cv_broadcast(&vd->vdev_rebuild_cv);

View File

@ -1718,6 +1718,9 @@ zfs_ioc_pool_scrub(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
error = spa_scrub_pause_resume(spa, POOL_SCRUB_PAUSE);
} else if (scan_type == POOL_SCAN_NONE) {
error = spa_scan_stop(spa);
} else if (scan_cmd == POOL_SCRUB_FROM_LAST_TXG) {
error = spa_scan_range(spa, scan_type,
spa_get_last_scrubbed_txg(spa), 0);
} else {
error = spa_scan(spa, scan_type);
}

View File

@ -1225,6 +1225,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_root/zpool_scrub/zpool_scrub_multiple_copies.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_txg_continue_from_last.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_001_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_002_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_003_pos.ksh \

View File

@ -63,6 +63,7 @@ typeset -a properties=(
"bcloneused"
"bclonesaved"
"bcloneratio"
"last_scrubbed_txg"
"feature@async_destroy"
"feature@empty_bpobj"
"feature@lz4_compress"

View File

@ -0,0 +1,104 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2023, Klara Inc.
#
# This software was developed by
# Mariusz Zaborski <mariusz.zaborski@klarasystems.com>
# under sponsorship from Wasabi Technology, Inc. and Klara Inc.
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_scrub/zpool_scrub.cfg
. $STF_SUITE/tests/functional/cli_root/zpool_import/zpool_import.kshlib
#
# DESCRIPTION:
#	Verify scrub -C
#
# STRATEGY:
#	1. Create a pool and create one file.
#	2. Verify that the last_scrubbed_txg property is 0.
#	3. Run scrub.
#	4. Verify that the last_scrubbed_txg property is set.
#	5. Create second file.
#	6. Invalidate both files.
#	7. Run scrub only from last point.
#	8. Verify that only one file, that was created with newer txg,
#	   was detected.
#
verify_runnable "global"
# Remove all injected faults and the files this test created.
function cleanup
{
	log_must zinject -c all
	log_must rm -f $mntpnt/f1
	log_must rm -f $mntpnt/f2
}
log_onexit cleanup
log_assert "Verify scrub -C."
# Create one file.
mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
log_must file_write -b 1048576 -c 10 -o create -d 0 -f $mntpnt/f1
log_must sync_pool $TESTPOOL true
f1txg=$(get_last_txg_synced $TESTPOOL)
# Verify that last_scrubbed_txg isn't set (pool has never been scrubbed).
zpoollasttxg=$(zpool get -H -o value last_scrubbed_txg $TESTPOOL)
log_must [ $zpoollasttxg -eq 0 ]
# Run scrub.
log_must zpool scrub -w $TESTPOOL
# Verify that last_scrubbed_txg is set.
zpoollasttxg=$(zpool get -H -o value last_scrubbed_txg $TESTPOOL)
log_must [ $zpoollasttxg -ne 0 ]
# Create second file.
log_must file_write -b 1048576 -c 10 -o create -d 0 -f $mntpnt/f2
log_must sync_pool $TESTPOOL true
f2txg=$(get_last_txg_synced $TESTPOOL)
# Make sure that the sync txg are different, so f2 lies beyond the
# saved scrub point while f1 does not.
log_must [ $f1txg -ne $f2txg ]
# Insert faults.
log_must zinject -a -t data -e io -T read $mntpnt/f1
log_must zinject -a -t data -e io -T read $mntpnt/f2
# Run scrub from last saved point.
log_must zpool scrub -w -C $TESTPOOL
# Verify that only newer file was detected.
log_mustnot eval "zpool status -v $TESTPOOL | grep '$mntpnt/f1'"
log_must eval "zpool status -v $TESTPOOL | grep '$mntpnt/f2'"
# Verify that both files are corrupted (a full scrub sees f1 too,
# proving -C skipped it only because of the txg cutoff).
log_must zpool scrub -w $TESTPOOL
log_must eval "zpool status -v $TESTPOOL | grep '$mntpnt/f1'"
log_must eval "zpool status -v $TESTPOOL | grep '$mntpnt/f2'"
log_pass "Verified scrub -C show expected status."