Linux 6.12 compat: Rename range_tree_* to zfs_range_tree_*

Linux 6.12 has conflicting range_tree_{find,destroy,clear} symbols, so rename ZFS's range_tree_* interfaces to zfs_range_tree_* to avoid the clash.

Signed-off-by: Ivan Volosyuk <Ivan.Volosyuk@gmail.com>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Rob Norris <robn@despairlabs.com>
Ivan Volosyuk authored this commit on 2025-01-30 21:26:49 +11:00; committed by Ameer Hamza
parent c4fa9c2962
commit 55b21552d3
29 changed files with 990 additions and 930 deletions
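
For illustration only, a hedged before/after sketch of a typical call site affected by this rename. The function and type names follow the renames shown in the diff below; the surrounding helper (track_one_extent) and its arguments are hypothetical and not part of the commit.

#include <sys/range_tree.h>

static void
track_one_extent(uint64_t offset, uint64_t size)
{
	/* was: range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0) */
	zfs_range_tree_t *rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
	    NULL, 0, 0);

	zfs_range_tree_add(rt, offset, size);		/* was range_tree_add() */
	if (zfs_range_tree_contains(rt, offset, size))	/* was range_tree_contains() */
		zfs_range_tree_remove(rt, offset, size); /* was range_tree_remove() */

	zfs_range_tree_vacate(rt, NULL, NULL);		/* was range_tree_vacate() */
	zfs_range_tree_destroy(rt);			/* was range_tree_destroy() */
}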


@@ -122,7 +122,7 @@ static int flagbits[256];
 static uint64_t max_inflight_bytes = 256 * 1024 * 1024; /* 256MB */
 static int leaked_objects = 0;
-static range_tree_t *mos_refd_objs;
+static zfs_range_tree_t *mos_refd_objs;
 static spa_t *spa;
 static objset_t *os;
 static boolean_t kernel_init_done;
@@ -325,7 +325,7 @@ typedef struct metaslab_verify {
 /*
 * What's currently allocated for this metaslab.
 */
-range_tree_t *mv_allocated;
+zfs_range_tree_t *mv_allocated;
 } metaslab_verify_t;
 typedef void ll_iter_t(dsl_deadlist_t *ll, void *arg);
@@ -417,7 +417,7 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
 uint64_t txg = sme->sme_txg;
 if (sme->sme_type == SM_ALLOC) {
-if (range_tree_contains(mv->mv_allocated,
+if (zfs_range_tree_contains(mv->mv_allocated,
 offset, size)) {
 (void) printf("ERROR: DOUBLE ALLOC: "
 "%llu [%llx:%llx] "
@@ -426,11 +426,11 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
 (u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
 (u_longlong_t)mv->mv_msid);
 } else {
-range_tree_add(mv->mv_allocated,
+zfs_range_tree_add(mv->mv_allocated,
 offset, size);
 }
 } else {
-if (!range_tree_contains(mv->mv_allocated,
+if (!zfs_range_tree_contains(mv->mv_allocated,
 offset, size)) {
 (void) printf("ERROR: DOUBLE FREE: "
 "%llu [%llx:%llx] "
@@ -439,7 +439,7 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
 (u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
 (u_longlong_t)mv->mv_msid);
 } else {
-range_tree_remove(mv->mv_allocated,
+zfs_range_tree_remove(mv->mv_allocated,
 offset, size);
 }
 }
@@ -614,11 +614,11 @@ livelist_metaslab_validate(spa_t *spa)
 (longlong_t)vd->vdev_ms_count);
 uint64_t shift, start;
-range_seg_type_t type =
+zfs_range_seg_type_t type =
 metaslab_calculate_range_tree_type(vd, m,
 &start, &shift);
 metaslab_verify_t mv;
-mv.mv_allocated = range_tree_create(NULL,
+mv.mv_allocated = zfs_range_tree_create(NULL,
 type, NULL, start, shift);
 mv.mv_vdid = vd->vdev_id;
 mv.mv_msid = m->ms_id;
@@ -633,8 +633,8 @@ livelist_metaslab_validate(spa_t *spa)
 spacemap_check_ms_sm(m->ms_sm, &mv);
 spacemap_check_sm_log(spa, &mv);
-range_tree_vacate(mv.mv_allocated, NULL, NULL);
-range_tree_destroy(mv.mv_allocated);
+zfs_range_tree_vacate(mv.mv_allocated, NULL, NULL);
+zfs_range_tree_destroy(mv.mv_allocated);
 zfs_btree_clear(&mv.mv_livelist_allocs);
 zfs_btree_destroy(&mv.mv_livelist_allocs);
 }
@@ -1633,9 +1633,9 @@ static void
 dump_metaslab_stats(metaslab_t *msp)
 {
 char maxbuf[32];
-range_tree_t *rt = msp->ms_allocatable;
+zfs_range_tree_t *rt = msp->ms_allocatable;
 zfs_btree_t *t = &msp->ms_allocatable_by_size;
-int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
+int free_pct = zfs_range_tree_space(rt) * 100 / msp->ms_size;
 /* max sure nicenum has enough space */
 _Static_assert(sizeof (maxbuf) >= NN_NUMBUF_SZ, "maxbuf truncated");
@@ -1668,7 +1668,7 @@ dump_metaslab(metaslab_t *msp)
 if (dump_opt['m'] > 2 && !dump_opt['L']) {
 mutex_enter(&msp->ms_lock);
 VERIFY0(metaslab_load(msp));
-range_tree_stat_verify(msp->ms_allocatable);
+zfs_range_tree_stat_verify(msp->ms_allocatable);
 dump_metaslab_stats(msp);
 metaslab_unload(msp);
 mutex_exit(&msp->ms_lock);
@@ -2292,12 +2292,12 @@ dump_dtl(vdev_t *vd, int indent)
 required ? "DTL-required" : "DTL-expendable");
 for (int t = 0; t < DTL_TYPES; t++) {
-range_tree_t *rt = vd->vdev_dtl[t];
-if (range_tree_space(rt) == 0)
+zfs_range_tree_t *rt = vd->vdev_dtl[t];
+if (zfs_range_tree_space(rt) == 0)
 continue;
 (void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
 indent + 2, "", name[t]);
-range_tree_walk(rt, dump_dtl_seg, prefix);
+zfs_range_tree_walk(rt, dump_dtl_seg, prefix);
 if (dump_opt['d'] > 5 && vd->vdev_children == 0)
 dump_spacemap(spa->spa_meta_objset,
 vd->vdev_dtl_sm);
@@ -6258,9 +6258,9 @@ load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
 return (0);
 if (sme->sme_type == SM_ALLOC)
-range_tree_add(svr->svr_allocd_segs, offset, size);
+zfs_range_tree_add(svr->svr_allocd_segs, offset, size);
 else
-range_tree_remove(svr->svr_allocd_segs, offset, size);
+zfs_range_tree_remove(svr->svr_allocd_segs, offset, size);
 return (0);
 }
@@ -6314,18 +6314,20 @@ zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
-ASSERT0(range_tree_space(svr->svr_allocd_segs));
-range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
+ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs));
+zfs_range_tree_t *allocs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
+NULL, 0, 0);
 for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
 metaslab_t *msp = vd->vdev_ms[msi];
-ASSERT0(range_tree_space(allocs));
+ASSERT0(zfs_range_tree_space(allocs));
 if (msp->ms_sm != NULL)
 VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
-range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
+zfs_range_tree_vacate(allocs, zfs_range_tree_add,
+svr->svr_allocd_segs);
 }
-range_tree_destroy(allocs);
+zfs_range_tree_destroy(allocs);
 iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
@@ -6334,12 +6336,12 @@ zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
 * because we have not allocated mappings for
 * it yet.
 */
-range_tree_clear(svr->svr_allocd_segs,
+zfs_range_tree_clear(svr->svr_allocd_segs,
 vdev_indirect_mapping_max_offset(vim),
 vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
-zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
-range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
+zcb->zcb_removing_size += zfs_range_tree_space(svr->svr_allocd_segs);
+zfs_range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
 spa_config_exit(spa, SCL_CONFIG, FTAG);
 }
@@ -6442,7 +6444,8 @@ checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
 * also verify that the entry is there to begin with.
 */
 mutex_enter(&ms->ms_lock);
-range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
+zfs_range_tree_remove(ms->ms_allocatable, sme->sme_offset,
+sme->sme_run);
 mutex_exit(&ms->ms_lock);
 cseea->cseea_checkpoint_size += sme->sme_run;
@@ -6573,9 +6576,9 @@ load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
 return (0);
 if (*uic_maptype == sme->sme_type)
-range_tree_add(ms->ms_allocatable, offset, size);
+zfs_range_tree_add(ms->ms_allocatable, offset, size);
 else
-range_tree_remove(ms->ms_allocatable, offset, size);
+zfs_range_tree_remove(ms->ms_allocatable, offset, size);
 return (0);
 }
@@ -6609,7 +6612,7 @@ load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
 (longlong_t)vd->vdev_ms_count);
 mutex_enter(&msp->ms_lock);
-range_tree_vacate(msp->ms_allocatable, NULL, NULL);
+zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL);
 /*
 * We don't want to spend the CPU manipulating the
@@ -6642,7 +6645,7 @@ load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
 mutex_enter(&msp->ms_lock);
-range_tree_vacate(msp->ms_allocatable, NULL, NULL);
+zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL);
 /*
 * We don't want to spend the CPU manipulating the
@@ -6666,7 +6669,7 @@ load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
 */
 ASSERT3U(ent_offset + ent_len, <=,
 msp->ms_start + msp->ms_size);
-range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
+zfs_range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
 }
 if (!msp->ms_loaded)
@@ -6812,7 +6815,7 @@ zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
 for (uint64_t inner_offset = 0;
 inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
 inner_offset += 1ULL << vd->vdev_ashift) {
-if (range_tree_contains(msp->ms_allocatable,
+if (zfs_range_tree_contains(msp->ms_allocatable,
 offset + inner_offset, 1ULL << vd->vdev_ashift)) {
 obsolete_bytes += 1ULL << vd->vdev_ashift;
 }
@@ -6895,10 +6898,10 @@ zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
 * not referenced, which is not a bug.
 */
 if (vd->vdev_ops == &vdev_indirect_ops) {
-range_tree_vacate(msp->ms_allocatable,
+zfs_range_tree_vacate(msp->ms_allocatable,
 NULL, NULL);
 } else {
-range_tree_vacate(msp->ms_allocatable,
+zfs_range_tree_vacate(msp->ms_allocatable,
 zdb_leak, vd);
 }
 if (msp->ms_loaded) {
@@ -7796,7 +7799,7 @@ verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
 * their respective ms_allocateable trees should not contain them.
 */
 mutex_enter(&ms->ms_lock);
-range_tree_verify_not_present(ms->ms_allocatable,
+zfs_range_tree_verify_not_present(ms->ms_allocatable,
 sme->sme_offset, sme->sme_run);
 mutex_exit(&ms->ms_lock);
@@ -7947,8 +7950,9 @@ verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
 * This way we ensure that none of the blocks that
 * are part of the checkpoint were freed by mistake.
 */
-range_tree_walk(ckpoint_msp->ms_allocatable,
-(range_tree_func_t *)range_tree_verify_not_present,
+zfs_range_tree_walk(ckpoint_msp->ms_allocatable,
+(zfs_range_tree_func_t *)
+zfs_range_tree_verify_not_present,
 current_msp->ms_allocatable);
 }
 }
@@ -8088,7 +8092,7 @@ static void
 mos_obj_refd(uint64_t obj)
 {
 if (obj != 0 && mos_refd_objs != NULL)
-range_tree_add(mos_refd_objs, obj, 1);
+zfs_range_tree_add(mos_refd_objs, obj, 1);
 }
 /*
@@ -8098,8 +8102,8 @@ static void
 mos_obj_refd_multiple(uint64_t obj)
 {
 if (obj != 0 && mos_refd_objs != NULL &&
-!range_tree_contains(mos_refd_objs, obj, 1))
-range_tree_add(mos_refd_objs, obj, 1);
+!zfs_range_tree_contains(mos_refd_objs, obj, 1))
+zfs_range_tree_add(mos_refd_objs, obj, 1);
 }
 static void
@@ -8296,8 +8300,8 @@ dump_mos_leaks(spa_t *spa)
 */
 uint64_t object = 0;
 while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
-if (range_tree_contains(mos_refd_objs, object, 1)) {
-range_tree_remove(mos_refd_objs, object, 1);
+if (zfs_range_tree_contains(mos_refd_objs, object, 1)) {
+zfs_range_tree_remove(mos_refd_objs, object, 1);
 } else {
 dmu_object_info_t doi;
 const char *name;
@@ -8315,11 +8319,11 @@ dump_mos_leaks(spa_t *spa)
 rv = 2;
 }
 }
-(void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
-if (!range_tree_is_empty(mos_refd_objs))
+(void) zfs_range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
+if (!zfs_range_tree_is_empty(mos_refd_objs))
 rv = 2;
-range_tree_vacate(mos_refd_objs, NULL, NULL);
-range_tree_destroy(mos_refd_objs);
+zfs_range_tree_vacate(mos_refd_objs, NULL, NULL);
+zfs_range_tree_destroy(mos_refd_objs);
 return (rv);
 }
@@ -8441,8 +8445,8 @@ dump_zpool(spa_t *spa)
 if (dump_opt['d'] || dump_opt['i']) {
 spa_feature_t f;
-mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
-0);
+mos_refd_objs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
+NULL, 0, 0);
 dump_objset(dp->dp_meta_objset);
 if (dump_opt['d'] >= 3) {
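
The zdb hunks above all follow one idiom: a ZFS_RANGE_SEG64 tree keyed by MOS object number, where each referenced object is recorded as a one-unit segment and later removed as it is found on disk, so anything left in the tree is a leak. A minimal userland-style sketch of that idiom, not taken from this commit (the on_leaked callback and check_leaks_sketch wrapper are hypothetical):

static void
on_leaked(void *arg, uint64_t obj, uint64_t count)
{
	(void) arg;
	(void) printf("leaked MOS object %llu (run of %llu)\n",
	    (u_longlong_t)obj, (u_longlong_t)count);
}

static int
check_leaks_sketch(const uint64_t *referenced, int nrefs)
{
	zfs_range_tree_t *refd = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
	    NULL, 0, 0);

	for (int i = 0; i < nrefs; i++) {
		if (!zfs_range_tree_contains(refd, referenced[i], 1))
			zfs_range_tree_add(refd, referenced[i], 1);
	}

	/* ... walk the on-disk objects and zfs_range_tree_remove() each ... */

	int rv = zfs_range_tree_is_empty(refd) ? 0 : 2;
	zfs_range_tree_walk(refd, on_leaked, NULL);

	zfs_range_tree_vacate(refd, NULL, NULL);
	zfs_range_tree_destroy(refd);
	return (rv);
}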


@@ -335,7 +335,7 @@ struct dnode {
 /* protected by dn_mtx: */
 kmutex_t dn_mtx;
 list_t dn_dirty_records[TXG_SIZE];
-struct range_tree *dn_free_ranges[TXG_SIZE];
+struct zfs_range_tree *dn_free_ranges[TXG_SIZE];
 uint64_t dn_allocated_txg;
 uint64_t dn_free_txg;
 uint64_t dn_assigned_txg;


@@ -139,7 +139,7 @@ void metaslab_set_selected_txg(metaslab_t *, uint64_t);
 extern int metaslab_debug_load;
-range_seg_type_t metaslab_calculate_range_tree_type(vdev_t *vdev,
+zfs_range_seg_type_t metaslab_calculate_range_tree_type(vdev_t *vdev,
 metaslab_t *msp, uint64_t *start, uint64_t *shift);
 #ifdef __cplusplus


@@ -398,8 +398,8 @@ struct metaslab {
 uint64_t ms_size;
 uint64_t ms_fragmentation;
-range_tree_t *ms_allocating[TXG_SIZE];
-range_tree_t *ms_allocatable;
+zfs_range_tree_t *ms_allocating[TXG_SIZE];
+zfs_range_tree_t *ms_allocatable;
 uint64_t ms_allocated_this_txg;
 uint64_t ms_allocating_total;
@@ -408,10 +408,12 @@ struct metaslab {
 * ms_free*tree only have entries while syncing, and are empty
 * between syncs.
 */
-range_tree_t *ms_freeing; /* to free this syncing txg */
-range_tree_t *ms_freed; /* already freed this syncing txg */
-range_tree_t *ms_defer[TXG_DEFER_SIZE];
-range_tree_t *ms_checkpointing; /* to add to the checkpoint */
+zfs_range_tree_t *ms_freeing; /* to free this syncing txg */
+/* already freed this syncing txg */
+zfs_range_tree_t *ms_freed;
+zfs_range_tree_t *ms_defer[TXG_DEFER_SIZE];
+/* to add to the checkpoint */
+zfs_range_tree_t *ms_checkpointing;
 /*
 * The ms_trim tree is the set of allocatable segments which are
@@ -421,7 +423,7 @@ struct metaslab {
 * is unloaded. Its purpose is to aggregate freed ranges to
 * facilitate efficient trimming.
 */
-range_tree_t *ms_trim;
+zfs_range_tree_t *ms_trim;
 boolean_t ms_condensing; /* condensing? */
 boolean_t ms_condense_wanted;
@@ -542,8 +544,8 @@ struct metaslab {
 * Allocs and frees that are committed to the vdev log spacemap but
 * not yet to this metaslab's spacemap.
 */
-range_tree_t *ms_unflushed_allocs;
-range_tree_t *ms_unflushed_frees;
+zfs_range_tree_t *ms_unflushed_allocs;
+zfs_range_tree_t *ms_unflushed_frees;
 /*
 * We have flushed entries up to but not including this TXG. In


@@ -39,23 +39,23 @@ extern "C" {
 #define RANGE_TREE_HISTOGRAM_SIZE 64
-typedef struct range_tree_ops range_tree_ops_t;
-typedef enum range_seg_type {
-RANGE_SEG32,
-RANGE_SEG64,
-RANGE_SEG_GAP,
-RANGE_SEG_NUM_TYPES,
-} range_seg_type_t;
+typedef struct zfs_range_tree_ops zfs_range_tree_ops_t;
+typedef enum zfs_range_seg_type {
+ZFS_RANGE_SEG32,
+ZFS_RANGE_SEG64,
+ZFS_RANGE_SEG_GAP,
+ZFS_RANGE_SEG_NUM_TYPES,
+} zfs_range_seg_type_t;
 /*
 * Note: the range_tree may not be accessed concurrently; consumers
 * must provide external locking if required.
 */
-typedef struct range_tree {
+typedef struct zfs_range_tree {
 zfs_btree_t rt_root; /* offset-ordered segment b-tree */
 uint64_t rt_space; /* sum of all segments in the map */
-range_seg_type_t rt_type; /* type of range_seg_t in use */
+zfs_range_seg_type_t rt_type; /* type of zfs_range_seg_t in use */
 /*
 * All data that is stored in the range tree must have a start higher
 * than or equal to rt_start, and all sizes and offsets must be
@@ -63,7 +63,7 @@ typedef struct range_tree {
 */
 uint8_t rt_shift;
 uint64_t rt_start;
-const range_tree_ops_t *rt_ops;
+const zfs_range_tree_ops_t *rt_ops;
 void *rt_arg;
 uint64_t rt_gap; /* allowable inter-segment gap */
@@ -73,7 +73,7 @@ typedef struct range_tree {
 * 2^i <= size of range in bytes < 2^(i+1)
 */
 uint64_t rt_histogram[RANGE_TREE_HISTOGRAM_SIZE];
-} range_tree_t;
+} zfs_range_tree_t;
 typedef struct range_seg32 {
 uint32_t rs_start; /* starting offset of this segment */
@@ -106,26 +106,26 @@ typedef range_seg_gap_t range_seg_max_t;
 * pointer is to a range seg of some type; when we need to do the actual math,
 * we'll figure out the real type.
 */
-typedef void range_seg_t;
-struct range_tree_ops {
-void (*rtop_create)(range_tree_t *rt, void *arg);
-void (*rtop_destroy)(range_tree_t *rt, void *arg);
-void (*rtop_add)(range_tree_t *rt, void *rs, void *arg);
-void (*rtop_remove)(range_tree_t *rt, void *rs, void *arg);
-void (*rtop_vacate)(range_tree_t *rt, void *arg);
+typedef void zfs_range_seg_t;
+struct zfs_range_tree_ops {
+void (*rtop_create)(zfs_range_tree_t *rt, void *arg);
+void (*rtop_destroy)(zfs_range_tree_t *rt, void *arg);
+void (*rtop_add)(zfs_range_tree_t *rt, void *rs, void *arg);
+void (*rtop_remove)(zfs_range_tree_t *rt, void *rs, void *arg);
+void (*rtop_vacate)(zfs_range_tree_t *rt, void *arg);
 };
 static inline uint64_t
-rs_get_start_raw(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_start_raw(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
 {
-ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
 switch (rt->rt_type) {
-case RANGE_SEG32:
+case ZFS_RANGE_SEG32:
 return (((const range_seg32_t *)rs)->rs_start);
-case RANGE_SEG64:
+case ZFS_RANGE_SEG64:
 return (((const range_seg64_t *)rs)->rs_start);
-case RANGE_SEG_GAP:
+case ZFS_RANGE_SEG_GAP:
 return (((const range_seg_gap_t *)rs)->rs_start);
 default:
 VERIFY(0);
@@ -134,15 +134,15 @@ rs_get_start_raw(const range_seg_t *rs, const range_tree_t *rt)
 }
 static inline uint64_t
-rs_get_end_raw(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_end_raw(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
 {
-ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
 switch (rt->rt_type) {
-case RANGE_SEG32:
+case ZFS_RANGE_SEG32:
 return (((const range_seg32_t *)rs)->rs_end);
-case RANGE_SEG64:
+case ZFS_RANGE_SEG64:
 return (((const range_seg64_t *)rs)->rs_end);
-case RANGE_SEG_GAP:
+case ZFS_RANGE_SEG_GAP:
 return (((const range_seg_gap_t *)rs)->rs_end);
 default:
 VERIFY(0);
@@ -151,19 +151,19 @@ rs_get_end_raw(const range_seg_t *rs, const range_tree_t *rt)
 }
 static inline uint64_t
-rs_get_fill_raw(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_fill_raw(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
 {
-ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
 switch (rt->rt_type) {
-case RANGE_SEG32: {
+case ZFS_RANGE_SEG32: {
 const range_seg32_t *r32 = (const range_seg32_t *)rs;
 return (r32->rs_end - r32->rs_start);
 }
-case RANGE_SEG64: {
+case ZFS_RANGE_SEG64: {
 const range_seg64_t *r64 = (const range_seg64_t *)rs;
 return (r64->rs_end - r64->rs_start);
 }
-case RANGE_SEG_GAP:
+case ZFS_RANGE_SEG_GAP:
 return (((const range_seg_gap_t *)rs)->rs_fill);
 default:
 VERIFY(0);
@@ -173,36 +173,36 @@ rs_get_fill_raw(const range_seg_t *rs, const range_tree_t *rt)
 }
 static inline uint64_t
-rs_get_start(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_start(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
 {
-return ((rs_get_start_raw(rs, rt) << rt->rt_shift) + rt->rt_start);
+return ((zfs_rs_get_start_raw(rs, rt) << rt->rt_shift) + rt->rt_start);
 }
 static inline uint64_t
-rs_get_end(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_end(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
 {
-return ((rs_get_end_raw(rs, rt) << rt->rt_shift) + rt->rt_start);
+return ((zfs_rs_get_end_raw(rs, rt) << rt->rt_shift) + rt->rt_start);
 }
 static inline uint64_t
-rs_get_fill(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_fill(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
 {
-return (rs_get_fill_raw(rs, rt) << rt->rt_shift);
+return (zfs_rs_get_fill_raw(rs, rt) << rt->rt_shift);
 }
 static inline void
-rs_set_start_raw(range_seg_t *rs, range_tree_t *rt, uint64_t start)
+zfs_rs_set_start_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t start)
 {
-ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
 switch (rt->rt_type) {
-case RANGE_SEG32:
+case ZFS_RANGE_SEG32:
 ASSERT3U(start, <=, UINT32_MAX);
 ((range_seg32_t *)rs)->rs_start = (uint32_t)start;
 break;
-case RANGE_SEG64:
+case ZFS_RANGE_SEG64:
 ((range_seg64_t *)rs)->rs_start = start;
 break;
-case RANGE_SEG_GAP:
+case ZFS_RANGE_SEG_GAP:
 ((range_seg_gap_t *)rs)->rs_start = start;
 break;
 default:
@@ -211,18 +211,18 @@ rs_set_start_raw(range_seg_t *rs, range_tree_t *rt, uint64_t start)
 }
 static inline void
-rs_set_end_raw(range_seg_t *rs, range_tree_t *rt, uint64_t end)
+zfs_rs_set_end_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t end)
 {
-ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
 switch (rt->rt_type) {
-case RANGE_SEG32:
+case ZFS_RANGE_SEG32:
 ASSERT3U(end, <=, UINT32_MAX);
 ((range_seg32_t *)rs)->rs_end = (uint32_t)end;
 break;
-case RANGE_SEG64:
+case ZFS_RANGE_SEG64:
 ((range_seg64_t *)rs)->rs_end = end;
 break;
-case RANGE_SEG_GAP:
+case ZFS_RANGE_SEG_GAP:
 ((range_seg_gap_t *)rs)->rs_end = end;
 break;
 default:
@@ -231,17 +231,18 @@ rs_set_end_raw(range_seg_t *rs, range_tree_t *rt, uint64_t end)
 }
 static inline void
-rs_set_fill_raw(range_seg_t *rs, range_tree_t *rt, uint64_t fill)
+zfs_zfs_rs_set_fill_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt,
+uint64_t fill)
 {
-ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
 switch (rt->rt_type) {
-case RANGE_SEG32:
+case ZFS_RANGE_SEG32:
 /* fall through */
-case RANGE_SEG64:
-ASSERT3U(fill, ==, rs_get_end_raw(rs, rt) - rs_get_start_raw(rs,
-rt));
+case ZFS_RANGE_SEG64:
+ASSERT3U(fill, ==, zfs_rs_get_end_raw(rs, rt) -
+zfs_rs_get_start_raw(rs, rt));
 break;
-case RANGE_SEG_GAP:
+case ZFS_RANGE_SEG_GAP:
 ((range_seg_gap_t *)rs)->rs_fill = fill;
 break;
 default:
@@ -250,67 +251,73 @@ rs_set_fill_raw(range_seg_t *rs, range_tree_t *rt, uint64_t fill)
 }
 static inline void
-rs_set_start(range_seg_t *rs, range_tree_t *rt, uint64_t start)
+zfs_rs_set_start(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t start)
 {
 ASSERT3U(start, >=, rt->rt_start);
 ASSERT(IS_P2ALIGNED(start, 1ULL << rt->rt_shift));
-rs_set_start_raw(rs, rt, (start - rt->rt_start) >> rt->rt_shift);
+zfs_rs_set_start_raw(rs, rt, (start - rt->rt_start) >> rt->rt_shift);
 }
 static inline void
-rs_set_end(range_seg_t *rs, range_tree_t *rt, uint64_t end)
+zfs_rs_set_end(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t end)
 {
 ASSERT3U(end, >=, rt->rt_start);
 ASSERT(IS_P2ALIGNED(end, 1ULL << rt->rt_shift));
-rs_set_end_raw(rs, rt, (end - rt->rt_start) >> rt->rt_shift);
+zfs_rs_set_end_raw(rs, rt, (end - rt->rt_start) >> rt->rt_shift);
 }
 static inline void
-rs_set_fill(range_seg_t *rs, range_tree_t *rt, uint64_t fill)
+zfs_rs_set_fill(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t fill)
 {
 ASSERT(IS_P2ALIGNED(fill, 1ULL << rt->rt_shift));
-rs_set_fill_raw(rs, rt, fill >> rt->rt_shift);
+zfs_zfs_rs_set_fill_raw(rs, rt, fill >> rt->rt_shift);
 }
-typedef void range_tree_func_t(void *arg, uint64_t start, uint64_t size);
+typedef void zfs_range_tree_func_t(void *arg, uint64_t start, uint64_t size);
-range_tree_t *range_tree_create_gap(const range_tree_ops_t *ops,
-range_seg_type_t type, void *arg, uint64_t start, uint64_t shift,
+zfs_range_tree_t *zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
+zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift,
 uint64_t gap);
-range_tree_t *range_tree_create(const range_tree_ops_t *ops,
-range_seg_type_t type, void *arg, uint64_t start, uint64_t shift);
-void range_tree_destroy(range_tree_t *rt);
-boolean_t range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size);
-range_seg_t *range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size);
-boolean_t range_tree_find_in(range_tree_t *rt, uint64_t start, uint64_t size,
-uint64_t *ostart, uint64_t *osize);
-void range_tree_verify_not_present(range_tree_t *rt,
+zfs_range_tree_t *zfs_range_tree_create(const zfs_range_tree_ops_t *ops,
+zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift);
+void zfs_range_tree_destroy(zfs_range_tree_t *rt);
+boolean_t zfs_range_tree_contains(zfs_range_tree_t *rt, uint64_t start,
+uint64_t size);
+zfs_range_seg_t *zfs_range_tree_find(zfs_range_tree_t *rt, uint64_t start,
+uint64_t size);
+boolean_t zfs_range_tree_find_in(zfs_range_tree_t *rt, uint64_t start,
+uint64_t size, uint64_t *ostart, uint64_t *osize);
+void zfs_range_tree_verify_not_present(zfs_range_tree_t *rt,
 uint64_t start, uint64_t size);
-void range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
+void zfs_range_tree_resize_segment(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
 uint64_t newstart, uint64_t newsize);
-uint64_t range_tree_space(range_tree_t *rt);
-uint64_t range_tree_numsegs(range_tree_t *rt);
-boolean_t range_tree_is_empty(range_tree_t *rt);
-void range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst);
-void range_tree_stat_verify(range_tree_t *rt);
-uint64_t range_tree_min(range_tree_t *rt);
-uint64_t range_tree_max(range_tree_t *rt);
-uint64_t range_tree_span(range_tree_t *rt);
-void range_tree_add(void *arg, uint64_t start, uint64_t size);
-void range_tree_remove(void *arg, uint64_t start, uint64_t size);
-void range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size);
-void range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta);
-void range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size);
+uint64_t zfs_range_tree_space(zfs_range_tree_t *rt);
+uint64_t zfs_range_tree_numsegs(zfs_range_tree_t *rt);
+boolean_t zfs_range_tree_is_empty(zfs_range_tree_t *rt);
+void zfs_range_tree_swap(zfs_range_tree_t **rtsrc, zfs_range_tree_t **rtdst);
+void zfs_range_tree_stat_verify(zfs_range_tree_t *rt);
+uint64_t zfs_range_tree_min(zfs_range_tree_t *rt);
+uint64_t zfs_range_tree_max(zfs_range_tree_t *rt);
+uint64_t zfs_range_tree_span(zfs_range_tree_t *rt);
+void zfs_range_tree_add(void *arg, uint64_t start, uint64_t size);
+void zfs_range_tree_remove(void *arg, uint64_t start, uint64_t size);
+void zfs_range_tree_remove_fill(zfs_range_tree_t *rt, uint64_t start,
+uint64_t size);
+void zfs_range_tree_adjust_fill(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
+int64_t delta);
+void zfs_range_tree_clear(zfs_range_tree_t *rt, uint64_t start, uint64_t size);
-void range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg);
-void range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg);
-range_seg_t *range_tree_first(range_tree_t *rt);
+void zfs_range_tree_vacate(zfs_range_tree_t *rt, zfs_range_tree_func_t *func,
+void *arg);
+void zfs_range_tree_walk(zfs_range_tree_t *rt, zfs_range_tree_func_t *func,
+void *arg);
+zfs_range_seg_t *zfs_range_tree_first(zfs_range_tree_t *rt);
-void range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
-range_tree_t *removefrom, range_tree_t *addto);
-void range_tree_remove_xor_add(range_tree_t *rt, range_tree_t *removefrom,
-range_tree_t *addto);
+void zfs_range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
+zfs_range_tree_t *removefrom, zfs_range_tree_t *addto);
+void zfs_range_tree_remove_xor_add(zfs_range_tree_t *rt,
+zfs_range_tree_t *removefrom, zfs_range_tree_t *addto);
 #ifdef __cplusplus
 }
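
The header above also declares zfs_range_tree_create_gap(). Below is a short sketch of how a gap tree is expected to behave, under the assumption (not stated in this diff) that two additions falling within the configured gap of one another are bridged into a single segment; the gap_tree_sketch function is hypothetical and not part of the commit.

static void
gap_tree_sketch(void)
{
	/* start 0, shift 0, allowable inter-segment gap of 1 MiB */
	zfs_range_tree_t *rt = zfs_range_tree_create_gap(NULL,
	    ZFS_RANGE_SEG_GAP, NULL, 0, 0, 1ULL << 20);

	zfs_range_tree_add(rt, 0, 4096);
	zfs_range_tree_add(rt, 8192, 4096);	/* within the gap of the first */

	/* Expected: one bridged segment, so the span covers [0, 12288). */
	(void) printf("segs=%llu span=%llu\n",
	    (u_longlong_t)zfs_range_tree_numsegs(rt),
	    (u_longlong_t)zfs_range_tree_span(rt));

	zfs_range_tree_vacate(rt, NULL, NULL);
	zfs_range_tree_destroy(rt);
}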


@@ -207,28 +207,28 @@ boolean_t sm_entry_is_double_word(uint64_t e);
 typedef int (*sm_cb_t)(space_map_entry_t *sme, void *arg);
-int space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype);
-int space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
-uint64_t length);
+int space_map_load(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype);
+int space_map_load_length(space_map_t *sm, zfs_range_tree_t *rt,
+maptype_t maptype, uint64_t length);
 int space_map_iterate(space_map_t *sm, uint64_t length,
 sm_cb_t callback, void *arg);
 int space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
 dmu_tx_t *tx);
-boolean_t space_map_histogram_verify(space_map_t *sm, range_tree_t *rt);
+boolean_t space_map_histogram_verify(space_map_t *sm, zfs_range_tree_t *rt);
 void space_map_histogram_clear(space_map_t *sm);
-void space_map_histogram_add(space_map_t *sm, range_tree_t *rt,
+void space_map_histogram_add(space_map_t *sm, zfs_range_tree_t *rt,
 dmu_tx_t *tx);
 uint64_t space_map_object(space_map_t *sm);
 int64_t space_map_allocated(space_map_t *sm);
 uint64_t space_map_length(space_map_t *sm);
-uint64_t space_map_entries(space_map_t *sm, range_tree_t *rt);
+uint64_t space_map_entries(space_map_t *sm, zfs_range_tree_t *rt);
 uint64_t space_map_nblocks(space_map_t *sm);
-void space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
+void space_map_write(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype,
 uint64_t vdev_id, dmu_tx_t *tx);
-uint64_t space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
+uint64_t space_map_estimate_optimal_size(space_map_t *sm, zfs_range_tree_t *rt,
 uint64_t vdev_id);
 void space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx);
 uint64_t space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx);


@@ -46,8 +46,8 @@ void space_reftree_create(avl_tree_t *t);
 void space_reftree_destroy(avl_tree_t *t);
 void space_reftree_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
 int64_t refcnt);
-void space_reftree_add_map(avl_tree_t *t, range_tree_t *rt, int64_t refcnt);
-void space_reftree_generate_map(avl_tree_t *t, range_tree_t *rt,
+void space_reftree_add_map(avl_tree_t *t, zfs_range_tree_t *rt, int64_t refcnt);
+void space_reftree_generate_map(avl_tree_t *t, zfs_range_tree_t *rt,
 int64_t minref);
 #ifdef __cplusplus


@@ -299,7 +299,8 @@ struct vdev {
 kcondvar_t vdev_initialize_cv;
 uint64_t vdev_initialize_offset[TXG_SIZE];
 uint64_t vdev_initialize_last_offset;
-range_tree_t *vdev_initialize_tree; /* valid while initializing */
+/* valid while initializing */
+zfs_range_tree_t *vdev_initialize_tree;
 uint64_t vdev_initialize_bytes_est;
 uint64_t vdev_initialize_bytes_done;
 uint64_t vdev_initialize_action_time; /* start and end time */
@@ -375,7 +376,7 @@ struct vdev {
 * from multiple zio threads.
 */
 kmutex_t vdev_obsolete_lock;
-range_tree_t *vdev_obsolete_segments;
+zfs_range_tree_t *vdev_obsolete_segments;
 space_map_t *vdev_obsolete_sm;
 /*
@@ -388,7 +389,7 @@ struct vdev {
 /*
 * Leaf vdev state.
 */
-range_tree_t *vdev_dtl[DTL_TYPES]; /* dirty time logs */
+zfs_range_tree_t *vdev_dtl[DTL_TYPES]; /* dirty time logs */
 space_map_t *vdev_dtl_sm; /* dirty time log space map */
 txg_node_t vdev_dtl_node; /* per-txg dirty DTL linkage */
 uint64_t vdev_dtl_object; /* DTL object */


@@ -65,7 +65,8 @@ typedef struct vdev_rebuild_phys {
 typedef struct vdev_rebuild {
 vdev_t *vr_top_vdev; /* top-level vdev to rebuild */
 metaslab_t *vr_scan_msp; /* scanning disabled metaslab */
-range_tree_t *vr_scan_tree; /* scan ranges (in metaslab) */
+/* scan ranges (in metaslab) */
+zfs_range_tree_t *vr_scan_tree;
 kmutex_t vr_io_lock; /* inflight IO lock */
 kcondvar_t vr_io_cv; /* inflight IO cv */


@@ -35,7 +35,7 @@ typedef struct spa_vdev_removal {
 /* Thread performing a vdev removal. */
 kthread_t *svr_thread;
 /* Segments left to copy from the current metaslab. */
-range_tree_t *svr_allocd_segs;
+zfs_range_tree_t *svr_allocd_segs;
 kmutex_t svr_lock;
 kcondvar_t svr_cv;
 boolean_t svr_thread_exit;
@@ -49,7 +49,7 @@ typedef struct spa_vdev_removal {
 * Ranges that were freed while a mapping was in flight. This is
 * a subset of the ranges covered by vdev_im_new_segments.
 */
-range_tree_t *svr_frees[TXG_SIZE];
+zfs_range_tree_t *svr_frees[TXG_SIZE];
 /*
 * Number of bytes which we have finished our work for


@@ -2193,7 +2193,7 @@ dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
 mutex_enter(&dn->dn_mtx);
 int txgoff = tx->tx_txg & TXG_MASK;
 if (dn->dn_free_ranges[txgoff] != NULL) {
-range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
+zfs_range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
 }
 if (dn->dn_nlevels == 1) {
@@ -2400,7 +2400,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 db->db_blkid != DMU_SPILL_BLKID) {
 mutex_enter(&dn->dn_mtx);
 if (dn->dn_free_ranges[txgoff] != NULL) {
-range_tree_clear(dn->dn_free_ranges[txgoff],
+zfs_range_tree_clear(dn->dn_free_ranges[txgoff],
 db->db_blkid, 1);
 }
 mutex_exit(&dn->dn_mtx);


@@ -2435,11 +2435,11 @@ done:
 {
 int txgoff = tx->tx_txg & TXG_MASK;
 if (dn->dn_free_ranges[txgoff] == NULL) {
-dn->dn_free_ranges[txgoff] = range_tree_create(NULL,
-RANGE_SEG64, NULL, 0, 0);
+dn->dn_free_ranges[txgoff] = zfs_range_tree_create(NULL,
+ZFS_RANGE_SEG64, NULL, 0, 0);
 }
-range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
-range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
+zfs_range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
+zfs_range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
 }
 dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
 (u_longlong_t)blkid, (u_longlong_t)nblks,
@@ -2482,7 +2482,7 @@ dnode_block_freed(dnode_t *dn, uint64_t blkid)
 mutex_enter(&dn->dn_mtx);
 for (i = 0; i < TXG_SIZE; i++) {
 if (dn->dn_free_ranges[i] != NULL &&
-range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
+zfs_range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
 break;
 }
 mutex_exit(&dn->dn_mtx);


@@ -720,7 +720,7 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
 dn->dn_maxblkid == 0 || list_head(list) != NULL ||
 dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
 dnp->dn_datablkszsec ||
-!range_tree_is_empty(dn->dn_free_ranges[txgoff]));
+!zfs_range_tree_is_empty(dn->dn_free_ranges[txgoff]));
 dnp->dn_datablkszsec =
 dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
 dn->dn_next_blksz[txgoff] = 0;
@@ -786,21 +786,22 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
 dsfra.dsfra_free_indirects = freeing_dnode;
 mutex_enter(&dn->dn_mtx);
 if (freeing_dnode) {
-ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
-0, dn->dn_maxblkid + 1));
+ASSERT(zfs_range_tree_contains(
+dn->dn_free_ranges[txgoff], 0,
+dn->dn_maxblkid + 1));
 }
 /*
 * Because dnode_sync_free_range() must drop dn_mtx during its
-* processing, using it as a callback to range_tree_vacate() is
-* not safe. No other operations (besides destroy) are allowed
-* once range_tree_vacate() has begun, and dropping dn_mtx
-* would leave a window open for another thread to observe that
-* invalid (and unsafe) state.
+* processing, using it as a callback to zfs_range_tree_vacate()
+* is not safe. No other operations (besides destroy) are
+* allowed once zfs_range_tree_vacate() has begun, and dropping
+* dn_mtx would leave a window open for another thread to
+* observe that invalid (and unsafe) state.
 */
-range_tree_walk(dn->dn_free_ranges[txgoff],
+zfs_range_tree_walk(dn->dn_free_ranges[txgoff],
 dnode_sync_free_range, &dsfra);
-range_tree_vacate(dn->dn_free_ranges[txgoff], NULL, NULL);
-range_tree_destroy(dn->dn_free_ranges[txgoff]);
+zfs_range_tree_vacate(dn->dn_free_ranges[txgoff], NULL, NULL);
+zfs_range_tree_destroy(dn->dn_free_ranges[txgoff]);
 dn->dn_free_ranges[txgoff] = NULL;
 mutex_exit(&dn->dn_mtx);
 }


@@ -652,8 +652,8 @@ dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
 for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
 ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
-VERIFY(range_tree_is_empty(ms->ms_freeing));
-VERIFY(range_tree_is_empty(ms->ms_checkpointing));
+VERIFY(zfs_range_tree_is_empty(ms->ms_freeing));
+VERIFY(zfs_range_tree_is_empty(ms->ms_checkpointing));
 }
 }


@ -321,7 +321,7 @@ struct dsl_scan_io_queue {
zio_t *q_zio; /* scn_zio_root child for waiting on IO */ zio_t *q_zio; /* scn_zio_root child for waiting on IO */
/* trees used for sorting I/Os and extents of I/Os */ /* trees used for sorting I/Os and extents of I/Os */
range_tree_t *q_exts_by_addr; zfs_range_tree_t *q_exts_by_addr;
zfs_btree_t q_exts_by_size; zfs_btree_t q_exts_by_size;
avl_tree_t q_sios_by_addr; avl_tree_t q_sios_by_addr;
uint64_t q_sio_memused; uint64_t q_sio_memused;
@ -814,7 +814,8 @@ dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL); ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==, ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
NULL); NULL);
ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL); ASSERT3P(zfs_range_tree_first(q->q_exts_by_addr), ==,
NULL);
mutex_exit(&vd->vdev_scan_io_queue_lock); mutex_exit(&vd->vdev_scan_io_queue_lock);
} }
@ -3277,13 +3278,14 @@ scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
/* /*
* This function removes sios from an IO queue which reside within a given * This function removes sios from an IO queue which reside within a given
* range_seg_t and inserts them (in offset order) into a list. Note that * zfs_range_seg_t and inserts them (in offset order) into a list. Note that
* we only ever return a maximum of 32 sios at once. If there are more sios * we only ever return a maximum of 32 sios at once. If there are more sios
* to process within this segment that did not make it onto the list we * to process within this segment that did not make it onto the list we
* return B_TRUE and otherwise B_FALSE. * return B_TRUE and otherwise B_FALSE.
*/ */
static boolean_t static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list) scan_io_queue_gather(dsl_scan_io_queue_t *queue, zfs_range_seg_t *rs,
list_t *list)
{ {
scan_io_t *srch_sio, *sio, *next_sio; scan_io_t *srch_sio, *sio, *next_sio;
avl_index_t idx; avl_index_t idx;
@ -3295,7 +3297,7 @@ scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
srch_sio = sio_alloc(1); srch_sio = sio_alloc(1);
srch_sio->sio_nr_dvas = 1; srch_sio->sio_nr_dvas = 1;
SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr)); SIO_SET_OFFSET(srch_sio, zfs_rs_get_start(rs, queue->q_exts_by_addr));
/* /*
* The exact start of the extent might not contain any matching zios, * The exact start of the extent might not contain any matching zios,
@ -3307,11 +3309,11 @@ scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
if (sio == NULL) if (sio == NULL)
sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER); sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs, while (sio != NULL && SIO_GET_OFFSET(sio) < zfs_rs_get_end(rs,
queue->q_exts_by_addr) && num_sios <= 32) { queue->q_exts_by_addr) && num_sios <= 32) {
ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs, ASSERT3U(SIO_GET_OFFSET(sio), >=, zfs_rs_get_start(rs,
queue->q_exts_by_addr)); queue->q_exts_by_addr));
ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs, ASSERT3U(SIO_GET_END_OFFSET(sio), <=, zfs_rs_get_end(rs,
queue->q_exts_by_addr)); queue->q_exts_by_addr));
next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio); next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
@ -3332,19 +3334,20 @@ scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
* in the segment we update it to reflect the work we were able to * in the segment we update it to reflect the work we were able to
* complete. Otherwise, we remove it from the range tree entirely. * complete. Otherwise, we remove it from the range tree entirely.
*/ */
if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs, if (sio != NULL && SIO_GET_OFFSET(sio) < zfs_rs_get_end(rs,
queue->q_exts_by_addr)) { queue->q_exts_by_addr)) {
range_tree_adjust_fill(queue->q_exts_by_addr, rs, zfs_range_tree_adjust_fill(queue->q_exts_by_addr, rs,
-bytes_issued); -bytes_issued);
range_tree_resize_segment(queue->q_exts_by_addr, rs, zfs_range_tree_resize_segment(queue->q_exts_by_addr, rs,
SIO_GET_OFFSET(sio), rs_get_end(rs, SIO_GET_OFFSET(sio), zfs_rs_get_end(rs,
queue->q_exts_by_addr) - SIO_GET_OFFSET(sio)); queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
queue->q_last_ext_addr = SIO_GET_OFFSET(sio); queue->q_last_ext_addr = SIO_GET_OFFSET(sio);
return (B_TRUE); return (B_TRUE);
} else { } else {
uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr); uint64_t rstart = zfs_rs_get_start(rs, queue->q_exts_by_addr);
uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr); uint64_t rend = zfs_rs_get_end(rs, queue->q_exts_by_addr);
range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart); zfs_range_tree_remove(queue->q_exts_by_addr, rstart, rend -
rstart);
queue->q_last_ext_addr = -1; queue->q_last_ext_addr = -1;
return (B_FALSE); return (B_FALSE);
} }
@ -3361,11 +3364,11 @@ scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
* memory limit. * memory limit.
* 3) Otherwise we don't select any extents. * 3) Otherwise we don't select any extents.
*/ */
static range_seg_t * static zfs_range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue) scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{ {
dsl_scan_t *scn = queue->q_scn; dsl_scan_t *scn = queue->q_scn;
range_tree_t *rt = queue->q_exts_by_addr; zfs_range_tree_t *rt = queue->q_exts_by_addr;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
ASSERT(scn->scn_is_sorted); ASSERT(scn->scn_is_sorted);
@ -3384,7 +3387,7 @@ scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
*/ */
if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) || if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) ||
zfs_scan_issue_strategy == 1) zfs_scan_issue_strategy == 1)
return (range_tree_first(rt)); return (zfs_range_tree_first(rt));
/* /*
* Try to continue previous extent if it is not completed yet. After * Try to continue previous extent if it is not completed yet. After
@ -3393,10 +3396,10 @@ scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
*/ */
uint64_t start; uint64_t start;
uint64_t size = 1ULL << rt->rt_shift; uint64_t size = 1ULL << rt->rt_shift;
range_seg_t *addr_rs; zfs_range_seg_t *addr_rs;
if (queue->q_last_ext_addr != -1) { if (queue->q_last_ext_addr != -1) {
start = queue->q_last_ext_addr; start = queue->q_last_ext_addr;
addr_rs = range_tree_find(rt, start, size); addr_rs = zfs_range_tree_find(rt, start, size);
if (addr_rs != NULL) if (addr_rs != NULL)
return (addr_rs); return (addr_rs);
} }
@ -3413,10 +3416,10 @@ scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
* We need to get the original entry in the by_addr tree so we can * We need to get the original entry in the by_addr tree so we can
* modify it. * modify it.
*/ */
addr_rs = range_tree_find(rt, start, size); addr_rs = zfs_range_tree_find(rt, start, size);
ASSERT3P(addr_rs, !=, NULL); ASSERT3P(addr_rs, !=, NULL);
ASSERT3U(rs_get_start(addr_rs, rt), ==, start); ASSERT3U(zfs_rs_get_start(addr_rs, rt), ==, start);
ASSERT3U(rs_get_end(addr_rs, rt), >, start); ASSERT3U(zfs_rs_get_end(addr_rs, rt), >, start);
return (addr_rs); return (addr_rs);
} }
@ -3426,7 +3429,7 @@ scan_io_queues_run_one(void *arg)
dsl_scan_io_queue_t *queue = arg; dsl_scan_io_queue_t *queue = arg;
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
boolean_t suspended = B_FALSE; boolean_t suspended = B_FALSE;
range_seg_t *rs; zfs_range_seg_t *rs;
scan_io_t *sio; scan_io_t *sio;
zio_t *zio; zio_t *zio;
list_t sio_list; list_t sio_list;
@ -4723,7 +4726,7 @@ scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
} }
avl_insert(&queue->q_sios_by_addr, sio, idx); avl_insert(&queue->q_sios_by_addr, sio, idx);
queue->q_sio_memused += SIO_GET_MUSED(sio); queue->q_sio_memused += SIO_GET_MUSED(sio);
range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), zfs_range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio),
SIO_GET_ASIZE(sio)); SIO_GET_ASIZE(sio));
} }
@ -4983,7 +4986,7 @@ ZFS_BTREE_FIND_IN_BUF_FUNC(ext_size_find_in_buf, uint64_t,
ext_size_compare) ext_size_compare)
static void static void
ext_size_create(range_tree_t *rt, void *arg) ext_size_create(zfs_range_tree_t *rt, void *arg)
{ {
(void) rt; (void) rt;
zfs_btree_t *size_tree = arg; zfs_btree_t *size_tree = arg;
@ -4993,7 +4996,7 @@ ext_size_create(range_tree_t *rt, void *arg)
} }
static void static void
ext_size_destroy(range_tree_t *rt, void *arg) ext_size_destroy(zfs_range_tree_t *rt, void *arg)
{ {
(void) rt; (void) rt;
zfs_btree_t *size_tree = arg; zfs_btree_t *size_tree = arg;
@ -5003,7 +5006,7 @@ ext_size_destroy(range_tree_t *rt, void *arg)
} }
static uint64_t static uint64_t
ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg) ext_size_value(zfs_range_tree_t *rt, range_seg_gap_t *rsg)
{ {
(void) rt; (void) rt;
uint64_t size = rsg->rs_end - rsg->rs_start; uint64_t size = rsg->rs_end - rsg->rs_start;
@ -5014,25 +5017,25 @@ ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg)
} }
static void static void
ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg) ext_size_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{ {
zfs_btree_t *size_tree = arg; zfs_btree_t *size_tree = arg;
ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP); ASSERT3U(rt->rt_type, ==, ZFS_RANGE_SEG_GAP);
uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs); uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
zfs_btree_add(size_tree, &v); zfs_btree_add(size_tree, &v);
} }
static void static void
ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg) ext_size_remove(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{ {
zfs_btree_t *size_tree = arg; zfs_btree_t *size_tree = arg;
ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP); ASSERT3U(rt->rt_type, ==, ZFS_RANGE_SEG_GAP);
uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs); uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
zfs_btree_remove(size_tree, &v); zfs_btree_remove(size_tree, &v);
} }
static void static void
ext_size_vacate(range_tree_t *rt, void *arg) ext_size_vacate(zfs_range_tree_t *rt, void *arg)
{ {
zfs_btree_t *size_tree = arg; zfs_btree_t *size_tree = arg;
zfs_btree_clear(size_tree); zfs_btree_clear(size_tree);
@ -5041,7 +5044,7 @@ ext_size_vacate(range_tree_t *rt, void *arg)
ext_size_create(rt, arg); ext_size_create(rt, arg);
} }
static const range_tree_ops_t ext_size_ops = { static const zfs_range_tree_ops_t ext_size_ops = {
.rtop_create = ext_size_create, .rtop_create = ext_size_create,
.rtop_destroy = ext_size_destroy, .rtop_destroy = ext_size_destroy,
.rtop_add = ext_size_add, .rtop_add = ext_size_add,
@ -5073,8 +5076,9 @@ scan_io_queue_create(vdev_t *vd)
q->q_sio_memused = 0; q->q_sio_memused = 0;
q->q_last_ext_addr = -1; q->q_last_ext_addr = -1;
cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL); cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP, q->q_exts_by_addr = zfs_range_tree_create_gap(&ext_size_ops,
&q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap); ZFS_RANGE_SEG_GAP, &q->q_exts_by_size, 0, vd->vdev_ashift,
zfs_scan_max_ext_gap);
avl_create(&q->q_sios_by_addr, sio_addr_compare, avl_create(&q->q_sios_by_addr, sio_addr_compare,
sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node)); sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
@ -5099,15 +5103,15 @@ dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
atomic_add_64(&scn->scn_queues_pending, -1); atomic_add_64(&scn->scn_queues_pending, -1);
while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) != while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
NULL) { NULL) {
ASSERT(range_tree_contains(queue->q_exts_by_addr, ASSERT(zfs_range_tree_contains(queue->q_exts_by_addr,
SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio))); SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
queue->q_sio_memused -= SIO_GET_MUSED(sio); queue->q_sio_memused -= SIO_GET_MUSED(sio);
sio_free(sio); sio_free(sio);
} }
ASSERT0(queue->q_sio_memused); ASSERT0(queue->q_sio_memused);
range_tree_vacate(queue->q_exts_by_addr, NULL, queue); zfs_range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
range_tree_destroy(queue->q_exts_by_addr); zfs_range_tree_destroy(queue->q_exts_by_addr);
avl_destroy(&queue->q_sios_by_addr); avl_destroy(&queue->q_sios_by_addr);
cv_destroy(&queue->q_zio_cv); cv_destroy(&queue->q_zio_cv);
@ -5184,10 +5188,10 @@ dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
* 1) Cold, just sitting in the queue of zio's to be issued at * 1) Cold, just sitting in the queue of zio's to be issued at
* some point in the future. In this case, all we do is * some point in the future. In this case, all we do is
* remove the zio from the q_sios_by_addr tree, decrement * remove the zio from the q_sios_by_addr tree, decrement
* its data volume from the containing range_seg_t and * its data volume from the containing zfs_range_seg_t and
* resort the q_exts_by_size tree to reflect that the * resort the q_exts_by_size tree to reflect that the
* range_seg_t has lost some of its 'fill'. We don't shorten * zfs_range_seg_t has lost some of its 'fill'. We don't shorten
* the range_seg_t - this is usually rare enough not to be * the zfs_range_seg_t - this is usually rare enough not to be
* worth the extra hassle of trying to keep track of precise * worth the extra hassle of trying to keep track of precise
* extent boundaries. * extent boundaries.
* 2) Hot, where the zio is currently in-flight in * 2) Hot, where the zio is currently in-flight in
@ -5211,8 +5215,9 @@ dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
atomic_add_64(&scn->scn_queues_pending, -1); atomic_add_64(&scn->scn_queues_pending, -1);
queue->q_sio_memused -= SIO_GET_MUSED(sio); queue->q_sio_memused -= SIO_GET_MUSED(sio);
ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size)); ASSERT(zfs_range_tree_contains(queue->q_exts_by_addr, start,
range_tree_remove_fill(queue->q_exts_by_addr, start, size); size));
zfs_range_tree_remove_fill(queue->q_exts_by_addr, start, size);
/* count the block as though we skipped it */ /* count the block as though we skipped it */
sio2bp(sio, &tmpbp); sio2bp(sio, &tmpbp);
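To make the "fill" bookkeeping above concrete, here is a small sketch of the cold case on a scan queue's extent tree. The helper name cold_free_sketch and the offsets are illustrative; the tree is assumed to have been created with zfs_range_tree_create_gap() as in scan_io_queue_create() further down, with a gap parameter large enough to bridge the two blocks.

static void
cold_free_sketch(zfs_range_tree_t *exts_by_addr)
{
        /* Two nearby blocks bridged into one extent by the gap parameter. */
        zfs_range_tree_add(exts_by_addr, 0x10000, 0x2000);
        zfs_range_tree_add(exts_by_addr, 0x13000, 0x2000);

        /* Cold freed block: the extent keeps its boundaries, only its fill drops. */
        zfs_range_tree_remove_fill(exts_by_addr, 0x13000, 0x2000);

        /*
         * Removing the last of the fill drops the whole extent. Partial
         * zfs_range_tree_remove() calls on a gap tree are not supported and
         * go through zfs_panic_recover() (see the range tree diff below).
         */
        zfs_range_tree_remove_fill(exts_by_addr, 0x10000, 0x2000);
}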

File diff suppressed because it is too large

View File

@ -42,11 +42,11 @@
* splitting in response to range add/remove requests. * splitting in response to range add/remove requests.
* *
* A range tree starts out completely empty, with no segments in it. * A range tree starts out completely empty, with no segments in it.
* Adding an allocation via range_tree_add to the range tree can either: * Adding an allocation via zfs_range_tree_add to the range tree can either:
* 1) create a new extent * 1) create a new extent
* 2) extend an adjacent extent * 2) extend an adjacent extent
* 3) merge two adjacent extents * 3) merge two adjacent extents
* Conversely, removing an allocation via range_tree_remove can: * Conversely, removing an allocation via zfs_range_tree_remove can:
* 1) completely remove an extent * 1) completely remove an extent
* 2) shorten an extent (if the allocation was near one of its ends) * 2) shorten an extent (if the allocation was near one of its ends)
* 3) split an extent into two extents, in effect punching a hole * 3) split an extent into two extents, in effect punching a hole
@ -54,16 +54,16 @@
* A range tree is also capable of 'bridging' gaps when adding * A range tree is also capable of 'bridging' gaps when adding
* allocations. This is useful for cases when close proximity of * allocations. This is useful for cases when close proximity of
* allocations is an important detail that needs to be represented * allocations is an important detail that needs to be represented
* in the range tree. See range_tree_set_gap(). The default behavior * in the range tree. See zfs_range_tree_set_gap(). The default behavior
* is not to bridge gaps (i.e. the maximum allowed gap size is 0). * is not to bridge gaps (i.e. the maximum allowed gap size is 0).
* *
* In order to traverse a range tree, use either the range_tree_walk() * In order to traverse a range tree, use either the zfs_range_tree_walk()
* or range_tree_vacate() functions. * or zfs_range_tree_vacate() functions.
* *
* To obtain more accurate information on individual segment * To obtain more accurate information on individual segment
* operations that the range tree performs "under the hood", you can * operations that the range tree performs "under the hood", you can
* specify a set of callbacks by passing a range_tree_ops_t structure * specify a set of callbacks by passing a zfs_range_tree_ops_t structure
* to the range_tree_create function. Any callbacks that are non-NULL * to the zfs_range_tree_create function. Any callbacks that are non-NULL
* are then called at the appropriate times. * are then called at the appropriate times.
* *
* The range tree code also supports a special variant of range trees * The range tree code also supports a special variant of range trees
@ -76,18 +76,18 @@
*/ */
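For readers following the rename, a minimal usage sketch of the renamed interface as it stands after this commit. It assumes compilation inside the ZFS tree (sys/range_tree.h provides the declarations), the offsets are illustrative, the helper names print_segment and range_tree_example are hypothetical, and the walk callback signature is inferred from the walk/vacate call sites further down.

#include <sys/range_tree.h>

/* Callback for zfs_range_tree_walk(); matches how the call sites invoke it. */
static void
print_segment(void *arg, uint64_t start, uint64_t size)
{
        (void) arg;
        zfs_dbgmsg("segment [%llx, %llx)", (u_longlong_t)start,
            (u_longlong_t)(start + size));
}

static void
range_tree_example(void)
{
        /* No ops callbacks, 64-bit segments, no offset bias or shift. */
        zfs_range_tree_t *rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
            NULL, 0, 0);

        zfs_range_tree_add(rt, 0x1000, 0x1000);         /* new extent */
        zfs_range_tree_add(rt, 0x2000, 0x1000);         /* extends it to [0x1000, 0x3000) */
        zfs_range_tree_remove(rt, 0x1800, 0x400);       /* punches a hole */

        ASSERT(zfs_range_tree_contains(rt, 0x1000, 0x800));
        ASSERT3U(zfs_range_tree_space(rt), ==, 0x1c00);

        zfs_range_tree_walk(rt, print_segment, NULL);

        /* A tree must be emptied before zfs_range_tree_destroy(). */
        zfs_range_tree_vacate(rt, NULL, NULL);
        zfs_range_tree_destroy(rt);
}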
static inline void static inline void
rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt) zfs_rs_copy(zfs_range_seg_t *src, zfs_range_seg_t *dest, zfs_range_tree_t *rt)
{ {
ASSERT3U(rt->rt_type, <, RANGE_SEG_NUM_TYPES); ASSERT3U(rt->rt_type, <, ZFS_RANGE_SEG_NUM_TYPES);
size_t size = 0; size_t size = 0;
switch (rt->rt_type) { switch (rt->rt_type) {
case RANGE_SEG32: case ZFS_RANGE_SEG32:
size = sizeof (range_seg32_t); size = sizeof (range_seg32_t);
break; break;
case RANGE_SEG64: case ZFS_RANGE_SEG64:
size = sizeof (range_seg64_t); size = sizeof (range_seg64_t);
break; break;
case RANGE_SEG_GAP: case ZFS_RANGE_SEG_GAP:
size = sizeof (range_seg_gap_t); size = sizeof (range_seg_gap_t);
break; break;
default: default:
@ -97,16 +97,17 @@ rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt)
} }
void void
range_tree_stat_verify(range_tree_t *rt) zfs_range_tree_stat_verify(zfs_range_tree_t *rt)
{ {
range_seg_t *rs; zfs_range_seg_t *rs;
zfs_btree_index_t where; zfs_btree_index_t where;
uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 }; uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
int i; int i;
for (rs = zfs_btree_first(&rt->rt_root, &where); rs != NULL; for (rs = zfs_btree_first(&rt->rt_root, &where); rs != NULL;
rs = zfs_btree_next(&rt->rt_root, &where, &where)) { rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt); uint64_t size = zfs_rs_get_end(rs, rt) -
zfs_rs_get_start(rs, rt);
int idx = highbit64(size) - 1; int idx = highbit64(size) - 1;
hist[idx]++; hist[idx]++;
@ -124,9 +125,9 @@ range_tree_stat_verify(range_tree_t *rt)
} }
static void static void
range_tree_stat_incr(range_tree_t *rt, range_seg_t *rs) zfs_range_tree_stat_incr(zfs_range_tree_t *rt, zfs_range_seg_t *rs)
{ {
uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt); uint64_t size = zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt);
int idx = highbit64(size) - 1; int idx = highbit64(size) - 1;
ASSERT(size != 0); ASSERT(size != 0);
@ -138,9 +139,9 @@ range_tree_stat_incr(range_tree_t *rt, range_seg_t *rs)
} }
static void static void
range_tree_stat_decr(range_tree_t *rt, range_seg_t *rs) zfs_range_tree_stat_decr(zfs_range_tree_t *rt, zfs_range_seg_t *rs)
{ {
uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt); uint64_t size = zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt);
int idx = highbit64(size) - 1; int idx = highbit64(size) - 1;
ASSERT(size != 0); ASSERT(size != 0);
@ -153,7 +154,7 @@ range_tree_stat_decr(range_tree_t *rt, range_seg_t *rs)
__attribute__((always_inline)) inline __attribute__((always_inline)) inline
static int static int
range_tree_seg32_compare(const void *x1, const void *x2) zfs_range_tree_seg32_compare(const void *x1, const void *x2)
{ {
const range_seg32_t *r1 = x1; const range_seg32_t *r1 = x1;
const range_seg32_t *r2 = x2; const range_seg32_t *r2 = x2;
@ -166,7 +167,7 @@ range_tree_seg32_compare(const void *x1, const void *x2)
__attribute__((always_inline)) inline __attribute__((always_inline)) inline
static int static int
range_tree_seg64_compare(const void *x1, const void *x2) zfs_range_tree_seg64_compare(const void *x1, const void *x2)
{ {
const range_seg64_t *r1 = x1; const range_seg64_t *r1 = x1;
const range_seg64_t *r2 = x2; const range_seg64_t *r2 = x2;
@ -179,7 +180,7 @@ range_tree_seg64_compare(const void *x1, const void *x2)
__attribute__((always_inline)) inline __attribute__((always_inline)) inline
static int static int
range_tree_seg_gap_compare(const void *x1, const void *x2) zfs_range_tree_seg_gap_compare(const void *x1, const void *x2)
{ {
const range_seg_gap_t *r1 = x1; const range_seg_gap_t *r1 = x1;
const range_seg_gap_t *r2 = x2; const range_seg_gap_t *r2 = x2;
@ -190,41 +191,42 @@ range_tree_seg_gap_compare(const void *x1, const void *x2)
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start)); return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
} }
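The gap comparator above (and its 32/64-bit siblings) encodes "overlaps" as equality: the subtraction of the two boolean tests yields -1 when r1 ends at or before r2 starts, 1 when r1 starts at or after r2 ends, and 0 whenever the segments overlap, which is what lets zfs_btree_find() land on any segment overlapping a search range. A small illustration with hypothetical segments (compare_illustration is not part of the source):

static void
compare_illustration(void)
{
        range_seg_gap_t a = { .rs_start = 0x100, .rs_end = 0x200 };
        range_seg_gap_t b = { .rs_start = 0x200, .rs_end = 0x300 };
        range_seg_gap_t c = { .rs_start = 0x180, .rs_end = 0x280 };

        ASSERT3S(zfs_range_tree_seg_gap_compare(&a, &b), ==, -1);  /* a entirely before b */
        ASSERT3S(zfs_range_tree_seg_gap_compare(&b, &a), ==, 1);   /* b entirely after a  */
        ASSERT0(zfs_range_tree_seg_gap_compare(&a, &c));           /* a and c overlap     */
}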
ZFS_BTREE_FIND_IN_BUF_FUNC(range_tree_seg32_find_in_buf, range_seg32_t, ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg32_find_in_buf, range_seg32_t,
range_tree_seg32_compare) zfs_range_tree_seg32_compare)
ZFS_BTREE_FIND_IN_BUF_FUNC(range_tree_seg64_find_in_buf, range_seg64_t, ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg64_find_in_buf, range_seg64_t,
range_tree_seg64_compare) zfs_range_tree_seg64_compare)
ZFS_BTREE_FIND_IN_BUF_FUNC(range_tree_seg_gap_find_in_buf, range_seg_gap_t, ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg_gap_find_in_buf, range_seg_gap_t,
range_tree_seg_gap_compare) zfs_range_tree_seg_gap_compare)
range_tree_t * zfs_range_tree_t *
range_tree_create_gap(const range_tree_ops_t *ops, range_seg_type_t type, zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
void *arg, uint64_t start, uint64_t shift, uint64_t gap) zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift,
uint64_t gap)
{ {
range_tree_t *rt = kmem_zalloc(sizeof (range_tree_t), KM_SLEEP); zfs_range_tree_t *rt = kmem_zalloc(sizeof (zfs_range_tree_t), KM_SLEEP);
ASSERT3U(shift, <, 64); ASSERT3U(shift, <, 64);
ASSERT3U(type, <=, RANGE_SEG_NUM_TYPES); ASSERT3U(type, <=, ZFS_RANGE_SEG_NUM_TYPES);
size_t size; size_t size;
int (*compare) (const void *, const void *); int (*compare) (const void *, const void *);
bt_find_in_buf_f bt_find; bt_find_in_buf_f bt_find;
switch (type) { switch (type) {
case RANGE_SEG32: case ZFS_RANGE_SEG32:
size = sizeof (range_seg32_t); size = sizeof (range_seg32_t);
compare = range_tree_seg32_compare; compare = zfs_range_tree_seg32_compare;
bt_find = range_tree_seg32_find_in_buf; bt_find = zfs_range_tree_seg32_find_in_buf;
break; break;
case RANGE_SEG64: case ZFS_RANGE_SEG64:
size = sizeof (range_seg64_t); size = sizeof (range_seg64_t);
compare = range_tree_seg64_compare; compare = zfs_range_tree_seg64_compare;
bt_find = range_tree_seg64_find_in_buf; bt_find = zfs_range_tree_seg64_find_in_buf;
break; break;
case RANGE_SEG_GAP: case ZFS_RANGE_SEG_GAP:
size = sizeof (range_seg_gap_t); size = sizeof (range_seg_gap_t);
compare = range_tree_seg_gap_compare; compare = zfs_range_tree_seg_gap_compare;
bt_find = range_tree_seg_gap_find_in_buf; bt_find = zfs_range_tree_seg_gap_find_in_buf;
break; break;
default: default:
panic("Invalid range seg type %d", type); panic("Invalid range seg type %d", type);
@ -244,15 +246,15 @@ range_tree_create_gap(const range_tree_ops_t *ops, range_seg_type_t type,
return (rt); return (rt);
} }
range_tree_t * zfs_range_tree_t *
range_tree_create(const range_tree_ops_t *ops, range_seg_type_t type, zfs_range_tree_create(const zfs_range_tree_ops_t *ops,
void *arg, uint64_t start, uint64_t shift) zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift)
{ {
return (range_tree_create_gap(ops, type, arg, start, shift, 0)); return (zfs_range_tree_create_gap(ops, type, arg, start, shift, 0));
} }
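As the header comment notes, per-segment callbacks are supplied through a zfs_range_tree_ops_t at creation time; ext_size_ops earlier in this diff is the in-tree example. A stripped-down sketch with hypothetical names (seg_count_*), assuming callbacks left NULL are simply skipped, as the NULL checks in the add/remove paths suggest:

static void
seg_count_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{
        (void) rt; (void) rs;
        (*(uint64_t *)arg)++;           /* one more segment in the tree */
}

static void
seg_count_remove(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{
        (void) rt; (void) rs;
        (*(uint64_t *)arg)--;
}

static const zfs_range_tree_ops_t seg_count_ops = {
        .rtop_add = seg_count_add,
        .rtop_remove = seg_count_remove,
};

static uint64_t nsegs;

static zfs_range_tree_t *
seg_counted_tree_create(void)
{
        return (zfs_range_tree_create(&seg_count_ops, ZFS_RANGE_SEG64,
            &nsegs, 0, 0));
}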
void void
range_tree_destroy(range_tree_t *rt) zfs_range_tree_destroy(zfs_range_tree_t *rt)
{ {
VERIFY0(rt->rt_space); VERIFY0(rt->rt_space);
@ -264,35 +266,36 @@ range_tree_destroy(range_tree_t *rt)
} }
void void
range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta) zfs_range_tree_adjust_fill(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
int64_t delta)
{ {
if (delta < 0 && delta * -1 >= rs_get_fill(rs, rt)) { if (delta < 0 && delta * -1 >= zfs_rs_get_fill(rs, rt)) {
zfs_panic_recover("zfs: attempting to decrease fill to or " zfs_panic_recover("zfs: attempting to decrease fill to or "
"below 0; probable double remove in segment [%llx:%llx]", "below 0; probable double remove in segment [%llx:%llx]",
(longlong_t)rs_get_start(rs, rt), (longlong_t)zfs_rs_get_start(rs, rt),
(longlong_t)rs_get_end(rs, rt)); (longlong_t)zfs_rs_get_end(rs, rt));
} }
if (rs_get_fill(rs, rt) + delta > rs_get_end(rs, rt) - if (zfs_rs_get_fill(rs, rt) + delta > zfs_rs_get_end(rs, rt) -
rs_get_start(rs, rt)) { zfs_rs_get_start(rs, rt)) {
zfs_panic_recover("zfs: attempting to increase fill beyond " zfs_panic_recover("zfs: attempting to increase fill beyond "
"max; probable double add in segment [%llx:%llx]", "max; probable double add in segment [%llx:%llx]",
(longlong_t)rs_get_start(rs, rt), (longlong_t)zfs_rs_get_start(rs, rt),
(longlong_t)rs_get_end(rs, rt)); (longlong_t)zfs_rs_get_end(rs, rt));
} }
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg); rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
rs_set_fill(rs, rt, rs_get_fill(rs, rt) + delta); zfs_rs_set_fill(rs, rt, zfs_rs_get_fill(rs, rt) + delta);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL) if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg); rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
} }
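zfs_range_tree_adjust_fill() is the primitive that the fill-aware paths (including zfs_range_tree_remove_fill() below) build on, and its two zfs_panic_recover() calls catch probable double removes and double adds. A short sketch for a gap tree, assuming the caller holds whatever lock protects the tree and that [off, off + len) lies within one segment; drop_fill is a hypothetical helper:

static void
drop_fill(zfs_range_tree_t *rt, uint64_t off, uint64_t len)
{
        zfs_range_seg_t *rs = zfs_range_tree_find(rt, off, len);

        /* Only adjust if the range is fully covered by one segment. */
        if (rs != NULL)
                zfs_range_tree_adjust_fill(rt, rs, -(int64_t)len);
}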
static void static void
range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill) zfs_range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
{ {
range_tree_t *rt = arg; zfs_range_tree_t *rt = arg;
zfs_btree_index_t where; zfs_btree_index_t where;
range_seg_t *rs_before, *rs_after, *rs; zfs_range_seg_t *rs_before, *rs_after, *rs;
range_seg_max_t tmp, rsearch; range_seg_max_t tmp, rsearch;
uint64_t end = start + size, gap = rt->rt_gap; uint64_t end = start + size, gap = rt->rt_gap;
uint64_t bridge_size = 0; uint64_t bridge_size = 0;
@ -302,8 +305,8 @@ range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
ASSERT3U(fill, <=, size); ASSERT3U(fill, <=, size);
ASSERT3U(start + size, >, start); ASSERT3U(start + size, >, start);
rs_set_start(&rsearch, rt, start); zfs_rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, end); zfs_rs_set_end(&rsearch, rt, end);
rs = zfs_btree_find(&rt->rt_root, &rsearch, &where); rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
/* /*
@ -321,26 +324,26 @@ range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
(longlong_t)start, (longlong_t)size); (longlong_t)start, (longlong_t)size);
return; return;
} }
uint64_t rstart = rs_get_start(rs, rt); uint64_t rstart = zfs_rs_get_start(rs, rt);
uint64_t rend = rs_get_end(rs, rt); uint64_t rend = zfs_rs_get_end(rs, rt);
if (rstart <= start && rend >= end) { if (rstart <= start && rend >= end) {
range_tree_adjust_fill(rt, rs, fill); zfs_range_tree_adjust_fill(rt, rs, fill);
return; return;
} }
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg); rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
range_tree_stat_decr(rt, rs); zfs_range_tree_stat_decr(rt, rs);
rt->rt_space -= rend - rstart; rt->rt_space -= rend - rstart;
fill += rs_get_fill(rs, rt); fill += zfs_rs_get_fill(rs, rt);
start = MIN(start, rstart); start = MIN(start, rstart);
end = MAX(end, rend); end = MAX(end, rend);
size = end - start; size = end - start;
zfs_btree_remove(&rt->rt_root, rs); zfs_btree_remove(&rt->rt_root, rs);
range_tree_add_impl(rt, start, size, fill); zfs_range_tree_add_impl(rt, start, size, fill);
return; return;
} }
@ -355,15 +358,15 @@ range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
rs_before = zfs_btree_prev(&rt->rt_root, &where, &where_before); rs_before = zfs_btree_prev(&rt->rt_root, &where, &where_before);
rs_after = zfs_btree_next(&rt->rt_root, &where, &where_after); rs_after = zfs_btree_next(&rt->rt_root, &where, &where_after);
merge_before = (rs_before != NULL && rs_get_end(rs_before, rt) >= merge_before = (rs_before != NULL && zfs_rs_get_end(rs_before, rt) >=
start - gap); start - gap);
merge_after = (rs_after != NULL && rs_get_start(rs_after, rt) <= end + merge_after = (rs_after != NULL && zfs_rs_get_start(rs_after, rt) <=
gap); end + gap);
if (merge_before && gap != 0) if (merge_before && gap != 0)
bridge_size += start - rs_get_end(rs_before, rt); bridge_size += start - zfs_rs_get_end(rs_before, rt);
if (merge_after && gap != 0) if (merge_after && gap != 0)
bridge_size += rs_get_start(rs_after, rt) - end; bridge_size += zfs_rs_get_start(rs_after, rt) - end;
if (merge_before && merge_after) { if (merge_before && merge_after) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) { if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) {
@ -371,13 +374,13 @@ range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg); rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
} }
range_tree_stat_decr(rt, rs_before); zfs_range_tree_stat_decr(rt, rs_before);
range_tree_stat_decr(rt, rs_after); zfs_range_tree_stat_decr(rt, rs_after);
rs_copy(rs_after, &tmp, rt); zfs_rs_copy(rs_after, &tmp, rt);
uint64_t before_start = rs_get_start_raw(rs_before, rt); uint64_t before_start = zfs_rs_get_start_raw(rs_before, rt);
uint64_t before_fill = rs_get_fill(rs_before, rt); uint64_t before_fill = zfs_rs_get_fill(rs_before, rt);
uint64_t after_fill = rs_get_fill(rs_after, rt); uint64_t after_fill = zfs_rs_get_fill(rs_after, rt);
zfs_btree_remove_idx(&rt->rt_root, &where_before); zfs_btree_remove_idx(&rt->rt_root, &where_before);
/* /*
@ -386,76 +389,76 @@ range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
*/ */
rs_after = zfs_btree_find(&rt->rt_root, &tmp, &where_after); rs_after = zfs_btree_find(&rt->rt_root, &tmp, &where_after);
ASSERT3P(rs_after, !=, NULL); ASSERT3P(rs_after, !=, NULL);
rs_set_start_raw(rs_after, rt, before_start); zfs_rs_set_start_raw(rs_after, rt, before_start);
rs_set_fill(rs_after, rt, after_fill + before_fill + fill); zfs_rs_set_fill(rs_after, rt, after_fill + before_fill + fill);
rs = rs_after; rs = rs_after;
} else if (merge_before) { } else if (merge_before) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg); rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
range_tree_stat_decr(rt, rs_before); zfs_range_tree_stat_decr(rt, rs_before);
uint64_t before_fill = rs_get_fill(rs_before, rt); uint64_t before_fill = zfs_rs_get_fill(rs_before, rt);
rs_set_end(rs_before, rt, end); zfs_rs_set_end(rs_before, rt, end);
rs_set_fill(rs_before, rt, before_fill + fill); zfs_rs_set_fill(rs_before, rt, before_fill + fill);
rs = rs_before; rs = rs_before;
} else if (merge_after) { } else if (merge_after) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg); rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
range_tree_stat_decr(rt, rs_after); zfs_range_tree_stat_decr(rt, rs_after);
uint64_t after_fill = rs_get_fill(rs_after, rt); uint64_t after_fill = zfs_rs_get_fill(rs_after, rt);
rs_set_start(rs_after, rt, start); zfs_rs_set_start(rs_after, rt, start);
rs_set_fill(rs_after, rt, after_fill + fill); zfs_rs_set_fill(rs_after, rt, after_fill + fill);
rs = rs_after; rs = rs_after;
} else { } else {
rs = &tmp; rs = &tmp;
rs_set_start(rs, rt, start); zfs_rs_set_start(rs, rt, start);
rs_set_end(rs, rt, end); zfs_rs_set_end(rs, rt, end);
rs_set_fill(rs, rt, fill); zfs_rs_set_fill(rs, rt, fill);
zfs_btree_add_idx(&rt->rt_root, rs, &where); zfs_btree_add_idx(&rt->rt_root, rs, &where);
} }
if (gap != 0) { if (gap != 0) {
ASSERT3U(rs_get_fill(rs, rt), <=, rs_get_end(rs, rt) - ASSERT3U(zfs_rs_get_fill(rs, rt), <=, zfs_rs_get_end(rs, rt) -
rs_get_start(rs, rt)); zfs_rs_get_start(rs, rt));
} else { } else {
ASSERT3U(rs_get_fill(rs, rt), ==, rs_get_end(rs, rt) - ASSERT3U(zfs_rs_get_fill(rs, rt), ==, zfs_rs_get_end(rs, rt) -
rs_get_start(rs, rt)); zfs_rs_get_start(rs, rt));
} }
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL) if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg); rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
range_tree_stat_incr(rt, rs); zfs_range_tree_stat_incr(rt, rs);
rt->rt_space += size + bridge_size; rt->rt_space += size + bridge_size;
} }
void void
range_tree_add(void *arg, uint64_t start, uint64_t size) zfs_range_tree_add(void *arg, uint64_t start, uint64_t size)
{ {
range_tree_add_impl(arg, start, size, size); zfs_range_tree_add_impl(arg, start, size, size);
} }
static void static void
range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size, zfs_range_tree_remove_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
boolean_t do_fill) boolean_t do_fill)
{ {
zfs_btree_index_t where; zfs_btree_index_t where;
range_seg_t *rs; zfs_range_seg_t *rs;
range_seg_max_t rsearch, rs_tmp; range_seg_max_t rsearch, rs_tmp;
uint64_t end = start + size; uint64_t end = start + size;
boolean_t left_over, right_over; boolean_t left_over, right_over;
VERIFY3U(size, !=, 0); VERIFY3U(size, !=, 0);
VERIFY3U(size, <=, rt->rt_space); VERIFY3U(size, <=, rt->rt_space);
if (rt->rt_type == RANGE_SEG64) if (rt->rt_type == ZFS_RANGE_SEG64)
ASSERT3U(start + size, >, start); ASSERT3U(start + size, >, start);
rs_set_start(&rsearch, rt, start); zfs_rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, end); zfs_rs_set_end(&rsearch, rt, end);
rs = zfs_btree_find(&rt->rt_root, &rsearch, &where); rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
/* Make sure we completely overlap with someone */ /* Make sure we completely overlap with someone */
@ -474,49 +477,49 @@ range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
*/ */
if (rt->rt_gap != 0) { if (rt->rt_gap != 0) {
if (do_fill) { if (do_fill) {
if (rs_get_fill(rs, rt) == size) { if (zfs_rs_get_fill(rs, rt) == size) {
start = rs_get_start(rs, rt); start = zfs_rs_get_start(rs, rt);
end = rs_get_end(rs, rt); end = zfs_rs_get_end(rs, rt);
size = end - start; size = end - start;
} else { } else {
range_tree_adjust_fill(rt, rs, -size); zfs_range_tree_adjust_fill(rt, rs, -size);
return; return;
} }
} else if (rs_get_start(rs, rt) != start || } else if (zfs_rs_get_start(rs, rt) != start ||
rs_get_end(rs, rt) != end) { zfs_rs_get_end(rs, rt) != end) {
zfs_panic_recover("zfs: freeing partial segment of " zfs_panic_recover("zfs: freeing partial segment of "
"gap tree (offset=%llx size=%llx) of " "gap tree (offset=%llx size=%llx) of "
"(offset=%llx size=%llx)", "(offset=%llx size=%llx)",
(longlong_t)start, (longlong_t)size, (longlong_t)start, (longlong_t)size,
(longlong_t)rs_get_start(rs, rt), (longlong_t)zfs_rs_get_start(rs, rt),
(longlong_t)rs_get_end(rs, rt) - rs_get_start(rs, (longlong_t)zfs_rs_get_end(rs, rt) -
rt)); zfs_rs_get_start(rs, rt));
return; return;
} }
} }
VERIFY3U(rs_get_start(rs, rt), <=, start); VERIFY3U(zfs_rs_get_start(rs, rt), <=, start);
VERIFY3U(rs_get_end(rs, rt), >=, end); VERIFY3U(zfs_rs_get_end(rs, rt), >=, end);
left_over = (rs_get_start(rs, rt) != start); left_over = (zfs_rs_get_start(rs, rt) != start);
right_over = (rs_get_end(rs, rt) != end); right_over = (zfs_rs_get_end(rs, rt) != end);
range_tree_stat_decr(rt, rs); zfs_range_tree_stat_decr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg); rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
if (left_over && right_over) { if (left_over && right_over) {
range_seg_max_t newseg; range_seg_max_t newseg;
rs_set_start(&newseg, rt, end); zfs_rs_set_start(&newseg, rt, end);
rs_set_end_raw(&newseg, rt, rs_get_end_raw(rs, rt)); zfs_rs_set_end_raw(&newseg, rt, zfs_rs_get_end_raw(rs, rt));
rs_set_fill(&newseg, rt, rs_get_end(rs, rt) - end); zfs_rs_set_fill(&newseg, rt, zfs_rs_get_end(rs, rt) - end);
range_tree_stat_incr(rt, &newseg); zfs_range_tree_stat_incr(rt, &newseg);
// This modifies the buffer already inside the range tree // This modifies the buffer already inside the range tree
rs_set_end(rs, rt, start); zfs_rs_set_end(rs, rt, start);
rs_copy(rs, &rs_tmp, rt); zfs_rs_copy(rs, &rs_tmp, rt);
if (zfs_btree_next(&rt->rt_root, &where, &where) != NULL) if (zfs_btree_next(&rt->rt_root, &where, &where) != NULL)
zfs_btree_add_idx(&rt->rt_root, &newseg, &where); zfs_btree_add_idx(&rt->rt_root, &newseg, &where);
else else
@ -526,12 +529,12 @@ range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
rt->rt_ops->rtop_add(rt, &newseg, rt->rt_arg); rt->rt_ops->rtop_add(rt, &newseg, rt->rt_arg);
} else if (left_over) { } else if (left_over) {
// This modifies the buffer already inside the range tree // This modifies the buffer already inside the range tree
rs_set_end(rs, rt, start); zfs_rs_set_end(rs, rt, start);
rs_copy(rs, &rs_tmp, rt); zfs_rs_copy(rs, &rs_tmp, rt);
} else if (right_over) { } else if (right_over) {
// This modifies the buffer already inside the range tree // This modifies the buffer already inside the range tree
rs_set_start(rs, rt, end); zfs_rs_set_start(rs, rt, end);
rs_copy(rs, &rs_tmp, rt); zfs_rs_copy(rs, &rs_tmp, rt);
} else { } else {
zfs_btree_remove_idx(&rt->rt_root, &where); zfs_btree_remove_idx(&rt->rt_root, &where);
rs = NULL; rs = NULL;
@ -543,9 +546,9 @@ range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
* the size, since we do not support removing partial segments * the size, since we do not support removing partial segments
* of range trees with gaps. * of range trees with gaps.
*/ */
rs_set_fill_raw(rs, rt, rs_get_end_raw(rs, rt) - zfs_zfs_rs_set_fill_raw(rs, rt, zfs_rs_get_end_raw(rs, rt) -
rs_get_start_raw(rs, rt)); zfs_rs_get_start_raw(rs, rt));
range_tree_stat_incr(rt, &rs_tmp); zfs_range_tree_stat_incr(rt, &rs_tmp);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL) if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, &rs_tmp, rt->rt_arg); rt->rt_ops->rtop_add(rt, &rs_tmp, rt->rt_arg);
@ -555,76 +558,78 @@ range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
} }
void void
range_tree_remove(void *arg, uint64_t start, uint64_t size) zfs_range_tree_remove(void *arg, uint64_t start, uint64_t size)
{ {
range_tree_remove_impl(arg, start, size, B_FALSE); zfs_range_tree_remove_impl(arg, start, size, B_FALSE);
} }
void void
range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size) zfs_range_tree_remove_fill(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{ {
range_tree_remove_impl(rt, start, size, B_TRUE); zfs_range_tree_remove_impl(rt, start, size, B_TRUE);
} }
void void
range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs, zfs_range_tree_resize_segment(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
uint64_t newstart, uint64_t newsize) uint64_t newstart, uint64_t newsize)
{ {
int64_t delta = newsize - (rs_get_end(rs, rt) - rs_get_start(rs, rt)); int64_t delta = newsize - (zfs_rs_get_end(rs, rt) -
zfs_rs_get_start(rs, rt));
range_tree_stat_decr(rt, rs); zfs_range_tree_stat_decr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg); rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
rs_set_start(rs, rt, newstart); zfs_rs_set_start(rs, rt, newstart);
rs_set_end(rs, rt, newstart + newsize); zfs_rs_set_end(rs, rt, newstart + newsize);
range_tree_stat_incr(rt, rs); zfs_range_tree_stat_incr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL) if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg); rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
rt->rt_space += delta; rt->rt_space += delta;
} }
static range_seg_t * static zfs_range_seg_t *
range_tree_find_impl(range_tree_t *rt, uint64_t start, uint64_t size) zfs_range_tree_find_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{ {
range_seg_max_t rsearch; range_seg_max_t rsearch;
uint64_t end = start + size; uint64_t end = start + size;
VERIFY(size != 0); VERIFY(size != 0);
rs_set_start(&rsearch, rt, start); zfs_rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, end); zfs_rs_set_end(&rsearch, rt, end);
return (zfs_btree_find(&rt->rt_root, &rsearch, NULL)); return (zfs_btree_find(&rt->rt_root, &rsearch, NULL));
} }
range_seg_t * zfs_range_seg_t *
range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size) zfs_range_tree_find(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{ {
if (rt->rt_type == RANGE_SEG64) if (rt->rt_type == ZFS_RANGE_SEG64)
ASSERT3U(start + size, >, start); ASSERT3U(start + size, >, start);
range_seg_t *rs = range_tree_find_impl(rt, start, size); zfs_range_seg_t *rs = zfs_range_tree_find_impl(rt, start, size);
if (rs != NULL && rs_get_start(rs, rt) <= start && if (rs != NULL && zfs_rs_get_start(rs, rt) <= start &&
rs_get_end(rs, rt) >= start + size) { zfs_rs_get_end(rs, rt) >= start + size) {
return (rs); return (rs);
} }
return (NULL); return (NULL);
} }
void void
range_tree_verify_not_present(range_tree_t *rt, uint64_t off, uint64_t size) zfs_range_tree_verify_not_present(zfs_range_tree_t *rt, uint64_t off,
uint64_t size)
{ {
range_seg_t *rs = range_tree_find(rt, off, size); zfs_range_seg_t *rs = zfs_range_tree_find(rt, off, size);
if (rs != NULL) if (rs != NULL)
panic("segment already in tree; rs=%p", (void *)rs); panic("segment already in tree; rs=%p", (void *)rs);
} }
boolean_t boolean_t
range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size) zfs_range_tree_contains(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{ {
return (range_tree_find(rt, start, size) != NULL); return (zfs_range_tree_find(rt, start, size) != NULL);
} }
/* /*
@ -633,31 +638,32 @@ range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size)
* isn't. * isn't.
*/ */
boolean_t boolean_t
range_tree_find_in(range_tree_t *rt, uint64_t start, uint64_t size, zfs_range_tree_find_in(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
uint64_t *ostart, uint64_t *osize) uint64_t *ostart, uint64_t *osize)
{ {
if (rt->rt_type == RANGE_SEG64) if (rt->rt_type == ZFS_RANGE_SEG64)
ASSERT3U(start + size, >, start); ASSERT3U(start + size, >, start);
range_seg_max_t rsearch; range_seg_max_t rsearch;
rs_set_start(&rsearch, rt, start); zfs_rs_set_start(&rsearch, rt, start);
rs_set_end_raw(&rsearch, rt, rs_get_start_raw(&rsearch, rt) + 1); zfs_rs_set_end_raw(&rsearch, rt, zfs_rs_get_start_raw(&rsearch, rt) +
1);
zfs_btree_index_t where; zfs_btree_index_t where;
range_seg_t *rs = zfs_btree_find(&rt->rt_root, &rsearch, &where); zfs_range_seg_t *rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
if (rs != NULL) { if (rs != NULL) {
*ostart = start; *ostart = start;
*osize = MIN(size, rs_get_end(rs, rt) - start); *osize = MIN(size, zfs_rs_get_end(rs, rt) - start);
return (B_TRUE); return (B_TRUE);
} }
rs = zfs_btree_next(&rt->rt_root, &where, &where); rs = zfs_btree_next(&rt->rt_root, &where, &where);
if (rs == NULL || rs_get_start(rs, rt) > start + size) if (rs == NULL || zfs_rs_get_start(rs, rt) > start + size)
return (B_FALSE); return (B_FALSE);
*ostart = rs_get_start(rs, rt); *ostart = zfs_rs_get_start(rs, rt);
*osize = MIN(start + size, rs_get_end(rs, rt)) - *osize = MIN(start + size, zfs_rs_get_end(rs, rt)) -
rs_get_start(rs, rt); zfs_rs_get_start(rs, rt);
return (B_TRUE); return (B_TRUE);
} }
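A common way to consume zfs_range_tree_find_in() is as an iterator over the pieces of a window that are present in the tree. The sketch below does that; visit_present_chunks and process_chunk are hypothetical names standing in for the caller's work:

static void
visit_present_chunks(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
        uint64_t ostart, osize;

        while (size > 0 &&
            zfs_range_tree_find_in(rt, start, size, &ostart, &osize)) {
                process_chunk(ostart, osize);   /* hypothetical consumer */
                size -= (ostart + osize) - start;
                start = ostart + osize;
        }
}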
@ -666,29 +672,29 @@ range_tree_find_in(range_tree_t *rt, uint64_t start, uint64_t size,
* it is currently in the tree. * it is currently in the tree.
*/ */
void void
range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size) zfs_range_tree_clear(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{ {
range_seg_t *rs; zfs_range_seg_t *rs;
if (size == 0) if (size == 0)
return; return;
if (rt->rt_type == RANGE_SEG64) if (rt->rt_type == ZFS_RANGE_SEG64)
ASSERT3U(start + size, >, start); ASSERT3U(start + size, >, start);
while ((rs = range_tree_find_impl(rt, start, size)) != NULL) { while ((rs = zfs_range_tree_find_impl(rt, start, size)) != NULL) {
uint64_t free_start = MAX(rs_get_start(rs, rt), start); uint64_t free_start = MAX(zfs_rs_get_start(rs, rt), start);
uint64_t free_end = MIN(rs_get_end(rs, rt), start + size); uint64_t free_end = MIN(zfs_rs_get_end(rs, rt), start + size);
range_tree_remove(rt, free_start, free_end - free_start); zfs_range_tree_remove(rt, free_start, free_end - free_start);
} }
} }
void void
range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst) zfs_range_tree_swap(zfs_range_tree_t **rtsrc, zfs_range_tree_t **rtdst)
{ {
range_tree_t *rt; zfs_range_tree_t *rt;
ASSERT0(range_tree_space(*rtdst)); ASSERT0(zfs_range_tree_space(*rtdst));
ASSERT0(zfs_btree_numnodes(&(*rtdst)->rt_root)); ASSERT0(zfs_btree_numnodes(&(*rtdst)->rt_root));
rt = *rtsrc; rt = *rtsrc;
@ -697,19 +703,20 @@ range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst)
} }
void void
range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg) zfs_range_tree_vacate(zfs_range_tree_t *rt, zfs_range_tree_func_t *func,
void *arg)
{ {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_vacate != NULL) if (rt->rt_ops != NULL && rt->rt_ops->rtop_vacate != NULL)
rt->rt_ops->rtop_vacate(rt, rt->rt_arg); rt->rt_ops->rtop_vacate(rt, rt->rt_arg);
if (func != NULL) { if (func != NULL) {
range_seg_t *rs; zfs_range_seg_t *rs;
zfs_btree_index_t *cookie = NULL; zfs_btree_index_t *cookie = NULL;
while ((rs = zfs_btree_destroy_nodes(&rt->rt_root, &cookie)) != while ((rs = zfs_btree_destroy_nodes(&rt->rt_root, &cookie)) !=
NULL) { NULL) {
func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) - func(arg, zfs_rs_get_start(rs, rt),
rs_get_start(rs, rt)); zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt));
} }
} else { } else {
zfs_btree_clear(&rt->rt_root); zfs_btree_clear(&rt->rt_root);
@ -720,39 +727,40 @@ range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
} }
void void
range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg) zfs_range_tree_walk(zfs_range_tree_t *rt, zfs_range_tree_func_t *func,
void *arg)
{ {
zfs_btree_index_t where; zfs_btree_index_t where;
for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); for (zfs_range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where);
rs != NULL; rs = zfs_btree_next(&rt->rt_root, &where, &where)) { rs != NULL; rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) - func(arg, zfs_rs_get_start(rs, rt), zfs_rs_get_end(rs, rt) -
rs_get_start(rs, rt)); zfs_rs_get_start(rs, rt));
} }
} }
range_seg_t * zfs_range_seg_t *
range_tree_first(range_tree_t *rt) zfs_range_tree_first(zfs_range_tree_t *rt)
{ {
return (zfs_btree_first(&rt->rt_root, NULL)); return (zfs_btree_first(&rt->rt_root, NULL));
} }
uint64_t uint64_t
range_tree_space(range_tree_t *rt) zfs_range_tree_space(zfs_range_tree_t *rt)
{ {
return (rt->rt_space); return (rt->rt_space);
} }
uint64_t uint64_t
range_tree_numsegs(range_tree_t *rt) zfs_range_tree_numsegs(zfs_range_tree_t *rt)
{ {
return ((rt == NULL) ? 0 : zfs_btree_numnodes(&rt->rt_root)); return ((rt == NULL) ? 0 : zfs_btree_numnodes(&rt->rt_root));
} }
boolean_t boolean_t
range_tree_is_empty(range_tree_t *rt) zfs_range_tree_is_empty(zfs_range_tree_t *rt)
{ {
ASSERT(rt != NULL); ASSERT(rt != NULL);
return (range_tree_space(rt) == 0); return (zfs_range_tree_space(rt) == 0);
} }
/* /*
@ -760,46 +768,46 @@ range_tree_is_empty(range_tree_t *rt)
* from removefrom. Add non-overlapping leftovers to addto. * from removefrom. Add non-overlapping leftovers to addto.
*/ */
void void
range_tree_remove_xor_add_segment(uint64_t start, uint64_t end, zfs_range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
range_tree_t *removefrom, range_tree_t *addto) zfs_range_tree_t *removefrom, zfs_range_tree_t *addto)
{ {
zfs_btree_index_t where; zfs_btree_index_t where;
range_seg_max_t starting_rs; range_seg_max_t starting_rs;
rs_set_start(&starting_rs, removefrom, start); zfs_rs_set_start(&starting_rs, removefrom, start);
rs_set_end_raw(&starting_rs, removefrom, rs_get_start_raw(&starting_rs, zfs_rs_set_end_raw(&starting_rs, removefrom,
removefrom) + 1); zfs_rs_get_start_raw(&starting_rs, removefrom) + 1);
range_seg_t *curr = zfs_btree_find(&removefrom->rt_root, zfs_range_seg_t *curr = zfs_btree_find(&removefrom->rt_root,
&starting_rs, &where); &starting_rs, &where);
if (curr == NULL) if (curr == NULL)
curr = zfs_btree_next(&removefrom->rt_root, &where, &where); curr = zfs_btree_next(&removefrom->rt_root, &where, &where);
range_seg_t *next; zfs_range_seg_t *next;
for (; curr != NULL; curr = next) { for (; curr != NULL; curr = next) {
if (start == end) if (start == end)
return; return;
VERIFY3U(start, <, end); VERIFY3U(start, <, end);
/* there is no overlap */ /* there is no overlap */
if (end <= rs_get_start(curr, removefrom)) { if (end <= zfs_rs_get_start(curr, removefrom)) {
range_tree_add(addto, start, end - start); zfs_range_tree_add(addto, start, end - start);
return; return;
} }
uint64_t overlap_start = MAX(rs_get_start(curr, removefrom), uint64_t overlap_start = MAX(zfs_rs_get_start(curr, removefrom),
start); start);
uint64_t overlap_end = MIN(rs_get_end(curr, removefrom), uint64_t overlap_end = MIN(zfs_rs_get_end(curr, removefrom),
end); end);
uint64_t overlap_size = overlap_end - overlap_start; uint64_t overlap_size = overlap_end - overlap_start;
ASSERT3S(overlap_size, >, 0); ASSERT3S(overlap_size, >, 0);
range_seg_max_t rs; range_seg_max_t rs;
rs_copy(curr, &rs, removefrom); zfs_rs_copy(curr, &rs, removefrom);
range_tree_remove(removefrom, overlap_start, overlap_size); zfs_range_tree_remove(removefrom, overlap_start, overlap_size);
if (start < overlap_start) if (start < overlap_start)
range_tree_add(addto, start, overlap_start - start); zfs_range_tree_add(addto, start, overlap_start - start);
start = overlap_end; start = overlap_end;
next = zfs_btree_find(&removefrom->rt_root, &rs, &where); next = zfs_btree_find(&removefrom->rt_root, &rs, &where);
@ -814,7 +822,7 @@ range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
* area to process. * area to process.
*/ */
if (next != NULL) { if (next != NULL) {
ASSERT(start == end || start == rs_get_end(&rs, ASSERT(start == end || start == zfs_rs_get_end(&rs,
removefrom)); removefrom));
} }
@ -824,7 +832,7 @@ range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
if (start != end) { if (start != end) {
VERIFY3U(start, <, end); VERIFY3U(start, <, end);
range_tree_add(addto, start, end - start); zfs_range_tree_add(addto, start, end - start);
} else { } else {
VERIFY3U(start, ==, end); VERIFY3U(start, ==, end);
} }
@ -835,33 +843,33 @@ range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
* from removefrom. Otherwise, add it to addto. * from removefrom. Otherwise, add it to addto.
*/ */
void void
range_tree_remove_xor_add(range_tree_t *rt, range_tree_t *removefrom, zfs_range_tree_remove_xor_add(zfs_range_tree_t *rt,
range_tree_t *addto) zfs_range_tree_t *removefrom, zfs_range_tree_t *addto)
{ {
zfs_btree_index_t where; zfs_btree_index_t where;
for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs; for (zfs_range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs;
rs = zfs_btree_next(&rt->rt_root, &where, &where)) { rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
range_tree_remove_xor_add_segment(rs_get_start(rs, rt), zfs_range_tree_remove_xor_add_segment(zfs_rs_get_start(rs, rt),
rs_get_end(rs, rt), removefrom, addto); zfs_rs_get_end(rs, rt), removefrom, addto);
} }
} }
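These xor-add helpers are what the log spacemap replay hunk later in this commit uses (via the segment-level variant) to fold SM_ALLOC/SM_FREE records into the unflushed allocs/frees trees. A sketch of the semantics with illustrative offsets; xor_add_sketch is hypothetical and all three trees are assumed to be compatible and freshly created (empty):

static void
xor_add_sketch(zfs_range_tree_t *rt, zfs_range_tree_t *removefrom,
    zfs_range_tree_t *addto)
{
        zfs_range_tree_add(removefrom, 0x0, 0x1000);    /* already recorded */
        zfs_range_tree_add(rt, 0x800, 0x1000);          /* new, partial overlap */

        zfs_range_tree_remove_xor_add(rt, removefrom, addto);

        /* The overlap [0x800, 0x1000) was cancelled out of removefrom. */
        ASSERT3U(zfs_range_tree_space(removefrom), ==, 0x800);
        /* The non-overlapping remainder [0x1000, 0x1800) moved to addto. */
        ASSERT(zfs_range_tree_contains(addto, 0x1000, 0x800));
}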
uint64_t uint64_t
range_tree_min(range_tree_t *rt) zfs_range_tree_min(zfs_range_tree_t *rt)
{ {
range_seg_t *rs = zfs_btree_first(&rt->rt_root, NULL); zfs_range_seg_t *rs = zfs_btree_first(&rt->rt_root, NULL);
return (rs != NULL ? rs_get_start(rs, rt) : 0); return (rs != NULL ? zfs_rs_get_start(rs, rt) : 0);
} }
uint64_t uint64_t
range_tree_max(range_tree_t *rt) zfs_range_tree_max(zfs_range_tree_t *rt)
{ {
range_seg_t *rs = zfs_btree_last(&rt->rt_root, NULL); zfs_range_seg_t *rs = zfs_btree_last(&rt->rt_root, NULL);
return (rs != NULL ? rs_get_end(rs, rt) : 0); return (rs != NULL ? zfs_rs_get_end(rs, rt) : 0);
} }
uint64_t uint64_t
range_tree_span(range_tree_t *rt) zfs_range_tree_span(zfs_range_tree_t *rt)
{ {
return (range_tree_max(rt) - range_tree_min(rt)); return (zfs_range_tree_max(rt) - zfs_range_tree_min(rt));
} }
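Note that zfs_range_tree_span() measures the distance from the lowest allocated offset to the highest one, not the amount of allocated space, and both zfs_range_tree_min() and zfs_range_tree_max() return 0 on an empty tree. A quick illustration (span_vs_space is hypothetical; rt is assumed empty on entry):

static void
span_vs_space(zfs_range_tree_t *rt)
{
        zfs_range_tree_add(rt, 0x1000, 0x1000);         /* [0x1000, 0x2000)   */
        zfs_range_tree_add(rt, 0x10000, 0x1000);        /* [0x10000, 0x11000) */

        ASSERT3U(zfs_range_tree_space(rt), ==, 0x2000); /* 8 KiB allocated    */
        ASSERT3U(zfs_range_tree_span(rt), ==, 0x10000); /* 64 KiB min-to-max  */
}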

View File

@ -9861,7 +9861,7 @@ vdev_indirect_state_sync_verify(vdev_t *vd)
* happen in syncing context, the obsolete segments * happen in syncing context, the obsolete segments
* tree must be empty when we start syncing. * tree must be empty when we start syncing.
*/ */
ASSERT0(range_tree_space(vd->vdev_obsolete_segments)); ASSERT0(zfs_range_tree_space(vd->vdev_obsolete_segments));
} }
/* /*

View File

@ -235,9 +235,9 @@ spa_checkpoint_discard_sync_callback(space_map_entry_t *sme, void *arg)
* potentially save ourselves from future headaches. * potentially save ourselves from future headaches.
*/ */
mutex_enter(&ms->ms_lock); mutex_enter(&ms->ms_lock);
if (range_tree_is_empty(ms->ms_freeing)) if (zfs_range_tree_is_empty(ms->ms_freeing))
vdev_dirty(vd, VDD_METASLAB, ms, sdc->sdc_txg); vdev_dirty(vd, VDD_METASLAB, ms, sdc->sdc_txg);
range_tree_add(ms->ms_freeing, sme->sme_offset, sme->sme_run); zfs_range_tree_add(ms->ms_freeing, sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock); mutex_exit(&ms->ms_lock);
ASSERT3U(vd->vdev_spa->spa_checkpoint_info.sci_dspace, >=, ASSERT3U(vd->vdev_spa->spa_checkpoint_info.sci_dspace, >=,

View File

@ -1108,11 +1108,11 @@ spa_ld_log_sm_cb(space_map_entry_t *sme, void *arg)
switch (sme->sme_type) { switch (sme->sme_type) {
case SM_ALLOC: case SM_ALLOC:
range_tree_remove_xor_add_segment(offset, offset + size, zfs_range_tree_remove_xor_add_segment(offset, offset + size,
ms->ms_unflushed_frees, ms->ms_unflushed_allocs); ms->ms_unflushed_frees, ms->ms_unflushed_allocs);
break; break;
case SM_FREE: case SM_FREE:
range_tree_remove_xor_add_segment(offset, offset + size, zfs_range_tree_remove_xor_add_segment(offset, offset + size,
ms->ms_unflushed_allocs, ms->ms_unflushed_frees); ms->ms_unflushed_allocs, ms->ms_unflushed_frees);
break; break;
default: default:
@ -1251,14 +1251,14 @@ out:
m != NULL; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) { m != NULL; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) {
mutex_enter(&m->ms_lock); mutex_enter(&m->ms_lock);
m->ms_allocated_space = space_map_allocated(m->ms_sm) + m->ms_allocated_space = space_map_allocated(m->ms_sm) +
range_tree_space(m->ms_unflushed_allocs) - zfs_range_tree_space(m->ms_unflushed_allocs) -
range_tree_space(m->ms_unflushed_frees); zfs_range_tree_space(m->ms_unflushed_frees);
vdev_t *vd = m->ms_group->mg_vd; vdev_t *vd = m->ms_group->mg_vd;
metaslab_space_update(vd, m->ms_group->mg_class, metaslab_space_update(vd, m->ms_group->mg_class,
range_tree_space(m->ms_unflushed_allocs), 0, 0); zfs_range_tree_space(m->ms_unflushed_allocs), 0, 0);
metaslab_space_update(vd, m->ms_group->mg_class, metaslab_space_update(vd, m->ms_group->mg_class,
-range_tree_space(m->ms_unflushed_frees), 0, 0); -zfs_range_tree_space(m->ms_unflushed_frees), 0, 0);
ASSERT0(m->ms_weight & METASLAB_ACTIVE_MASK); ASSERT0(m->ms_weight & METASLAB_ACTIVE_MASK);
metaslab_recalculate_weight_and_sort(m); metaslab_recalculate_weight_and_sort(m);
@ -1317,8 +1317,8 @@ spa_ld_unflushed_txgs(vdev_t *vd)
ms->ms_unflushed_txg = entry.msp_unflushed_txg; ms->ms_unflushed_txg = entry.msp_unflushed_txg;
ms->ms_unflushed_dirty = B_FALSE; ms->ms_unflushed_dirty = B_FALSE;
ASSERT(range_tree_is_empty(ms->ms_unflushed_allocs)); ASSERT(zfs_range_tree_is_empty(ms->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(ms->ms_unflushed_frees)); ASSERT(zfs_range_tree_is_empty(ms->ms_unflushed_frees));
if (ms->ms_unflushed_txg != 0) { if (ms->ms_unflushed_txg != 0) {
mutex_enter(&spa->spa_flushed_ms_lock); mutex_enter(&spa->spa_flushed_ms_lock);
avl_add(&spa->spa_metaslabs_by_flushed, ms); avl_add(&spa->spa_metaslabs_by_flushed, ms);

View File

@ -393,7 +393,7 @@ space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
typedef struct space_map_load_arg { typedef struct space_map_load_arg {
space_map_t *smla_sm; space_map_t *smla_sm;
range_tree_t *smla_rt; zfs_range_tree_t *smla_rt;
maptype_t smla_type; maptype_t smla_type;
} space_map_load_arg_t; } space_map_load_arg_t;
@ -402,11 +402,13 @@ space_map_load_callback(space_map_entry_t *sme, void *arg)
{ {
space_map_load_arg_t *smla = arg; space_map_load_arg_t *smla = arg;
if (sme->sme_type == smla->smla_type) { if (sme->sme_type == smla->smla_type) {
VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=, VERIFY3U(zfs_range_tree_space(smla->smla_rt) + sme->sme_run, <=,
smla->smla_sm->sm_size); smla->smla_sm->sm_size);
range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run); zfs_range_tree_add(smla->smla_rt, sme->sme_offset,
sme->sme_run);
} else { } else {
range_tree_remove(smla->smla_rt, sme->sme_offset, sme->sme_run); zfs_range_tree_remove(smla->smla_rt, sme->sme_offset,
sme->sme_run);
} }
return (0); return (0);
@ -417,15 +419,15 @@ space_map_load_callback(space_map_entry_t *sme, void *arg)
* read the first 'length' bytes of the spacemap. * read the first 'length' bytes of the spacemap.
*/ */
int int
space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype, space_map_load_length(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype,
uint64_t length) uint64_t length)
{ {
space_map_load_arg_t smla; space_map_load_arg_t smla;
VERIFY0(range_tree_space(rt)); VERIFY0(zfs_range_tree_space(rt));
if (maptype == SM_FREE) if (maptype == SM_FREE)
range_tree_add(rt, sm->sm_start, sm->sm_size); zfs_range_tree_add(rt, sm->sm_start, sm->sm_size);
smla.smla_rt = rt; smla.smla_rt = rt;
smla.smla_sm = sm; smla.smla_sm = sm;
@ -434,7 +436,7 @@ space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
space_map_load_callback, &smla); space_map_load_callback, &smla);
if (err != 0) if (err != 0)
range_tree_vacate(rt, NULL, NULL); zfs_range_tree_vacate(rt, NULL, NULL);
return (err); return (err);
} }
@ -444,7 +446,7 @@ space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
* are added to the range tree, other segment types are removed. * are added to the range tree, other segment types are removed.
*/ */
int int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype) space_map_load(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype)
{ {
return (space_map_load_length(sm, rt, maptype, space_map_length(sm))); return (space_map_load_length(sm, rt, maptype, space_map_length(sm)));
} }
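Putting the space map and range tree pieces together, a sketch of loading a space map's allocated segments into a fresh tree: the tree must be empty on entry (the VERIFY0 above), and a failed load vacates whatever was read before returning. dump_allocated is hypothetical, and print_segment() is the assumed walk callback from the earlier range tree sketch:

static int
dump_allocated(space_map_t *sm)
{
        zfs_range_tree_t *rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
            NULL, 0, 0);
        int err = space_map_load(sm, rt, SM_ALLOC);

        if (err == 0)
                zfs_range_tree_walk(rt, print_segment, NULL);

        zfs_range_tree_vacate(rt, NULL, NULL);
        zfs_range_tree_destroy(rt);
        return (err);
}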
@ -460,7 +462,7 @@ space_map_histogram_clear(space_map_t *sm)
} }
boolean_t boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt) space_map_histogram_verify(space_map_t *sm, zfs_range_tree_t *rt)
{ {
/* /*
* Verify that the in-core range tree does not have any * Verify that the in-core range tree does not have any
@ -474,7 +476,7 @@ space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
} }
void void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx) space_map_histogram_add(space_map_t *sm, zfs_range_tree_t *rt, dmu_tx_t *tx)
{ {
int idx = 0; int idx = 0;
@ -667,7 +669,7 @@ space_map_write_seg(space_map_t *sm, uint64_t rstart, uint64_t rend,
* take effect. * take effect.
*/ */
static void static void
space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype, space_map_write_impl(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype,
uint64_t vdev_id, dmu_tx_t *tx) uint64_t vdev_id, dmu_tx_t *tx)
{ {
spa_t *spa = tx->tx_pool->dp_spa; spa_t *spa = tx->tx_pool->dp_spa;
@ -700,12 +702,12 @@ space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
zfs_btree_t *t = &rt->rt_root; zfs_btree_t *t = &rt->rt_root;
zfs_btree_index_t where; zfs_btree_index_t where;
for (range_seg_t *rs = zfs_btree_first(t, &where); rs != NULL; for (zfs_range_seg_t *rs = zfs_btree_first(t, &where); rs != NULL;
rs = zfs_btree_next(t, &where, &where)) { rs = zfs_btree_next(t, &where, &where)) {
uint64_t offset = (rs_get_start(rs, rt) - sm->sm_start) >> uint64_t offset = (zfs_rs_get_start(rs, rt) - sm->sm_start) >>
sm->sm_shift;
uint64_t length = (rs_get_end(rs, rt) - rs_get_start(rs, rt)) >>
sm->sm_shift; sm->sm_shift;
uint64_t length = (zfs_rs_get_end(rs, rt) -
zfs_rs_get_start(rs, rt)) >> sm->sm_shift;
uint8_t words = 1; uint8_t words = 1;
/* /*
@ -730,8 +732,9 @@ space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
random_in_range(100) == 0))) random_in_range(100) == 0)))
words = 2; words = 2;
space_map_write_seg(sm, rs_get_start(rs, rt), rs_get_end(rs, space_map_write_seg(sm, zfs_rs_get_start(rs, rt),
rt), maptype, vdev_id, words, &db, FTAG, tx); zfs_rs_get_end(rs, rt), maptype, vdev_id, words, &db,
FTAG, tx);
} }
dmu_buf_rele(db, FTAG); dmu_buf_rele(db, FTAG);
@ -753,7 +756,7 @@ space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
* for synchronizing writes to the space map. * for synchronizing writes to the space map.
*/ */
void void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype, space_map_write(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype,
uint64_t vdev_id, dmu_tx_t *tx) uint64_t vdev_id, dmu_tx_t *tx)
{ {
ASSERT(dsl_pool_sync_context(dmu_objset_pool(sm->sm_os))); ASSERT(dsl_pool_sync_context(dmu_objset_pool(sm->sm_os)));
@ -768,18 +771,18 @@ space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
*/ */
sm->sm_phys->smp_object = sm->sm_object; sm->sm_phys->smp_object = sm->sm_object;
if (range_tree_is_empty(rt)) { if (zfs_range_tree_is_empty(rt)) {
VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object); VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
return; return;
} }
if (maptype == SM_ALLOC) if (maptype == SM_ALLOC)
sm->sm_phys->smp_alloc += range_tree_space(rt); sm->sm_phys->smp_alloc += zfs_range_tree_space(rt);
else else
sm->sm_phys->smp_alloc -= range_tree_space(rt); sm->sm_phys->smp_alloc -= zfs_range_tree_space(rt);
uint64_t nodes = zfs_btree_numnodes(&rt->rt_root); uint64_t nodes = zfs_btree_numnodes(&rt->rt_root);
uint64_t rt_space = range_tree_space(rt); uint64_t rt_space = zfs_range_tree_space(rt);
space_map_write_impl(sm, rt, maptype, vdev_id, tx); space_map_write_impl(sm, rt, maptype, vdev_id, tx);
@ -788,7 +791,7 @@ space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
* while we were in the middle of writing it out. * while we were in the middle of writing it out.
*/ */
VERIFY3U(nodes, ==, zfs_btree_numnodes(&rt->rt_root)); VERIFY3U(nodes, ==, zfs_btree_numnodes(&rt->rt_root));
VERIFY3U(range_tree_space(rt), ==, rt_space); VERIFY3U(zfs_range_tree_space(rt), ==, rt_space);
} }
static int static int
@ -960,7 +963,7 @@ space_map_free(space_map_t *sm, dmu_tx_t *tx)
* the given space map. * the given space map.
*/ */
uint64_t uint64_t
space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt, space_map_estimate_optimal_size(space_map_t *sm, zfs_range_tree_t *rt,
uint64_t vdev_id) uint64_t vdev_id)
{ {
spa_t *spa = dmu_objset_spa(sm->sm_os); spa_t *spa = dmu_objset_spa(sm->sm_os);

View File

@ -107,14 +107,14 @@ space_reftree_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
* Convert (or add) a range tree into a reference tree. * Convert (or add) a range tree into a reference tree.
*/ */
void void
space_reftree_add_map(avl_tree_t *t, range_tree_t *rt, int64_t refcnt) space_reftree_add_map(avl_tree_t *t, zfs_range_tree_t *rt, int64_t refcnt)
{ {
zfs_btree_index_t where; zfs_btree_index_t where;
for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs; rs = for (zfs_range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs;
zfs_btree_next(&rt->rt_root, &where, &where)) { rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
space_reftree_add_seg(t, rs_get_start(rs, rt), rs_get_end(rs, space_reftree_add_seg(t, zfs_rs_get_start(rs, rt),
rt), refcnt); zfs_rs_get_end(rs, rt), refcnt);
} }
} }
@ -123,13 +123,13 @@ space_reftree_add_map(avl_tree_t *t, range_tree_t *rt, int64_t refcnt)
* all members of the reference tree for which refcnt >= minref. * all members of the reference tree for which refcnt >= minref.
*/ */
void void
space_reftree_generate_map(avl_tree_t *t, range_tree_t *rt, int64_t minref) space_reftree_generate_map(avl_tree_t *t, zfs_range_tree_t *rt, int64_t minref)
{ {
uint64_t start = -1ULL; uint64_t start = -1ULL;
int64_t refcnt = 0; int64_t refcnt = 0;
space_ref_t *sr; space_ref_t *sr;
range_tree_vacate(rt, NULL, NULL); zfs_range_tree_vacate(rt, NULL, NULL);
for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) { for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) {
refcnt += sr->sr_refcnt; refcnt += sr->sr_refcnt;
@ -142,7 +142,8 @@ space_reftree_generate_map(avl_tree_t *t, range_tree_t *rt, int64_t minref)
uint64_t end = sr->sr_offset; uint64_t end = sr->sr_offset;
ASSERT(start <= end); ASSERT(start <= end);
if (end > start) if (end > start)
range_tree_add(rt, start, end - start); zfs_range_tree_add(rt, start, end -
start);
start = -1ULL; start = -1ULL;
} }
} }
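The reference tree gives a compact way to combine several range trees: add each one with a weight, then regenerate a range tree of everything whose reference count clears a threshold (note that space_reftree_generate_map() vacates the destination first). A sketch of intersecting two trees this way; range_tree_intersect is hypothetical, and space_reftree_create()/space_reftree_destroy() are assumed to be the companion helpers in this file:

static void
range_tree_intersect(zfs_range_tree_t *a, zfs_range_tree_t *b,
    zfs_range_tree_t *out)
{
        avl_tree_t reftree;

        space_reftree_create(&reftree);
        space_reftree_add_map(&reftree, a, 1);  /* +1 across every segment of a */
        space_reftree_add_map(&reftree, b, 1);  /* +1 across every segment of b */
        /* Keep only the ranges covered by both inputs (refcnt >= 2). */
        space_reftree_generate_map(&reftree, out, 2);
        space_reftree_destroy(&reftree);
}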

View File

@ -677,8 +677,8 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL); rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL, vd->vdev_obsolete_segments = zfs_range_tree_create(NULL,
0, 0); ZFS_RANGE_SEG64, NULL, 0, 0);
/* /*
* Initialize rate limit structs for events. We rate limit ZIO delay * Initialize rate limit structs for events. We rate limit ZIO delay
@ -732,8 +732,8 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL); cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < DTL_TYPES; t++) { for (int t = 0; t < DTL_TYPES; t++) {
vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0, vd->vdev_dtl[t] = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
0); NULL, 0, 0);
} }
txg_list_create(&vd->vdev_ms_list, spa, txg_list_create(&vd->vdev_ms_list, spa,
@ -1155,8 +1155,8 @@ vdev_free(vdev_t *vd)
mutex_enter(&vd->vdev_dtl_lock); mutex_enter(&vd->vdev_dtl_lock);
space_map_close(vd->vdev_dtl_sm); space_map_close(vd->vdev_dtl_sm);
for (int t = 0; t < DTL_TYPES; t++) { for (int t = 0; t < DTL_TYPES; t++) {
range_tree_vacate(vd->vdev_dtl[t], NULL, NULL); zfs_range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
range_tree_destroy(vd->vdev_dtl[t]); zfs_range_tree_destroy(vd->vdev_dtl[t]);
} }
mutex_exit(&vd->vdev_dtl_lock); mutex_exit(&vd->vdev_dtl_lock);
@ -1173,7 +1173,7 @@ vdev_free(vdev_t *vd)
space_map_close(vd->vdev_obsolete_sm); space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL; vd->vdev_obsolete_sm = NULL;
} }
range_tree_destroy(vd->vdev_obsolete_segments); zfs_range_tree_destroy(vd->vdev_obsolete_segments);
rw_destroy(&vd->vdev_indirect_rwlock); rw_destroy(&vd->vdev_indirect_rwlock);
mutex_destroy(&vd->vdev_obsolete_lock); mutex_destroy(&vd->vdev_obsolete_lock);
@ -1283,7 +1283,7 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
tvd->vdev_indirect_config = svd->vdev_indirect_config; tvd->vdev_indirect_config = svd->vdev_indirect_config;
tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping; tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
tvd->vdev_indirect_births = svd->vdev_indirect_births; tvd->vdev_indirect_births = svd->vdev_indirect_births;
range_tree_swap(&svd->vdev_obsolete_segments, zfs_range_tree_swap(&svd->vdev_obsolete_segments,
&tvd->vdev_obsolete_segments); &tvd->vdev_obsolete_segments);
tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm; tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
svd->vdev_indirect_config.vic_mapping_object = 0; svd->vdev_indirect_config.vic_mapping_object = 0;
@ -2969,22 +2969,22 @@ vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
void void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{ {
range_tree_t *rt = vd->vdev_dtl[t]; zfs_range_tree_t *rt = vd->vdev_dtl[t];
ASSERT(t < DTL_TYPES); ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev); ASSERT(vd != vd->vdev_spa->spa_root_vdev);
ASSERT(spa_writeable(vd->vdev_spa)); ASSERT(spa_writeable(vd->vdev_spa));
mutex_enter(&vd->vdev_dtl_lock); mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_contains(rt, txg, size)) if (!zfs_range_tree_contains(rt, txg, size))
range_tree_add(rt, txg, size); zfs_range_tree_add(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock); mutex_exit(&vd->vdev_dtl_lock);
} }
boolean_t boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{ {
range_tree_t *rt = vd->vdev_dtl[t]; zfs_range_tree_t *rt = vd->vdev_dtl[t];
boolean_t dirty = B_FALSE; boolean_t dirty = B_FALSE;
ASSERT(t < DTL_TYPES); ASSERT(t < DTL_TYPES);
@ -2999,8 +2999,8 @@ vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
* always checksummed. * always checksummed.
*/ */
mutex_enter(&vd->vdev_dtl_lock); mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(rt)) if (!zfs_range_tree_is_empty(rt))
dirty = range_tree_contains(rt, txg, size); dirty = zfs_range_tree_contains(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock); mutex_exit(&vd->vdev_dtl_lock);
return (dirty); return (dirty);
@ -3009,11 +3009,11 @@ vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
boolean_t boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t) vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{ {
range_tree_t *rt = vd->vdev_dtl[t]; zfs_range_tree_t *rt = vd->vdev_dtl[t];
boolean_t empty; boolean_t empty;
mutex_enter(&vd->vdev_dtl_lock); mutex_enter(&vd->vdev_dtl_lock);
empty = range_tree_is_empty(rt); empty = zfs_range_tree_is_empty(rt);
mutex_exit(&vd->vdev_dtl_lock); mutex_exit(&vd->vdev_dtl_lock);
return (empty); return (empty);
@ -3060,10 +3060,10 @@ static uint64_t
vdev_dtl_min(vdev_t *vd) vdev_dtl_min(vdev_t *vd)
{ {
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock)); ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0); ASSERT3U(zfs_range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children); ASSERT0(vd->vdev_children);
return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1); return (zfs_range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
} }
/* /*
@ -3073,10 +3073,10 @@ static uint64_t
vdev_dtl_max(vdev_t *vd) vdev_dtl_max(vdev_t *vd)
{ {
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock)); ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0); ASSERT3U(zfs_range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children); ASSERT0(vd->vdev_children);
return (range_tree_max(vd->vdev_dtl[DTL_MISSING])); return (zfs_range_tree_max(vd->vdev_dtl[DTL_MISSING]));
} }
/* /*
@ -3098,7 +3098,7 @@ vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
if (vd->vdev_resilver_deferred) if (vd->vdev_resilver_deferred)
return (B_FALSE); return (B_FALSE);
if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) if (zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
return (B_TRUE); return (B_TRUE);
if (rebuild_done) { if (rebuild_done) {
@ -3187,7 +3187,7 @@ vdev_dtl_reassess_impl(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
} }
if (scrub_txg != 0 && if (scrub_txg != 0 &&
!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) { !zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
wasempty = B_FALSE; wasempty = B_FALSE;
zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d " zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
"dtl:%llu/%llu errors:%llu", "dtl:%llu/%llu errors:%llu",
@ -3243,7 +3243,8 @@ vdev_dtl_reassess_impl(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
vd->vdev_dtl[DTL_MISSING], 1); vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_destroy(&reftree); space_reftree_destroy(&reftree);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) { if (!zfs_range_tree_is_empty(
vd->vdev_dtl[DTL_MISSING])) {
zfs_dbgmsg("update DTL_MISSING:%llu/%llu", zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
(u_longlong_t)vdev_dtl_min(vd), (u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd)); (u_longlong_t)vdev_dtl_max(vd));
@ -3251,12 +3252,13 @@ vdev_dtl_reassess_impl(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
zfs_dbgmsg("DTL_MISSING is now empty"); zfs_dbgmsg("DTL_MISSING is now empty");
} }
} }
range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); zfs_range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
range_tree_walk(vd->vdev_dtl[DTL_MISSING], zfs_range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_PARTIAL]); zfs_range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
if (scrub_done) if (scrub_done)
range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL); zfs_range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL,
range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); NULL);
zfs_range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
/* /*
* For the faulting case, treat members of a replacing vdev * For the faulting case, treat members of a replacing vdev
@ -3267,10 +3269,10 @@ vdev_dtl_reassess_impl(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
if (!vdev_readable(vd) || if (!vdev_readable(vd) ||
(faulting && vd->vdev_parent != NULL && (faulting && vd->vdev_parent != NULL &&
vd->vdev_parent->vdev_ops == &vdev_replacing_ops)) { vd->vdev_parent->vdev_ops == &vdev_replacing_ops)) {
range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); zfs_range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
} else { } else {
range_tree_walk(vd->vdev_dtl[DTL_MISSING], zfs_range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_OUTAGE]); zfs_range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
} }
/* /*
@ -3279,8 +3281,8 @@ vdev_dtl_reassess_impl(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
* the top level so that we persist the change. * the top level so that we persist the change.
*/ */
if (txg != 0 && if (txg != 0 &&
range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) { zfs_range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
if (vd->vdev_rebuild_txg != 0) { if (vd->vdev_rebuild_txg != 0) {
vd->vdev_rebuild_txg = 0; vd->vdev_rebuild_txg = 0;
vdev_config_dirty(vd->vdev_top); vdev_config_dirty(vd->vdev_top);
@ -3374,7 +3376,7 @@ vdev_dtl_load(vdev_t *vd)
{ {
spa_t *spa = vd->vdev_spa; spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset; objset_t *mos = spa->spa_meta_objset;
range_tree_t *rt; zfs_range_tree_t *rt;
int error = 0; int error = 0;
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) { if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
@ -3392,17 +3394,17 @@ vdev_dtl_load(vdev_t *vd)
return (error); return (error);
ASSERT(vd->vdev_dtl_sm != NULL); ASSERT(vd->vdev_dtl_sm != NULL);
rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC); error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
if (error == 0) { if (error == 0) {
mutex_enter(&vd->vdev_dtl_lock); mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add, zfs_range_tree_walk(rt, zfs_range_tree_add,
vd->vdev_dtl[DTL_MISSING]); vd->vdev_dtl[DTL_MISSING]);
mutex_exit(&vd->vdev_dtl_lock); mutex_exit(&vd->vdev_dtl_lock);
} }
range_tree_vacate(rt, NULL, NULL); zfs_range_tree_vacate(rt, NULL, NULL);
range_tree_destroy(rt); zfs_range_tree_destroy(rt);
return (error); return (error);
} }
@ -3496,9 +3498,9 @@ static void
vdev_dtl_sync(vdev_t *vd, uint64_t txg) vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{ {
spa_t *spa = vd->vdev_spa; spa_t *spa = vd->vdev_spa;
range_tree_t *rt = vd->vdev_dtl[DTL_MISSING]; zfs_range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
objset_t *mos = spa->spa_meta_objset; objset_t *mos = spa->spa_meta_objset;
range_tree_t *rtsync; zfs_range_tree_t *rtsync;
dmu_tx_t *tx; dmu_tx_t *tx;
uint64_t object = space_map_object(vd->vdev_dtl_sm); uint64_t object = space_map_object(vd->vdev_dtl_sm);
@ -3540,17 +3542,17 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
ASSERT(vd->vdev_dtl_sm != NULL); ASSERT(vd->vdev_dtl_sm != NULL);
} }
rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); rtsync = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
mutex_enter(&vd->vdev_dtl_lock); mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add, rtsync); zfs_range_tree_walk(rt, zfs_range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock); mutex_exit(&vd->vdev_dtl_lock);
space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx); space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx); space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(rtsync, NULL, NULL); zfs_range_tree_vacate(rtsync, NULL, NULL);
range_tree_destroy(rtsync); zfs_range_tree_destroy(rtsync);
/* /*
* If the object for the space map has changed then dirty * If the object for the space map has changed then dirty
@ -3620,7 +3622,7 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
if (vd->vdev_children == 0) { if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock); mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && if (!zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
vdev_writeable(vd)) { vdev_writeable(vd)) {
thismin = vdev_dtl_min(vd); thismin = vdev_dtl_min(vd);
@ -4064,7 +4066,7 @@ vdev_sync(vdev_t *vd, uint64_t txg)
ASSERT3U(txg, ==, spa->spa_syncing_txg); ASSERT3U(txg, ==, spa->spa_syncing_txg);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (range_tree_space(vd->vdev_obsolete_segments) > 0) { if (zfs_range_tree_space(vd->vdev_obsolete_segments) > 0) {
ASSERT(vd->vdev_removing || ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops); vd->vdev_ops == &vdev_indirect_ops);
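
The DTL hunks above all follow the same renamed lifecycle: create a ZFS_RANGE_SEG64 tree, mirror another tree into it with zfs_range_tree_walk() plus zfs_range_tree_add(), query it, then vacate and destroy it. A minimal sketch of that pattern is below; only the zfs_range_tree_* calls and argument shapes come from this diff, while the helper name and the assumption that the header is still <sys/range_tree.h> after the rename are illustrative, not part of the commit.

/*
 * Sketch (not in the commit): snapshot a DTL-style tree the way
 * vdev_dtl_sync() above does, using the renamed API.  Builds only
 * inside the ZFS tree; header name assumed unchanged by this commit.
 */
#include <sys/range_tree.h>

static uint64_t
dtl_snapshot_example(zfs_range_tree_t *dtl_missing)
{
	/* Temporary tree keyed by plain 64-bit offsets. */
	zfs_range_tree_t *snap = zfs_range_tree_create(NULL,
	    ZFS_RANGE_SEG64, NULL, 0, 0);
	uint64_t bytes;

	/* Copy every segment from the source tree into the snapshot. */
	zfs_range_tree_walk(dtl_missing, zfs_range_tree_add, snap);

	/* Total bytes covered by the copied segments. */
	bytes = zfs_range_tree_space(snap);

	/* Empty the tree, then free it, as the hunks above do. */
	zfs_range_tree_vacate(snap, NULL, NULL);
	zfs_range_tree_destroy(snap);

	return (bytes);
}
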

View File

@ -333,7 +333,7 @@ vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) { if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
mutex_enter(&vd->vdev_obsolete_lock); mutex_enter(&vd->vdev_obsolete_lock);
range_tree_add(vd->vdev_obsolete_segments, offset, size); zfs_range_tree_add(vd->vdev_obsolete_segments, offset, size);
mutex_exit(&vd->vdev_obsolete_lock); mutex_exit(&vd->vdev_obsolete_lock);
vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa)); vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
} }
@ -816,7 +816,7 @@ vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config; vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;
ASSERT3U(vic->vic_mapping_object, !=, 0); ASSERT3U(vic->vic_mapping_object, !=, 0);
ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0); ASSERT(zfs_range_tree_space(vd->vdev_obsolete_segments) > 0);
ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops); ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)); ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
@ -845,7 +845,7 @@ vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
space_map_write(vd->vdev_obsolete_sm, space_map_write(vd->vdev_obsolete_sm,
vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx); vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL); zfs_range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
} }
int int

View File

@ -330,13 +330,14 @@ vdev_initialize_block_free(abd_t *data)
static int static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data) vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{ {
range_tree_t *rt = vd->vdev_initialize_tree; zfs_range_tree_t *rt = vd->vdev_initialize_tree;
zfs_btree_t *bt = &rt->rt_root; zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t where; zfs_btree_index_t where;
for (range_seg_t *rs = zfs_btree_first(bt, &where); rs != NULL; for (zfs_range_seg_t *rs = zfs_btree_first(bt, &where); rs != NULL;
rs = zfs_btree_next(bt, &where, &where)) { rs = zfs_btree_next(bt, &where, &where)) {
uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt); uint64_t size = zfs_rs_get_end(rs, rt) -
zfs_rs_get_start(rs, rt);
/* Split range into legally-sized physical chunks */ /* Split range into legally-sized physical chunks */
uint64_t writes_required = uint64_t writes_required =
@ -346,7 +347,7 @@ vdev_initialize_ranges(vdev_t *vd, abd_t *data)
int error; int error;
error = vdev_initialize_write(vd, error = vdev_initialize_write(vd,
VDEV_LABEL_START_SIZE + rs_get_start(rs, rt) + VDEV_LABEL_START_SIZE + zfs_rs_get_start(rs, rt) +
(w * zfs_initialize_chunk_size), (w * zfs_initialize_chunk_size),
MIN(size - (w * zfs_initialize_chunk_size), MIN(size - (w * zfs_initialize_chunk_size),
zfs_initialize_chunk_size), data); zfs_initialize_chunk_size), data);
@ -440,13 +441,13 @@ vdev_initialize_calculate_progress(vdev_t *vd)
VERIFY0(metaslab_load(msp)); VERIFY0(metaslab_load(msp));
zfs_btree_index_t where; zfs_btree_index_t where;
range_tree_t *rt = msp->ms_allocatable; zfs_range_tree_t *rt = msp->ms_allocatable;
for (range_seg_t *rs = for (zfs_range_seg_t *rs =
zfs_btree_first(&rt->rt_root, &where); rs; zfs_btree_first(&rt->rt_root, &where); rs;
rs = zfs_btree_next(&rt->rt_root, &where, rs = zfs_btree_next(&rt->rt_root, &where,
&where)) { &where)) {
logical_rs.rs_start = rs_get_start(rs, rt); logical_rs.rs_start = zfs_rs_get_start(rs, rt);
logical_rs.rs_end = rs_get_end(rs, rt); logical_rs.rs_end = zfs_rs_get_end(rs, rt);
vdev_xlate_walk(vd, &logical_rs, vdev_xlate_walk(vd, &logical_rs,
vdev_initialize_xlate_progress, vd); vdev_initialize_xlate_progress, vd);
@ -503,7 +504,7 @@ vdev_initialize_xlate_range_add(void *arg, range_seg64_t *physical_rs)
ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start); ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);
range_tree_add(vd->vdev_initialize_tree, physical_rs->rs_start, zfs_range_tree_add(vd->vdev_initialize_tree, physical_rs->rs_start,
physical_rs->rs_end - physical_rs->rs_start); physical_rs->rs_end - physical_rs->rs_start);
} }
@ -539,8 +540,8 @@ vdev_initialize_thread(void *arg)
abd_t *deadbeef = vdev_initialize_block_alloc(); abd_t *deadbeef = vdev_initialize_block_alloc();
vd->vdev_initialize_tree = range_tree_create(NULL, RANGE_SEG64, NULL, vd->vdev_initialize_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
0, 0); NULL, 0, 0);
for (uint64_t i = 0; !vd->vdev_detached && for (uint64_t i = 0; !vd->vdev_detached &&
i < vd->vdev_top->vdev_ms_count; i++) { i < vd->vdev_top->vdev_ms_count; i++) {
@ -563,15 +564,15 @@ vdev_initialize_thread(void *arg)
unload_when_done = B_TRUE; unload_when_done = B_TRUE;
VERIFY0(metaslab_load(msp)); VERIFY0(metaslab_load(msp));
range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add, zfs_range_tree_walk(msp->ms_allocatable,
vd); vdev_initialize_range_add, vd);
mutex_exit(&msp->ms_lock); mutex_exit(&msp->ms_lock);
error = vdev_initialize_ranges(vd, deadbeef); error = vdev_initialize_ranges(vd, deadbeef);
metaslab_enable(msp, B_TRUE, unload_when_done); metaslab_enable(msp, B_TRUE, unload_when_done);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL); zfs_range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
if (error != 0) if (error != 0)
break; break;
} }
@ -584,7 +585,7 @@ vdev_initialize_thread(void *arg)
} }
mutex_exit(&vd->vdev_initialize_io_lock); mutex_exit(&vd->vdev_initialize_io_lock);
range_tree_destroy(vd->vdev_initialize_tree); zfs_range_tree_destroy(vd->vdev_initialize_tree);
vdev_initialize_block_free(deadbeef); vdev_initialize_block_free(deadbeef);
vd->vdev_initialize_tree = NULL; vd->vdev_initialize_tree = NULL;

View File

@ -3953,18 +3953,18 @@ vdev_raidz_expand_child_replacing(vdev_t *raidz_vd)
} }
static boolean_t static boolean_t
raidz_reflow_impl(vdev_t *vd, vdev_raidz_expand_t *vre, range_tree_t *rt, raidz_reflow_impl(vdev_t *vd, vdev_raidz_expand_t *vre, zfs_range_tree_t *rt,
dmu_tx_t *tx) dmu_tx_t *tx)
{ {
spa_t *spa = vd->vdev_spa; spa_t *spa = vd->vdev_spa;
uint_t ashift = vd->vdev_top->vdev_ashift; uint_t ashift = vd->vdev_top->vdev_ashift;
range_seg_t *rs = range_tree_first(rt); zfs_range_seg_t *rs = zfs_range_tree_first(rt);
if (rt == NULL) if (rt == NULL)
return (B_FALSE); return (B_FALSE);
uint64_t offset = rs_get_start(rs, rt); uint64_t offset = zfs_rs_get_start(rs, rt);
ASSERT(IS_P2ALIGNED(offset, 1 << ashift)); ASSERT(IS_P2ALIGNED(offset, 1 << ashift));
uint64_t size = rs_get_end(rs, rt) - offset; uint64_t size = zfs_rs_get_end(rs, rt) - offset;
ASSERT3U(size, >=, 1 << ashift); ASSERT3U(size, >=, 1 << ashift);
ASSERT(IS_P2ALIGNED(size, 1 << ashift)); ASSERT(IS_P2ALIGNED(size, 1 << ashift));
@ -4001,7 +4001,7 @@ raidz_reflow_impl(vdev_t *vd, vdev_raidz_expand_t *vre, range_tree_t *rt,
uint_t blocks = MIN(size >> ashift, next_overwrite_blkid - blkid); uint_t blocks = MIN(size >> ashift, next_overwrite_blkid - blkid);
size = (uint64_t)blocks << ashift; size = (uint64_t)blocks << ashift;
range_tree_remove(rt, offset, size); zfs_range_tree_remove(rt, offset, size);
uint_t reads = MIN(blocks, old_children); uint_t reads = MIN(blocks, old_children);
uint_t writes = MIN(blocks, vd->vdev_children); uint_t writes = MIN(blocks, vd->vdev_children);
@ -4553,12 +4553,13 @@ spa_raidz_expand_thread(void *arg, zthr_t *zthr)
* space (e.g. in ms_defer), and it's fine to copy that too. * space (e.g. in ms_defer), and it's fine to copy that too.
*/ */
uint64_t shift, start; uint64_t shift, start;
range_seg_type_t type = metaslab_calculate_range_tree_type( zfs_range_seg_type_t type = metaslab_calculate_range_tree_type(
raidvd, msp, &start, &shift); raidvd, msp, &start, &shift);
range_tree_t *rt = range_tree_create(NULL, type, NULL, zfs_range_tree_t *rt = zfs_range_tree_create(NULL, type, NULL,
start, shift); start, shift);
range_tree_add(rt, msp->ms_start, msp->ms_size); zfs_range_tree_add(rt, msp->ms_start, msp->ms_size);
range_tree_walk(msp->ms_allocatable, range_tree_remove, rt); zfs_range_tree_walk(msp->ms_allocatable, zfs_range_tree_remove,
rt);
mutex_exit(&msp->ms_lock); mutex_exit(&msp->ms_lock);
/* /*
@ -4572,8 +4573,8 @@ spa_raidz_expand_thread(void *arg, zthr_t *zthr)
int sectorsz = 1 << raidvd->vdev_ashift; int sectorsz = 1 << raidvd->vdev_ashift;
uint64_t ms_last_offset = msp->ms_start + uint64_t ms_last_offset = msp->ms_start +
msp->ms_size - sectorsz; msp->ms_size - sectorsz;
if (!range_tree_contains(rt, ms_last_offset, sectorsz)) { if (!zfs_range_tree_contains(rt, ms_last_offset, sectorsz)) {
range_tree_add(rt, ms_last_offset, sectorsz); zfs_range_tree_add(rt, ms_last_offset, sectorsz);
} }
/* /*
@ -4582,12 +4583,12 @@ spa_raidz_expand_thread(void *arg, zthr_t *zthr)
* discard any state that we have already processed. * discard any state that we have already processed.
*/ */
if (vre->vre_offset > msp->ms_start) { if (vre->vre_offset > msp->ms_start) {
range_tree_clear(rt, msp->ms_start, zfs_range_tree_clear(rt, msp->ms_start,
vre->vre_offset - msp->ms_start); vre->vre_offset - msp->ms_start);
} }
while (!zthr_iscancelled(zthr) && while (!zthr_iscancelled(zthr) &&
!range_tree_is_empty(rt) && !zfs_range_tree_is_empty(rt) &&
vre->vre_failed_offset == UINT64_MAX) { vre->vre_failed_offset == UINT64_MAX) {
/* /*
@ -4649,8 +4650,8 @@ spa_raidz_expand_thread(void *arg, zthr_t *zthr)
spa_config_exit(spa, SCL_CONFIG, FTAG); spa_config_exit(spa, SCL_CONFIG, FTAG);
metaslab_enable(msp, B_FALSE, B_FALSE); metaslab_enable(msp, B_FALSE, B_FALSE);
range_tree_vacate(rt, NULL, NULL); zfs_range_tree_vacate(rt, NULL, NULL);
range_tree_destroy(rt); zfs_range_tree_destroy(rt);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
raidvd = vdev_lookup_top(spa, vre->vre_vdev_id); raidvd = vdev_lookup_top(spa, vre->vre_vdev_id);
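
The raidz-expand hunk above uses an inversion idiom worth calling out: the allocated view of a metaslab is built by adding the whole metaslab range and then walking ms_allocatable with zfs_range_tree_remove as the callback. A short sketch of that idiom follows, assuming the metaslab fields shown in this diff; the helper name and parameter list are illustrative only.

/*
 * Sketch (not in the commit): build an "allocated space" tree by
 * inverting ms_allocatable, as spa_raidz_expand_thread() above does.
 */
static zfs_range_tree_t *
allocated_view_example(metaslab_t *msp, zfs_range_seg_type_t type,
    uint64_t start, uint64_t shift)
{
	zfs_range_tree_t *rt = zfs_range_tree_create(NULL, type, NULL,
	    start, shift);

	/* Start from "the entire metaslab is allocated" ... */
	zfs_range_tree_add(rt, msp->ms_start, msp->ms_size);

	/* ... then punch out whatever is still free. */
	zfs_range_tree_walk(msp->ms_allocatable, zfs_range_tree_remove, rt);

	return (rt);
}
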

View File

@ -641,10 +641,10 @@ vdev_rebuild_ranges(vdev_rebuild_t *vr)
zfs_btree_index_t idx; zfs_btree_index_t idx;
int error; int error;
for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL; for (zfs_range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
rs = zfs_btree_next(t, &idx, &idx)) { rs = zfs_btree_next(t, &idx, &idx)) {
uint64_t start = rs_get_start(rs, vr->vr_scan_tree); uint64_t start = zfs_rs_get_start(rs, vr->vr_scan_tree);
uint64_t size = rs_get_end(rs, vr->vr_scan_tree) - start; uint64_t size = zfs_rs_get_end(rs, vr->vr_scan_tree) - start;
/* /*
* zfs_scan_suspend_progress can be set to disable rebuild * zfs_scan_suspend_progress can be set to disable rebuild
@ -786,7 +786,8 @@ vdev_rebuild_thread(void *arg)
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys; vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
vr->vr_top_vdev = vd; vr->vr_top_vdev = vd;
vr->vr_scan_msp = NULL; vr->vr_scan_msp = NULL;
vr->vr_scan_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); vr->vr_scan_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL,
0, 0);
mutex_init(&vr->vr_io_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&vr->vr_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vr->vr_io_cv, NULL, CV_DEFAULT, NULL); cv_init(&vr->vr_io_cv, NULL, CV_DEFAULT, NULL);
@ -833,7 +834,7 @@ vdev_rebuild_thread(void *arg)
break; break;
} }
ASSERT0(range_tree_space(vr->vr_scan_tree)); ASSERT0(zfs_range_tree_space(vr->vr_scan_tree));
/* Disable any new allocations to this metaslab */ /* Disable any new allocations to this metaslab */
spa_config_exit(spa, SCL_CONFIG, FTAG); spa_config_exit(spa, SCL_CONFIG, FTAG);
@ -848,7 +849,7 @@ vdev_rebuild_thread(void *arg)
* on disk and therefore will be rebuilt. * on disk and therefore will be rebuilt.
*/ */
for (int j = 0; j < TXG_SIZE; j++) { for (int j = 0; j < TXG_SIZE; j++) {
if (range_tree_space(msp->ms_allocating[j])) { if (zfs_range_tree_space(msp->ms_allocating[j])) {
mutex_exit(&msp->ms_lock); mutex_exit(&msp->ms_lock);
mutex_exit(&msp->ms_sync_lock); mutex_exit(&msp->ms_sync_lock);
txg_wait_synced(dsl, 0); txg_wait_synced(dsl, 0);
@ -869,21 +870,21 @@ vdev_rebuild_thread(void *arg)
vr->vr_scan_tree, SM_ALLOC)); vr->vr_scan_tree, SM_ALLOC));
for (int i = 0; i < TXG_SIZE; i++) { for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(range_tree_space( ASSERT0(zfs_range_tree_space(
msp->ms_allocating[i])); msp->ms_allocating[i]));
} }
range_tree_walk(msp->ms_unflushed_allocs, zfs_range_tree_walk(msp->ms_unflushed_allocs,
range_tree_add, vr->vr_scan_tree); zfs_range_tree_add, vr->vr_scan_tree);
range_tree_walk(msp->ms_unflushed_frees, zfs_range_tree_walk(msp->ms_unflushed_frees,
range_tree_remove, vr->vr_scan_tree); zfs_range_tree_remove, vr->vr_scan_tree);
/* /*
* Remove ranges which have already been rebuilt based * Remove ranges which have already been rebuilt based
* on the last offset. This can happen when restarting * on the last offset. This can happen when restarting
* a scan after exporting and re-importing the pool. * a scan after exporting and re-importing the pool.
*/ */
range_tree_clear(vr->vr_scan_tree, 0, zfs_range_tree_clear(vr->vr_scan_tree, 0,
vrp->vrp_last_offset); vrp->vrp_last_offset);
} }
@ -904,7 +905,7 @@ vdev_rebuild_thread(void *arg)
* Walk the allocated space map and issue the rebuild I/O. * Walk the allocated space map and issue the rebuild I/O.
*/ */
error = vdev_rebuild_ranges(vr); error = vdev_rebuild_ranges(vr);
range_tree_vacate(vr->vr_scan_tree, NULL, NULL); zfs_range_tree_vacate(vr->vr_scan_tree, NULL, NULL);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
metaslab_enable(msp, B_FALSE, B_FALSE); metaslab_enable(msp, B_FALSE, B_FALSE);
@ -913,7 +914,7 @@ vdev_rebuild_thread(void *arg)
break; break;
} }
range_tree_destroy(vr->vr_scan_tree); zfs_range_tree_destroy(vr->vr_scan_tree);
spa_config_exit(spa, SCL_CONFIG, FTAG); spa_config_exit(spa, SCL_CONFIG, FTAG);
/* Wait for any remaining rebuild I/O to complete */ /* Wait for any remaining rebuild I/O to complete */

View File

@ -369,12 +369,13 @@ spa_vdev_removal_create(vdev_t *vd)
spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP); spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL); cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
svr->svr_allocd_segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); svr->svr_allocd_segs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
NULL, 0, 0);
svr->svr_vdev_id = vd->vdev_id; svr->svr_vdev_id = vd->vdev_id;
for (int i = 0; i < TXG_SIZE; i++) { for (int i = 0; i < TXG_SIZE; i++) {
svr->svr_frees[i] = range_tree_create(NULL, RANGE_SEG64, NULL, svr->svr_frees[i] = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
0, 0); NULL, 0, 0);
list_create(&svr->svr_new_segments[i], list_create(&svr->svr_new_segments[i],
sizeof (vdev_indirect_mapping_entry_t), sizeof (vdev_indirect_mapping_entry_t),
offsetof(vdev_indirect_mapping_entry_t, vime_node)); offsetof(vdev_indirect_mapping_entry_t, vime_node));
@ -389,11 +390,11 @@ spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
for (int i = 0; i < TXG_SIZE; i++) { for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(svr->svr_bytes_done[i]); ASSERT0(svr->svr_bytes_done[i]);
ASSERT0(svr->svr_max_offset_to_sync[i]); ASSERT0(svr->svr_max_offset_to_sync[i]);
range_tree_destroy(svr->svr_frees[i]); zfs_range_tree_destroy(svr->svr_frees[i]);
list_destroy(&svr->svr_new_segments[i]); list_destroy(&svr->svr_new_segments[i]);
} }
range_tree_destroy(svr->svr_allocd_segs); zfs_range_tree_destroy(svr->svr_allocd_segs);
mutex_destroy(&svr->svr_lock); mutex_destroy(&svr->svr_lock);
cv_destroy(&svr->svr_cv); cv_destroy(&svr->svr_cv);
kmem_free(svr, sizeof (*svr)); kmem_free(svr, sizeof (*svr));
@ -475,11 +476,11 @@ vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
* be copied. * be copied.
*/ */
spa->spa_removing_phys.sr_to_copy -= spa->spa_removing_phys.sr_to_copy -=
range_tree_space(ms->ms_freeing); zfs_range_tree_space(ms->ms_freeing);
ASSERT0(range_tree_space(ms->ms_freed)); ASSERT0(zfs_range_tree_space(ms->ms_freed));
for (int t = 0; t < TXG_SIZE; t++) for (int t = 0; t < TXG_SIZE; t++)
ASSERT0(range_tree_space(ms->ms_allocating[t])); ASSERT0(zfs_range_tree_space(ms->ms_allocating[t]));
} }
/* /*
@ -770,7 +771,7 @@ free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
* completed the copy and synced the mapping (see * completed the copy and synced the mapping (see
* vdev_mapping_sync). * vdev_mapping_sync).
*/ */
range_tree_add(svr->svr_frees[txgoff], zfs_range_tree_add(svr->svr_frees[txgoff],
offset, inflight_size); offset, inflight_size);
size -= inflight_size; size -= inflight_size;
offset += inflight_size; offset += inflight_size;
@ -806,7 +807,8 @@ free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
uint64_t, size); uint64_t, size);
if (svr->svr_allocd_segs != NULL) if (svr->svr_allocd_segs != NULL)
range_tree_clear(svr->svr_allocd_segs, offset, size); zfs_range_tree_clear(svr->svr_allocd_segs, offset,
size);
/* /*
* Since we now do not need to copy this data, for * Since we now do not need to copy this data, for
@ -915,7 +917,7 @@ vdev_mapping_sync(void *arg, dmu_tx_t *tx)
* mapping entries were in flight. * mapping entries were in flight.
*/ */
mutex_enter(&svr->svr_lock); mutex_enter(&svr->svr_lock);
range_tree_vacate(svr->svr_frees[txg & TXG_MASK], zfs_range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
free_mapped_segment_cb, vd); free_mapped_segment_cb, vd);
ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=, ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
vdev_indirect_mapping_max_offset(vim)); vdev_indirect_mapping_max_offset(vim));
@ -929,7 +931,7 @@ typedef struct vdev_copy_segment_arg {
spa_t *vcsa_spa; spa_t *vcsa_spa;
dva_t *vcsa_dest_dva; dva_t *vcsa_dest_dva;
uint64_t vcsa_txg; uint64_t vcsa_txg;
range_tree_t *vcsa_obsolete_segs; zfs_range_tree_t *vcsa_obsolete_segs;
} vdev_copy_segment_arg_t; } vdev_copy_segment_arg_t;
static void static void
@ -966,9 +968,9 @@ spa_vdev_copy_segment_done(zio_t *zio)
{ {
vdev_copy_segment_arg_t *vcsa = zio->io_private; vdev_copy_segment_arg_t *vcsa = zio->io_private;
range_tree_vacate(vcsa->vcsa_obsolete_segs, zfs_range_tree_vacate(vcsa->vcsa_obsolete_segs,
unalloc_seg, vcsa); unalloc_seg, vcsa);
range_tree_destroy(vcsa->vcsa_obsolete_segs); zfs_range_tree_destroy(vcsa->vcsa_obsolete_segs);
kmem_free(vcsa, sizeof (*vcsa)); kmem_free(vcsa, sizeof (*vcsa));
spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa); spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
@ -1119,7 +1121,7 @@ spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio,
* read from the old location and write to the new location. * read from the old location and write to the new location.
*/ */
static int static int
spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs, spa_vdev_copy_segment(vdev_t *vd, zfs_range_tree_t *segs,
uint64_t maxalloc, uint64_t txg, uint64_t maxalloc, uint64_t txg,
vdev_copy_arg_t *vca, zio_alloc_list_t *zal) vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
{ {
@ -1128,14 +1130,14 @@ spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
spa_vdev_removal_t *svr = spa->spa_vdev_removal; spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_indirect_mapping_entry_t *entry; vdev_indirect_mapping_entry_t *entry;
dva_t dst = {{ 0 }}; dva_t dst = {{ 0 }};
uint64_t start = range_tree_min(segs); uint64_t start = zfs_range_tree_min(segs);
ASSERT0(P2PHASE(start, 1 << spa->spa_min_ashift)); ASSERT0(P2PHASE(start, 1 << spa->spa_min_ashift));
ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE); ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE);
ASSERT0(P2PHASE(maxalloc, 1 << spa->spa_min_ashift)); ASSERT0(P2PHASE(maxalloc, 1 << spa->spa_min_ashift));
uint64_t size = range_tree_span(segs); uint64_t size = zfs_range_tree_span(segs);
if (range_tree_span(segs) > maxalloc) { if (zfs_range_tree_span(segs) > maxalloc) {
/* /*
* We can't allocate all the segments. Prefer to end * We can't allocate all the segments. Prefer to end
* the allocation at the end of a segment, thus avoiding * the allocation at the end of a segment, thus avoiding
@ -1143,13 +1145,13 @@ spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
*/ */
range_seg_max_t search; range_seg_max_t search;
zfs_btree_index_t where; zfs_btree_index_t where;
rs_set_start(&search, segs, start + maxalloc); zfs_rs_set_start(&search, segs, start + maxalloc);
rs_set_end(&search, segs, start + maxalloc); zfs_rs_set_end(&search, segs, start + maxalloc);
(void) zfs_btree_find(&segs->rt_root, &search, &where); (void) zfs_btree_find(&segs->rt_root, &search, &where);
range_seg_t *rs = zfs_btree_prev(&segs->rt_root, &where, zfs_range_seg_t *rs = zfs_btree_prev(&segs->rt_root, &where,
&where); &where);
if (rs != NULL) { if (rs != NULL) {
size = rs_get_end(rs, segs) - start; size = zfs_rs_get_end(rs, segs) - start;
} else { } else {
/* /*
* There are no segments that end before maxalloc. * There are no segments that end before maxalloc.
@ -1182,27 +1184,27 @@ spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
* relative to the start of the range to be copied (i.e. relative to the * relative to the start of the range to be copied (i.e. relative to the
* local variable "start"). * local variable "start").
*/ */
range_tree_t *obsolete_segs = range_tree_create(NULL, RANGE_SEG64, NULL, zfs_range_tree_t *obsolete_segs = zfs_range_tree_create(NULL,
0, 0); ZFS_RANGE_SEG64, NULL, 0, 0);
zfs_btree_index_t where; zfs_btree_index_t where;
range_seg_t *rs = zfs_btree_first(&segs->rt_root, &where); zfs_range_seg_t *rs = zfs_btree_first(&segs->rt_root, &where);
ASSERT3U(rs_get_start(rs, segs), ==, start); ASSERT3U(zfs_rs_get_start(rs, segs), ==, start);
uint64_t prev_seg_end = rs_get_end(rs, segs); uint64_t prev_seg_end = zfs_rs_get_end(rs, segs);
while ((rs = zfs_btree_next(&segs->rt_root, &where, &where)) != NULL) { while ((rs = zfs_btree_next(&segs->rt_root, &where, &where)) != NULL) {
if (rs_get_start(rs, segs) >= start + size) { if (zfs_rs_get_start(rs, segs) >= start + size) {
break; break;
} else { } else {
range_tree_add(obsolete_segs, zfs_range_tree_add(obsolete_segs,
prev_seg_end - start, prev_seg_end - start,
rs_get_start(rs, segs) - prev_seg_end); zfs_rs_get_start(rs, segs) - prev_seg_end);
} }
prev_seg_end = rs_get_end(rs, segs); prev_seg_end = zfs_rs_get_end(rs, segs);
} }
/* We don't end in the middle of an obsolete range */ /* We don't end in the middle of an obsolete range */
ASSERT3U(start + size, <=, prev_seg_end); ASSERT3U(start + size, <=, prev_seg_end);
range_tree_clear(segs, start, size); zfs_range_tree_clear(segs, start, size);
/* /*
* We can't have any padding of the allocated size, otherwise we will * We can't have any padding of the allocated size, otherwise we will
@ -1216,7 +1218,8 @@ spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start); DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
entry->vime_mapping.vimep_dst = dst; entry->vime_mapping.vimep_dst = dst;
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) { if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
entry->vime_obsolete_count = range_tree_space(obsolete_segs); entry->vime_obsolete_count =
zfs_range_tree_space(obsolete_segs);
} }
vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP); vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP);
@ -1455,30 +1458,31 @@ spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
* allocated segments that we are copying. We may also be copying * allocated segments that we are copying. We may also be copying
* free segments (of up to vdev_removal_max_span bytes). * free segments (of up to vdev_removal_max_span bytes).
*/ */
range_tree_t *segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); zfs_range_tree_t *segs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
NULL, 0, 0);
for (;;) { for (;;) {
range_tree_t *rt = svr->svr_allocd_segs; zfs_range_tree_t *rt = svr->svr_allocd_segs;
range_seg_t *rs = range_tree_first(rt); zfs_range_seg_t *rs = zfs_range_tree_first(rt);
if (rs == NULL) if (rs == NULL)
break; break;
uint64_t seg_length; uint64_t seg_length;
if (range_tree_is_empty(segs)) { if (zfs_range_tree_is_empty(segs)) {
/* need to truncate the first seg based on max_alloc */ /* need to truncate the first seg based on max_alloc */
seg_length = MIN(rs_get_end(rs, rt) - rs_get_start(rs, seg_length = MIN(zfs_rs_get_end(rs, rt) -
rt), *max_alloc); zfs_rs_get_start(rs, rt), *max_alloc);
} else { } else {
if (rs_get_start(rs, rt) - range_tree_max(segs) > if (zfs_rs_get_start(rs, rt) - zfs_range_tree_max(segs)
vdev_removal_max_span) { > vdev_removal_max_span) {
/* /*
* Including this segment would cause us to * Including this segment would cause us to
* copy a larger unneeded chunk than is allowed. * copy a larger unneeded chunk than is allowed.
*/ */
break; break;
} else if (rs_get_end(rs, rt) - range_tree_min(segs) > } else if (zfs_rs_get_end(rs, rt) -
*max_alloc) { zfs_range_tree_min(segs) > *max_alloc) {
/* /*
* This additional segment would extend past * This additional segment would extend past
* max_alloc. Rather than splitting this * max_alloc. Rather than splitting this
@ -1486,19 +1490,19 @@ spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
*/ */
break; break;
} else { } else {
seg_length = rs_get_end(rs, rt) - seg_length = zfs_rs_get_end(rs, rt) -
rs_get_start(rs, rt); zfs_rs_get_start(rs, rt);
} }
} }
range_tree_add(segs, rs_get_start(rs, rt), seg_length); zfs_range_tree_add(segs, zfs_rs_get_start(rs, rt), seg_length);
range_tree_remove(svr->svr_allocd_segs, zfs_range_tree_remove(svr->svr_allocd_segs,
rs_get_start(rs, rt), seg_length); zfs_rs_get_start(rs, rt), seg_length);
} }
if (range_tree_is_empty(segs)) { if (zfs_range_tree_is_empty(segs)) {
mutex_exit(&svr->svr_lock); mutex_exit(&svr->svr_lock);
range_tree_destroy(segs); zfs_range_tree_destroy(segs);
return; return;
} }
@ -1507,20 +1511,20 @@ spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
svr, tx); svr, tx);
} }
svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs); svr->svr_max_offset_to_sync[txg & TXG_MASK] = zfs_range_tree_max(segs);
/* /*
* Note: this is the amount of *allocated* space * Note: this is the amount of *allocated* space
* that we are taking care of each txg. * that we are taking care of each txg.
*/ */
svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs); svr->svr_bytes_done[txg & TXG_MASK] += zfs_range_tree_space(segs);
mutex_exit(&svr->svr_lock); mutex_exit(&svr->svr_lock);
zio_alloc_list_t zal; zio_alloc_list_t zal;
metaslab_trace_init(&zal); metaslab_trace_init(&zal);
uint64_t thismax = SPA_MAXBLOCKSIZE; uint64_t thismax = SPA_MAXBLOCKSIZE;
while (!range_tree_is_empty(segs)) { while (!zfs_range_tree_is_empty(segs)) {
int error = spa_vdev_copy_segment(vd, int error = spa_vdev_copy_segment(vd,
segs, thismax, txg, vca, &zal); segs, thismax, txg, vca, &zal);
@ -1537,7 +1541,7 @@ spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT); ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift); ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
uint64_t attempted = uint64_t attempted =
MIN(range_tree_span(segs), thismax); MIN(zfs_range_tree_span(segs), thismax);
thismax = P2ROUNDUP(attempted / 2, thismax = P2ROUNDUP(attempted / 2,
1 << spa->spa_max_ashift); 1 << spa->spa_max_ashift);
/* /*
@ -1557,7 +1561,7 @@ spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
} }
} }
metaslab_trace_fini(&zal); metaslab_trace_fini(&zal);
range_tree_destroy(segs); zfs_range_tree_destroy(segs);
} }
/* /*
@ -1628,7 +1632,7 @@ spa_vdev_remove_thread(void *arg)
metaslab_t *msp = vd->vdev_ms[msi]; metaslab_t *msp = vd->vdev_ms[msi];
ASSERT3U(msi, <=, vd->vdev_ms_count); ASSERT3U(msi, <=, vd->vdev_ms_count);
ASSERT0(range_tree_space(svr->svr_allocd_segs)); ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs));
mutex_enter(&msp->ms_sync_lock); mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock); mutex_enter(&msp->ms_lock);
@ -1637,7 +1641,7 @@ spa_vdev_remove_thread(void *arg)
* Assert nothing in flight -- ms_*tree is empty. * Assert nothing in flight -- ms_*tree is empty.
*/ */
for (int i = 0; i < TXG_SIZE; i++) { for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(range_tree_space(msp->ms_allocating[i])); ASSERT0(zfs_range_tree_space(msp->ms_allocating[i]));
} }
/* /*
@ -1653,19 +1657,20 @@ spa_vdev_remove_thread(void *arg)
VERIFY0(space_map_load(msp->ms_sm, VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC)); svr->svr_allocd_segs, SM_ALLOC));
range_tree_walk(msp->ms_unflushed_allocs, zfs_range_tree_walk(msp->ms_unflushed_allocs,
range_tree_add, svr->svr_allocd_segs); zfs_range_tree_add, svr->svr_allocd_segs);
range_tree_walk(msp->ms_unflushed_frees, zfs_range_tree_walk(msp->ms_unflushed_frees,
range_tree_remove, svr->svr_allocd_segs); zfs_range_tree_remove, svr->svr_allocd_segs);
range_tree_walk(msp->ms_freeing, zfs_range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs); zfs_range_tree_remove, svr->svr_allocd_segs);
/* /*
* When we are resuming from a paused removal (i.e. * When we are resuming from a paused removal (i.e.
* when importing a pool with a removal in progress), * when importing a pool with a removal in progress),
* discard any state that we have already processed. * discard any state that we have already processed.
*/ */
range_tree_clear(svr->svr_allocd_segs, 0, start_offset); zfs_range_tree_clear(svr->svr_allocd_segs, 0,
start_offset);
} }
mutex_exit(&msp->ms_lock); mutex_exit(&msp->ms_lock);
mutex_exit(&msp->ms_sync_lock); mutex_exit(&msp->ms_sync_lock);
@ -1677,7 +1682,7 @@ spa_vdev_remove_thread(void *arg)
(u_longlong_t)msp->ms_id); (u_longlong_t)msp->ms_id);
while (!svr->svr_thread_exit && while (!svr->svr_thread_exit &&
!range_tree_is_empty(svr->svr_allocd_segs)) { !zfs_range_tree_is_empty(svr->svr_allocd_segs)) {
mutex_exit(&svr->svr_lock); mutex_exit(&svr->svr_lock);
@ -1756,7 +1761,7 @@ spa_vdev_remove_thread(void *arg)
if (svr->svr_thread_exit) { if (svr->svr_thread_exit) {
mutex_enter(&svr->svr_lock); mutex_enter(&svr->svr_lock);
range_tree_vacate(svr->svr_allocd_segs, NULL, NULL); zfs_range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
svr->svr_thread = NULL; svr->svr_thread = NULL;
cv_broadcast(&svr->svr_cv); cv_broadcast(&svr->svr_cv);
mutex_exit(&svr->svr_lock); mutex_exit(&svr->svr_lock);
@ -1776,7 +1781,7 @@ spa_vdev_remove_thread(void *arg)
spa_vdev_remove_cancel_impl(spa); spa_vdev_remove_cancel_impl(spa);
} }
} else { } else {
ASSERT0(range_tree_space(svr->svr_allocd_segs)); ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs));
vdev_remove_complete(spa); vdev_remove_complete(spa);
} }
@ -1885,7 +1890,7 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim)) if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
break; break;
ASSERT0(range_tree_space(svr->svr_allocd_segs)); ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs));
mutex_enter(&msp->ms_lock); mutex_enter(&msp->ms_lock);
@ -1893,22 +1898,22 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
* Assert nothing in flight -- ms_*tree is empty. * Assert nothing in flight -- ms_*tree is empty.
*/ */
for (int i = 0; i < TXG_SIZE; i++) for (int i = 0; i < TXG_SIZE; i++)
ASSERT0(range_tree_space(msp->ms_allocating[i])); ASSERT0(zfs_range_tree_space(msp->ms_allocating[i]));
for (int i = 0; i < TXG_DEFER_SIZE; i++) for (int i = 0; i < TXG_DEFER_SIZE; i++)
ASSERT0(range_tree_space(msp->ms_defer[i])); ASSERT0(zfs_range_tree_space(msp->ms_defer[i]));
ASSERT0(range_tree_space(msp->ms_freed)); ASSERT0(zfs_range_tree_space(msp->ms_freed));
if (msp->ms_sm != NULL) { if (msp->ms_sm != NULL) {
mutex_enter(&svr->svr_lock); mutex_enter(&svr->svr_lock);
VERIFY0(space_map_load(msp->ms_sm, VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC)); svr->svr_allocd_segs, SM_ALLOC));
range_tree_walk(msp->ms_unflushed_allocs, zfs_range_tree_walk(msp->ms_unflushed_allocs,
range_tree_add, svr->svr_allocd_segs); zfs_range_tree_add, svr->svr_allocd_segs);
range_tree_walk(msp->ms_unflushed_frees, zfs_range_tree_walk(msp->ms_unflushed_frees,
range_tree_remove, svr->svr_allocd_segs); zfs_range_tree_remove, svr->svr_allocd_segs);
range_tree_walk(msp->ms_freeing, zfs_range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs); zfs_range_tree_remove, svr->svr_allocd_segs);
/* /*
* Clear everything past what has been synced, * Clear everything past what has been synced,
@ -1918,7 +1923,7 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
uint64_t sm_end = msp->ms_sm->sm_start + uint64_t sm_end = msp->ms_sm->sm_start +
msp->ms_sm->sm_size; msp->ms_sm->sm_size;
if (sm_end > syncd) if (sm_end > syncd)
range_tree_clear(svr->svr_allocd_segs, zfs_range_tree_clear(svr->svr_allocd_segs,
syncd, sm_end - syncd); syncd, sm_end - syncd);
mutex_exit(&svr->svr_lock); mutex_exit(&svr->svr_lock);
@ -1926,7 +1931,7 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
mutex_exit(&msp->ms_lock); mutex_exit(&msp->ms_lock);
mutex_enter(&svr->svr_lock); mutex_enter(&svr->svr_lock);
range_tree_vacate(svr->svr_allocd_segs, zfs_range_tree_vacate(svr->svr_allocd_segs,
free_mapped_segment_cb, vd); free_mapped_segment_cb, vd);
mutex_exit(&svr->svr_lock); mutex_exit(&svr->svr_lock);
} }
@ -1935,7 +1940,7 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
* Note: this must happen after we invoke free_mapped_segment_cb, * Note: this must happen after we invoke free_mapped_segment_cb,
* because it adds to the obsolete_segments. * because it adds to the obsolete_segments.
*/ */
range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL); zfs_range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
ASSERT3U(vic->vic_mapping_object, ==, ASSERT3U(vic->vic_mapping_object, ==,
vdev_indirect_mapping_object(vd->vdev_indirect_mapping)); vdev_indirect_mapping_object(vd->vdev_indirect_mapping));

View File

@ -149,7 +149,7 @@ typedef struct trim_args {
*/ */
vdev_t *trim_vdev; /* Leaf vdev to TRIM */ vdev_t *trim_vdev; /* Leaf vdev to TRIM */
metaslab_t *trim_msp; /* Disabled metaslab */ metaslab_t *trim_msp; /* Disabled metaslab */
range_tree_t *trim_tree; /* TRIM ranges (in metaslab) */ zfs_range_tree_t *trim_tree; /* TRIM ranges (in metaslab) */
trim_type_t trim_type; /* Manual or auto TRIM */ trim_type_t trim_type; /* Manual or auto TRIM */
uint64_t trim_extent_bytes_max; /* Maximum TRIM I/O size */ uint64_t trim_extent_bytes_max; /* Maximum TRIM I/O size */
uint64_t trim_extent_bytes_min; /* Minimum TRIM I/O size */ uint64_t trim_extent_bytes_min; /* Minimum TRIM I/O size */
@ -601,10 +601,10 @@ vdev_trim_ranges(trim_args_t *ta)
ta->trim_start_time = gethrtime(); ta->trim_start_time = gethrtime();
ta->trim_bytes_done = 0; ta->trim_bytes_done = 0;
for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL; for (zfs_range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
rs = zfs_btree_next(t, &idx, &idx)) { rs = zfs_btree_next(t, &idx, &idx)) {
uint64_t size = rs_get_end(rs, ta->trim_tree) - rs_get_start(rs, uint64_t size = zfs_rs_get_end(rs, ta->trim_tree) -
ta->trim_tree); zfs_rs_get_start(rs, ta->trim_tree);
if (extent_bytes_min && size < extent_bytes_min) { if (extent_bytes_min && size < extent_bytes_min) {
spa_iostats_trim_add(spa, ta->trim_type, spa_iostats_trim_add(spa, ta->trim_type,
@ -617,7 +617,7 @@ vdev_trim_ranges(trim_args_t *ta)
for (uint64_t w = 0; w < writes_required; w++) { for (uint64_t w = 0; w < writes_required; w++) {
error = vdev_trim_range(ta, VDEV_LABEL_START_SIZE + error = vdev_trim_range(ta, VDEV_LABEL_START_SIZE +
rs_get_start(rs, ta->trim_tree) + zfs_rs_get_start(rs, ta->trim_tree) +
(w *extent_bytes_max), MIN(size - (w *extent_bytes_max), MIN(size -
(w * extent_bytes_max), extent_bytes_max)); (w * extent_bytes_max), extent_bytes_max));
if (error != 0) { if (error != 0) {
@ -729,13 +729,13 @@ vdev_trim_calculate_progress(vdev_t *vd)
*/ */
VERIFY0(metaslab_load(msp)); VERIFY0(metaslab_load(msp));
range_tree_t *rt = msp->ms_allocatable; zfs_range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *bt = &rt->rt_root; zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t idx; zfs_btree_index_t idx;
for (range_seg_t *rs = zfs_btree_first(bt, &idx); for (zfs_range_seg_t *rs = zfs_btree_first(bt, &idx);
rs != NULL; rs = zfs_btree_next(bt, &idx, &idx)) { rs != NULL; rs = zfs_btree_next(bt, &idx, &idx)) {
logical_rs.rs_start = rs_get_start(rs, rt); logical_rs.rs_start = zfs_rs_get_start(rs, rt);
logical_rs.rs_end = rs_get_end(rs, rt); logical_rs.rs_end = zfs_rs_get_end(rs, rt);
vdev_xlate_walk(vd, &logical_rs, vdev_xlate_walk(vd, &logical_rs,
vdev_trim_xlate_progress, vd); vdev_trim_xlate_progress, vd);
@ -832,7 +832,7 @@ vdev_trim_xlate_range_add(void *arg, range_seg64_t *physical_rs)
ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start); ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);
range_tree_add(ta->trim_tree, physical_rs->rs_start, zfs_range_tree_add(ta->trim_tree, physical_rs->rs_start,
physical_rs->rs_end - physical_rs->rs_start); physical_rs->rs_end - physical_rs->rs_start);
} }
@ -858,7 +858,8 @@ vdev_trim_range_add(void *arg, uint64_t start, uint64_t size)
metaslab_t *msp = ta->trim_msp; metaslab_t *msp = ta->trim_msp;
VERIFY0(metaslab_load(msp)); VERIFY0(metaslab_load(msp));
VERIFY3B(msp->ms_loaded, ==, B_TRUE); VERIFY3B(msp->ms_loaded, ==, B_TRUE);
VERIFY(range_tree_contains(msp->ms_allocatable, start, size)); VERIFY(zfs_range_tree_contains(msp->ms_allocatable, start,
size));
} }
ASSERT(vd->vdev_ops->vdev_op_leaf); ASSERT(vd->vdev_ops->vdev_op_leaf);
@ -900,7 +901,7 @@ vdev_trim_thread(void *arg)
ta.trim_vdev = vd; ta.trim_vdev = vd;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max; ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = zfs_trim_extent_bytes_min; ta.trim_extent_bytes_min = zfs_trim_extent_bytes_min;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); ta.trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_MANUAL; ta.trim_type = TRIM_TYPE_MANUAL;
ta.trim_flags = 0; ta.trim_flags = 0;
@ -946,22 +947,23 @@ vdev_trim_thread(void *arg)
} }
ta.trim_msp = msp; ta.trim_msp = msp;
range_tree_walk(msp->ms_allocatable, vdev_trim_range_add, &ta); zfs_range_tree_walk(msp->ms_allocatable, vdev_trim_range_add,
range_tree_vacate(msp->ms_trim, NULL, NULL); &ta);
zfs_range_tree_vacate(msp->ms_trim, NULL, NULL);
mutex_exit(&msp->ms_lock); mutex_exit(&msp->ms_lock);
error = vdev_trim_ranges(&ta); error = vdev_trim_ranges(&ta);
metaslab_enable(msp, B_TRUE, B_FALSE); metaslab_enable(msp, B_TRUE, B_FALSE);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
range_tree_vacate(ta.trim_tree, NULL, NULL); zfs_range_tree_vacate(ta.trim_tree, NULL, NULL);
if (error != 0) if (error != 0)
break; break;
} }
spa_config_exit(spa, SCL_CONFIG, FTAG); spa_config_exit(spa, SCL_CONFIG, FTAG);
range_tree_destroy(ta.trim_tree); zfs_range_tree_destroy(ta.trim_tree);
mutex_enter(&vd->vdev_trim_lock); mutex_enter(&vd->vdev_trim_lock);
if (!vd->vdev_trim_exit_wanted) { if (!vd->vdev_trim_exit_wanted) {
@ -1204,7 +1206,7 @@ vdev_trim_range_verify(void *arg, uint64_t start, uint64_t size)
VERIFY3B(msp->ms_loaded, ==, B_TRUE); VERIFY3B(msp->ms_loaded, ==, B_TRUE);
VERIFY3U(msp->ms_disabled, >, 0); VERIFY3U(msp->ms_disabled, >, 0);
VERIFY(range_tree_contains(msp->ms_allocatable, start, size)); VERIFY(zfs_range_tree_contains(msp->ms_allocatable, start, size));
} }
/* /*
@ -1261,7 +1263,7 @@ vdev_autotrim_thread(void *arg)
for (uint64_t i = shift % txgs_per_trim; i < vd->vdev_ms_count; for (uint64_t i = shift % txgs_per_trim; i < vd->vdev_ms_count;
i += txgs_per_trim) { i += txgs_per_trim) {
metaslab_t *msp = vd->vdev_ms[i]; metaslab_t *msp = vd->vdev_ms[i];
range_tree_t *trim_tree; zfs_range_tree_t *trim_tree;
boolean_t issued_trim = B_FALSE; boolean_t issued_trim = B_FALSE;
boolean_t wait_aborted = B_FALSE; boolean_t wait_aborted = B_FALSE;
@ -1276,7 +1278,7 @@ vdev_autotrim_thread(void *arg)
* or when there are no recent frees to trim. * or when there are no recent frees to trim.
*/ */
if (msp->ms_sm == NULL || if (msp->ms_sm == NULL ||
range_tree_is_empty(msp->ms_trim)) { zfs_range_tree_is_empty(msp->ms_trim)) {
mutex_exit(&msp->ms_lock); mutex_exit(&msp->ms_lock);
metaslab_enable(msp, B_FALSE, B_FALSE); metaslab_enable(msp, B_FALSE, B_FALSE);
continue; continue;
@ -1302,10 +1304,10 @@ vdev_autotrim_thread(void *arg)
* Allocate an empty range tree which is swapped in * Allocate an empty range tree which is swapped in
* for the existing ms_trim tree while it is processed. * for the existing ms_trim tree while it is processed.
*/ */
trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
0, 0); NULL, 0, 0);
range_tree_swap(&msp->ms_trim, &trim_tree); zfs_range_tree_swap(&msp->ms_trim, &trim_tree);
ASSERT(range_tree_is_empty(msp->ms_trim)); ASSERT(zfs_range_tree_is_empty(msp->ms_trim));
/* /*
* There are two cases when constructing the per-vdev * There are two cases when constructing the per-vdev
@ -1357,9 +1359,9 @@ vdev_autotrim_thread(void *arg)
if (!cvd->vdev_ops->vdev_op_leaf) if (!cvd->vdev_ops->vdev_op_leaf)
continue; continue;
ta->trim_tree = range_tree_create(NULL, ta->trim_tree = zfs_range_tree_create(NULL,
RANGE_SEG64, NULL, 0, 0); ZFS_RANGE_SEG64, NULL, 0, 0);
range_tree_walk(trim_tree, zfs_range_tree_walk(trim_tree,
vdev_trim_range_add, ta); vdev_trim_range_add, ta);
} }
@ -1406,13 +1408,13 @@ vdev_autotrim_thread(void *arg)
mutex_enter(&msp->ms_lock); mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp)); VERIFY0(metaslab_load(msp));
VERIFY3P(tap[0].trim_msp, ==, msp); VERIFY3P(tap[0].trim_msp, ==, msp);
range_tree_walk(trim_tree, zfs_range_tree_walk(trim_tree,
vdev_trim_range_verify, &tap[0]); vdev_trim_range_verify, &tap[0]);
mutex_exit(&msp->ms_lock); mutex_exit(&msp->ms_lock);
} }
range_tree_vacate(trim_tree, NULL, NULL); zfs_range_tree_vacate(trim_tree, NULL, NULL);
range_tree_destroy(trim_tree); zfs_range_tree_destroy(trim_tree);
/* /*
* Wait for couples of kicks, to ensure the trim io is * Wait for couples of kicks, to ensure the trim io is
@ -1434,8 +1436,9 @@ vdev_autotrim_thread(void *arg)
if (ta->trim_tree == NULL) if (ta->trim_tree == NULL)
continue; continue;
range_tree_vacate(ta->trim_tree, NULL, NULL); zfs_range_tree_vacate(ta->trim_tree, NULL,
range_tree_destroy(ta->trim_tree); NULL);
zfs_range_tree_destroy(ta->trim_tree);
} }
kmem_free(tap, sizeof (trim_args_t) * children); kmem_free(tap, sizeof (trim_args_t) * children);
@ -1474,7 +1477,7 @@ vdev_autotrim_thread(void *arg)
metaslab_t *msp = vd->vdev_ms[i]; metaslab_t *msp = vd->vdev_ms[i];
mutex_enter(&msp->ms_lock); mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_trim, NULL, NULL); zfs_range_tree_vacate(msp->ms_trim, NULL, NULL);
mutex_exit(&msp->ms_lock); mutex_exit(&msp->ms_lock);
} }
} }
@ -1596,7 +1599,7 @@ vdev_trim_l2arc_thread(void *arg)
vd->vdev_trim_secure = 0; vd->vdev_trim_secure = 0;
ta.trim_vdev = vd; ta.trim_vdev = vd;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); ta.trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_MANUAL; ta.trim_type = TRIM_TYPE_MANUAL;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max; ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE; ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
@ -1606,7 +1609,7 @@ vdev_trim_l2arc_thread(void *arg)
physical_rs.rs_end = vd->vdev_trim_bytes_est = physical_rs.rs_end = vd->vdev_trim_bytes_est =
vdev_get_min_asize(vd); vdev_get_min_asize(vd);
range_tree_add(ta.trim_tree, physical_rs.rs_start, zfs_range_tree_add(ta.trim_tree, physical_rs.rs_start,
physical_rs.rs_end - physical_rs.rs_start); physical_rs.rs_end - physical_rs.rs_start);
mutex_enter(&vd->vdev_trim_lock); mutex_enter(&vd->vdev_trim_lock);
@ -1622,8 +1625,8 @@ vdev_trim_l2arc_thread(void *arg)
} }
mutex_exit(&vd->vdev_trim_io_lock); mutex_exit(&vd->vdev_trim_io_lock);
range_tree_vacate(ta.trim_tree, NULL, NULL); zfs_range_tree_vacate(ta.trim_tree, NULL, NULL);
range_tree_destroy(ta.trim_tree); zfs_range_tree_destroy(ta.trim_tree);
mutex_enter(&vd->vdev_trim_lock); mutex_enter(&vd->vdev_trim_lock);
if (!vd->vdev_trim_exit_wanted && vdev_writeable(vd)) { if (!vd->vdev_trim_exit_wanted && vdev_writeable(vd)) {
@ -1731,7 +1734,7 @@ vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
ASSERT(!vd->vdev_top->vdev_rz_expanding); ASSERT(!vd->vdev_top->vdev_rz_expanding);
ta.trim_vdev = vd; ta.trim_vdev = vd;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); ta.trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_SIMPLE; ta.trim_type = TRIM_TYPE_SIMPLE;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max; ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE; ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
@ -1740,7 +1743,7 @@ vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start); ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);
if (physical_rs.rs_end > physical_rs.rs_start) { if (physical_rs.rs_end > physical_rs.rs_start) {
range_tree_add(ta.trim_tree, physical_rs.rs_start, zfs_range_tree_add(ta.trim_tree, physical_rs.rs_start,
physical_rs.rs_end - physical_rs.rs_start); physical_rs.rs_end - physical_rs.rs_start);
} else { } else {
ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start); ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
@ -1754,8 +1757,8 @@ vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
} }
mutex_exit(&vd->vdev_trim_io_lock); mutex_exit(&vd->vdev_trim_io_lock);
range_tree_vacate(ta.trim_tree, NULL, NULL); zfs_range_tree_vacate(ta.trim_tree, NULL, NULL);
range_tree_destroy(ta.trim_tree); zfs_range_tree_destroy(ta.trim_tree);
return (error); return (error);
} }
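
For segment iteration, the trim and initialize hunks above show the renamed accessors: walk rt->rt_root with zfs_btree_first()/zfs_btree_next() and read bounds with zfs_rs_get_start()/zfs_rs_get_end(). A minimal sketch, with the helper name and the direct rt_root access taken from the hunks above rather than from any public contract:

/*
 * Sketch (not in the commit): sum the segment lengths of a range tree
 * with the renamed accessors, mirroring vdev_trim_ranges() above.
 */
static uint64_t
sum_segments_example(zfs_range_tree_t *rt)
{
	zfs_btree_t *bt = &rt->rt_root;
	zfs_btree_index_t idx;
	uint64_t total = 0;

	for (zfs_range_seg_t *rs = zfs_btree_first(bt, &idx); rs != NULL;
	    rs = zfs_btree_next(bt, &idx, &idx)) {
		/* Segment bounds now come from zfs_rs_get_{start,end}(). */
		total += zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt);
	}

	/* zfs_range_tree_space() tracks the same byte total. */
	ASSERT3U(total, ==, zfs_range_tree_space(rt));
	return (total);
}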