Unified allocation throttling (#17020)
The existing allocation throttling aimed to improve write speed by allocating more data to vdevs that can write it faster. But in the process it completely broke the original mechanism, designed to balance vdev space usage. With severe vdev space-usage imbalance it is possible that vdevs with higher usage start growing fragmentation sooner than others and, after getting full, stop accepting writes at all. Also, after a vdev addition it might take a very long time for the pool to restore the balance, since the new vdev gets no real preference unless the old ones are already much slower due to fragmentation. The old throttling was also request-based, which was unpredictable with block sizes varying from 512B to 16MB, and it made little sense with I/O aggregation, where 32-100 requests could be aggregated into a few, leaving the device underutilized by submitting fewer and/or shorter requests, or conversely trying to queue up to 1.6GB of writes per device.

This change presents a completely new throttling algorithm. Unlike the old request-based one, it measures the allocation queue in bytes. That makes it possible to integrate with the reworked allocation quota (aliquot) mechanism, which is also byte-based. Unlike the original code, which balanced the vdevs' amounts of free space, this one balances their free/used space fractions. It should result in lower and more uniform fragmentation in the long run.

The algorithm still allows write speed to be improved by allocating more data to faster vdevs, but does it in a more controllable way. On top of the space-based allocation quota, it also calculates the minimum queue depth a vdev is allowed to maintain, and accordingly the amount of extra allocations it can receive if it appears faster. That amount is based on the vdev's capacity and space usage, but is applied only when the pool is busy. This way the code can choose between faster writes when needed and better vdev balance when not, with the choice gradually shrinking together with the free space.

This change also makes allocation queues per-class, allowing them to throttle independently and in parallel. Allocations that are bounced between classes due to allocation errors will be able to properly throttle in the new class. Allocations that should not be throttled (ZIL, gang, copies) are not, but may still follow the rotor and allocation quota mechanism of the class without disrupting it.

Signed-off-by: Alexander Motin <mav@FreeBSD.org>
Sponsored by: iXsystems, Inc.
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
This commit is contained in:
parent 3862ebbf1f
commit 94a3fabcb0
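As a rough illustration of the byte-based reservation scheme the commit message describes, here is a minimal, self-contained sketch. It is not the OpenZFS implementation: the type and field names are invented, and locking is reduced to a single user-space mutex; in the real code the reservation lives in each per-allocator structure and is capped by mc_alloc_max.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical throttle state (simplified stand-in, not ZFS code). */
typedef struct alloc_throttle {
    pthread_mutex_t at_lock;
    uint64_t at_reserved;   /* bytes currently reserved by in-flight allocations */
    uint64_t at_max;        /* maximum bytes allowed to be in flight */
} alloc_throttle_t;

/*
 * Try to reserve `size` bytes before issuing an allocation.  `must` models
 * callers (such as gang headers) that may exceed the limit instead of being
 * throttled.  Returns false if the caller should wait until earlier
 * allocations complete and return their bytes.
 */
static bool
throttle_reserve(alloc_throttle_t *at, uint64_t size, bool must)
{
    bool ok;

    pthread_mutex_lock(&at->at_lock);
    ok = must || at->at_reserved + size <= at->at_max;
    if (ok)
        at->at_reserved += size;
    pthread_mutex_unlock(&at->at_lock);
    return (ok);
}

/* Return the bytes once the write completes, letting queued callers proceed. */
static void
throttle_unreserve(alloc_throttle_t *at, uint64_t size)
{
    pthread_mutex_lock(&at->at_lock);
    at->at_reserved -= size;
    pthread_mutex_unlock(&at->at_lock);
}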
@@ -64,6 +64,8 @@ extern uint64_t zfs_wrlog_data_max;
extern uint_t zfs_dirty_data_max_percent;
extern uint_t zfs_dirty_data_max_max_percent;
extern uint_t zfs_delay_min_dirty_percent;
extern uint_t zfs_vdev_async_write_active_min_dirty_percent;
extern uint_t zfs_vdev_async_write_active_max_dirty_percent;
extern uint64_t zfs_delay_scale;

/* These macros are for indexing into the zfs_all_blkstats_t. */
@@ -75,18 +75,13 @@ uint64_t metaslab_largest_allocatable(metaslab_t *);
/*
 * metaslab alloc flags
 */
#define METASLAB_HINTBP_FAVOR 0x0
#define METASLAB_HINTBP_AVOID 0x1
#define METASLAB_ZIL 0x1
#define METASLAB_GANG_HEADER 0x2
#define METASLAB_GANG_CHILD 0x4
#define METASLAB_ASYNC_ALLOC 0x8
#define METASLAB_DONT_THROTTLE 0x10
#define METASLAB_MUST_RESERVE 0x20
#define METASLAB_ZIL 0x80

int metaslab_alloc(spa_t *, metaslab_class_t *, uint64_t,
    blkptr_t *, int, uint64_t, blkptr_t *, int, zio_alloc_list_t *, zio_t *,
    int);
int metaslab_alloc(spa_t *, metaslab_class_t *, uint64_t, blkptr_t *, int,
    uint64_t, blkptr_t *, int, zio_alloc_list_t *, int, const void *);
int metaslab_alloc_dva(spa_t *, metaslab_class_t *, uint64_t,
    dva_t *, int, dva_t *, uint64_t, int, zio_alloc_list_t *, int);
void metaslab_free(spa_t *, const blkptr_t *, uint64_t, boolean_t);
@@ -103,15 +98,17 @@ void metaslab_stat_fini(void);
void metaslab_trace_init(zio_alloc_list_t *);
void metaslab_trace_fini(zio_alloc_list_t *);

metaslab_class_t *metaslab_class_create(spa_t *, const metaslab_ops_t *);
metaslab_class_t *metaslab_class_create(spa_t *, const metaslab_ops_t *,
    boolean_t);
void metaslab_class_destroy(metaslab_class_t *);
int metaslab_class_validate(metaslab_class_t *);
void metaslab_class_validate(metaslab_class_t *);
void metaslab_class_balance(metaslab_class_t *mc, boolean_t onsync);
void metaslab_class_histogram_verify(metaslab_class_t *);
uint64_t metaslab_class_fragmentation(metaslab_class_t *);
uint64_t metaslab_class_expandable_space(metaslab_class_t *);
boolean_t metaslab_class_throttle_reserve(metaslab_class_t *, int, int,
    zio_t *, int);
void metaslab_class_throttle_unreserve(metaslab_class_t *, int, int, zio_t *);
boolean_t metaslab_class_throttle_reserve(metaslab_class_t *, int, zio_t *,
    boolean_t, boolean_t *);
boolean_t metaslab_class_throttle_unreserve(metaslab_class_t *, int, zio_t *);
void metaslab_class_evict_old(metaslab_class_t *, uint64_t);
uint64_t metaslab_class_get_alloc(metaslab_class_t *);
uint64_t metaslab_class_get_space(metaslab_class_t *);
@@ -130,9 +127,8 @@ uint64_t metaslab_group_get_space(metaslab_group_t *);
void metaslab_group_histogram_verify(metaslab_group_t *);
uint64_t metaslab_group_fragmentation(metaslab_group_t *);
void metaslab_group_histogram_remove(metaslab_group_t *, metaslab_t *);
void metaslab_group_alloc_decrement(spa_t *, uint64_t, const void *, int, int,
    boolean_t);
void metaslab_group_alloc_verify(spa_t *, const blkptr_t *, const void *, int);
void metaslab_group_alloc_decrement(spa_t *, uint64_t, int, int, uint64_t,
    const void *);
void metaslab_recalculate_weight_and_sort(metaslab_t *);
void metaslab_disable(metaslab_t *);
void metaslab_enable(metaslab_t *, boolean_t, boolean_t);
@@ -141,23 +141,24 @@ typedef enum trace_alloc_type {
 * Per-allocator data structure.
 */
typedef struct metaslab_class_allocator {
    kmutex_t mca_lock;
    avl_tree_t mca_tree;

    metaslab_group_t *mca_rotor;
    uint64_t mca_aliquot;

    /*
     * The allocation throttle works on a reservation system. Whenever
     * an asynchronous zio wants to perform an allocation it must
     * first reserve the number of blocks that it wants to allocate.
     * first reserve the number of bytes that it wants to allocate.
     * If there aren't sufficient slots available for the pending zio
     * then that I/O is throttled until more slots free up. The current
     * number of reserved allocations is maintained by the mca_alloc_slots
     * refcount. The mca_alloc_max_slots value determines the maximum
     * number of allocations that the system allows. Gang blocks are
     * allowed to reserve slots even if we've reached the maximum
     * number of allocations allowed.
     * size of reserved allocations is maintained by mca_reserved.
     * The maximum total size of reserved allocations is determined by
     * mc_alloc_max in the metaslab_class_t. Gang blocks are allowed
     * to reserve for their headers even if we've reached the maximum.
     */
    uint64_t mca_alloc_max_slots;
    zfs_refcount_t mca_alloc_slots;
    uint64_t mca_reserved;
} ____cacheline_aligned metaslab_class_allocator_t;

/*
@@ -190,10 +191,10 @@ struct metaslab_class {
     */
    uint64_t mc_groups;

    /*
     * Toggle to enable/disable the allocation throttle.
     */
    boolean_t mc_is_log;
    boolean_t mc_alloc_throttle_enabled;
    uint64_t mc_alloc_io_size;
    uint64_t mc_alloc_max;

    uint64_t mc_alloc_groups; /* # of allocatable groups */

@@ -216,11 +217,10 @@ struct metaslab_class {
 * Per-allocator data structure.
 */
typedef struct metaslab_group_allocator {
    uint64_t mga_cur_max_alloc_queue_depth;
    zfs_refcount_t mga_alloc_queue_depth;
    zfs_refcount_t mga_queue_depth;
    metaslab_t *mga_primary;
    metaslab_t *mga_secondary;
} metaslab_group_allocator_t;
} ____cacheline_aligned metaslab_group_allocator_t;

/*
 * Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs)
@@ -235,6 +235,7 @@ struct metaslab_group {
    kmutex_t mg_lock;
    avl_tree_t mg_metaslab_tree;
    uint64_t mg_aliquot;
    uint64_t mg_queue_target;
    boolean_t mg_allocatable; /* can we allocate? */
    uint64_t mg_ms_ready;

@@ -246,40 +247,12 @@ struct metaslab_group {
     */
    boolean_t mg_initialized;

    uint64_t mg_free_capacity; /* percentage free */
    int64_t mg_bias;
    int64_t mg_activation_count;
    metaslab_class_t *mg_class;
    vdev_t *mg_vd;
    metaslab_group_t *mg_prev;
    metaslab_group_t *mg_next;

    /*
     * In order for the allocation throttle to function properly, we cannot
     * have too many IOs going to each disk by default; the throttle
     * operates by allocating more work to disks that finish quickly, so
     * allocating larger chunks to each disk reduces its effectiveness.
     * However, if the number of IOs going to each allocator is too small,
     * we will not perform proper aggregation at the vdev_queue layer,
     * also resulting in decreased performance. Therefore, we will use a
     * ramp-up strategy.
     *
     * Each allocator in each metaslab group has a current queue depth
     * (mg_alloc_queue_depth[allocator]) and a current max queue depth
     * (mga_cur_max_alloc_queue_depth[allocator]), and each metaslab group
     * has an absolute max queue depth (mg_max_alloc_queue_depth). We
     * add IOs to an allocator until the mg_alloc_queue_depth for that
     * allocator hits the cur_max. Every time an IO completes for a given
     * allocator on a given metaslab group, we increment its cur_max until
     * it reaches mg_max_alloc_queue_depth. The cur_max resets every txg to
     * help protect against disks that decrease in performance over time.
     *
     * It's possible for an allocator to handle more allocations than
     * its max. This can occur when gang blocks are required or when other
     * groups are unable to handle their share of allocations.
     */
    uint64_t mg_max_alloc_queue_depth;

    /*
     * A metalab group that can no longer allocate the minimum block
     * size will set mg_no_free_space. Once a metaslab group is out
@@ -288,8 +261,6 @@ struct metaslab_group {
     */
    boolean_t mg_no_free_space;

    uint64_t mg_allocations;
    uint64_t mg_failed_allocations;
    uint64_t mg_fragmentation;
    uint64_t mg_histogram[ZFS_RANGE_TREE_HISTOGRAM_SIZE];

@@ -508,7 +479,7 @@ struct metaslab {
     */
    hrtime_t ms_load_time; /* time last loaded */
    hrtime_t ms_unload_time; /* time last unloaded */
    hrtime_t ms_selected_time; /* time last allocated from */
    uint64_t ms_selected_time; /* time last allocated from (secs) */

    uint64_t ms_alloc_txg; /* last successful alloc (debug only) */
    uint64_t ms_max_size; /* maximum allocatable size */
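The per-allocator structures above each carry their own lock, queue tree, rotor, and reservation counter, and are cache-line aligned. A hedged, self-contained sketch of why that layout helps (invented names and user-space locking, not the ZFS code):

#include <pthread.h>
#include <stdint.h>

#define CACHE_LINE 64

/*
 * One instance per allocator, padded to a cache line so counters updated by
 * different CPUs do not share a line (the role of ____cacheline_aligned).
 * Each allocator keeps its own lock and reservation counter, so parallel
 * writers rarely contend on a single global lock.
 */
typedef struct allocator_state {
    pthread_mutex_t as_lock;    /* protects this allocator's queue and counters */
    uint64_t as_reserved;       /* bytes reserved through this allocator */
    uint64_t as_aliquot;        /* bytes issued against the current target */
} __attribute__((aligned(CACHE_LINE))) allocator_state_t;

/* Spread work across allocators, e.g. by CPU id, instead of one shared lock. */
static inline allocator_state_t *
pick_allocator(allocator_state_t *allocators, int count, int cpu)
{
    return (&allocators[cpu % count]);
}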
@@ -59,11 +59,6 @@
extern "C" {
#endif

typedef struct spa_alloc {
    kmutex_t spaa_lock;
    avl_tree_t spaa_tree;
} ____cacheline_aligned spa_alloc_t;

typedef struct spa_allocs_use {
    kmutex_t sau_lock;
    uint_t sau_rotor;
@@ -273,12 +268,6 @@ struct spa {
    uint64_t spa_last_synced_guid; /* last synced guid */
    list_t spa_config_dirty_list; /* vdevs with dirty config */
    list_t spa_state_dirty_list; /* vdevs with dirty state */
    /*
     * spa_allocs is an array, whose lengths is stored in spa_alloc_count.
     * There is one tree and one lock for each allocator, to help improve
     * allocation performance in write-heavy workloads.
     */
    spa_alloc_t *spa_allocs;
    spa_allocs_use_t *spa_allocs_use;
    int spa_alloc_count;
    int spa_active_allocator; /* selectable allocator */
@@ -60,10 +60,6 @@ extern "C" {
typedef struct vdev_queue vdev_queue_t;
struct abd;

extern uint_t zfs_vdev_queue_depth_pct;
extern uint_t zfs_vdev_def_queue_depth;
extern uint_t zfs_vdev_async_write_max_active;

/*
 * Virtual device operations
 */
@@ -245,16 +245,26 @@ For L2ARC devices less than 1 GiB, the amount of data
evicts is significant compared to the amount of restored L2ARC data.
In this case, do not write log blocks in L2ARC in order not to waste space.
.
.It Sy metaslab_aliquot Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Metaslab granularity, in bytes.
.It Sy metaslab_aliquot Ns = Ns Sy 2097152 Ns B Po 2 MiB Pc Pq u64
Metaslab group's per child vdev allocation granularity, in bytes.
This is roughly similar to what would be referred to as the "stripe size"
in traditional RAID arrays.
In normal operation, ZFS will try to write this amount of data to each disk
before moving on to the next top-level vdev.
In normal operation, ZFS will try to write this amount of data to each child
of a top-level vdev before moving on to the next top-level vdev.
.
.It Sy metaslab_bias_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group biasing based on their vdevs' over- or under-utilization
relative to the pool.
Enable metaslab groups biasing based on their over- or under-utilization
relative to the metaslab class average.
If disabled, each metaslab group will receive allocations proportional to its
capacity.
.
.It Sy metaslab_perf_bias Ns = Ns Sy 1 Ns | Ns 0 Ns | Ns 2 Pq int
Controls metaslab groups biasing based on their write performance.
Setting to 0 makes all metaslab groups receive fixed amounts of allocations.
Setting to 2 allows faster metaslab groups to allocate more.
Setting to 1 equals to 2 if the pool is write-bound or 0 otherwise.
That is, if the pool is limited by write throughput, then allocate more from
faster metaslab groups, but if not, try to evenly distribute the allocations.
.
.It Sy metaslab_force_ganging Ns = Ns Sy 16777217 Ns B Po 16 MiB + 1 B Pc Pq u64
Make some blocks above a certain size be gang blocks.
@@ -1527,23 +1537,6 @@ This enforced wait ensures the HDD services the interactive I/O
within a reasonable amount of time.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_queue_depth_pct Ns = Ns Sy 1000 Ns % Pq uint
Maximum number of queued allocations per top-level vdev expressed as
a percentage of
.Sy zfs_vdev_async_write_max_active ,
which allows the system to detect devices that are more capable
of handling allocations and to allocate more blocks to those devices.
This allows for dynamic allocation distribution when devices are imbalanced,
as fuller devices will tend to be slower than empty devices.
.Pp
Also see
.Sy zio_dva_throttle_enabled .
.
.It Sy zfs_vdev_def_queue_depth Ns = Ns Sy 32 Pq uint
Default queue depth for each vdev IO allocator.
Higher values allow for better coalescing of sequential writes before sending
them to the disk, but can increase transaction commit times.
.
.It Sy zfs_vdev_failfast_mask Ns = Ns Sy 1 Pq uint
Defines if the driver should retire on a given error type.
The following options may be bitwise-ored together:
@@ -2488,10 +2481,7 @@ Slow I/O counters can be seen with
.
.It Sy zio_dva_throttle_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Throttle block allocations in the I/O pipeline.
This allows for dynamic allocation distribution when devices are imbalanced.
When enabled, the maximum number of pending allocations per top-level vdev
is limited by
.Sy zfs_vdev_queue_depth_pct .
This allows for dynamic allocation distribution based on device performance.
.
.It Sy zfs_xattr_compat Ns = Ns 0 Ns | Ns 1 Pq int
Control the naming scheme used when setting new xattrs in the user namespace.
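To illustrate the rotor behaviour the metaslab_aliquot entry above describes, here is a small self-contained sketch (invented names and a fixed vdev count; the real metaslab allocator also weighs free space and, with metaslab_perf_bias, write performance):

#include <stdint.h>

#define NVDEVS  4
#define ALIQUOT (2ULL << 20)    /* 2 MiB, matching the new default above */

static int rotor;               /* index of the child vdev currently filling */
static uint64_t handed_out;     /* bytes allocated against the current aliquot */

/* Return the child vdev index that should receive the next `size` bytes. */
static int
pick_vdev(uint64_t size)
{
    int vd = rotor;

    handed_out += size;
    if (handed_out >= ALIQUOT) {        /* aliquot consumed: advance the rotor */
        rotor = (rotor + 1) % NVDEVS;
        handed_out = 0;
    }
    return (vd);
}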
File diff suppressed because it is too large
@@ -1686,11 +1686,11 @@ spa_activate(spa_t *spa, spa_mode_t mode)
    spa->spa_mode = mode;
    spa->spa_read_spacemaps = spa_mode_readable_spacemaps;

    spa->spa_normal_class = metaslab_class_create(spa, msp);
    spa->spa_log_class = metaslab_class_create(spa, msp);
    spa->spa_embedded_log_class = metaslab_class_create(spa, msp);
    spa->spa_special_class = metaslab_class_create(spa, msp);
    spa->spa_dedup_class = metaslab_class_create(spa, msp);
    spa->spa_normal_class = metaslab_class_create(spa, msp, B_FALSE);
    spa->spa_log_class = metaslab_class_create(spa, msp, B_TRUE);
    spa->spa_embedded_log_class = metaslab_class_create(spa, msp, B_TRUE);
    spa->spa_special_class = metaslab_class_create(spa, msp, B_FALSE);
    spa->spa_dedup_class = metaslab_class_create(spa, msp, B_FALSE);

    /* Try to create a covering process */
    mutex_enter(&spa->spa_proc_lock);
@@ -9883,60 +9883,9 @@ spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
{
    ASSERT(spa_writeable(spa));

    vdev_t *rvd = spa->spa_root_vdev;
    uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
        zfs_vdev_queue_depth_pct / 100;
    metaslab_class_t *normal = spa_normal_class(spa);
    metaslab_class_t *special = spa_special_class(spa);
    metaslab_class_t *dedup = spa_dedup_class(spa);

    uint64_t slots_per_allocator = 0;
    for (int c = 0; c < rvd->vdev_children; c++) {
        vdev_t *tvd = rvd->vdev_child[c];

        metaslab_group_t *mg = tvd->vdev_mg;
        if (mg == NULL || !metaslab_group_initialized(mg))
            continue;

        metaslab_class_t *mc = mg->mg_class;
        if (mc != normal && mc != special && mc != dedup)
            continue;

        /*
         * It is safe to do a lock-free check here because only async
         * allocations look at mg_max_alloc_queue_depth, and async
         * allocations all happen from spa_sync().
         */
        for (int i = 0; i < mg->mg_allocators; i++) {
            ASSERT0(zfs_refcount_count(
                &(mg->mg_allocator[i].mga_alloc_queue_depth)));
        }
        mg->mg_max_alloc_queue_depth = max_queue_depth;

        for (int i = 0; i < mg->mg_allocators; i++) {
            mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
                zfs_vdev_def_queue_depth;
        }
        slots_per_allocator += zfs_vdev_def_queue_depth;
    }

    for (int i = 0; i < spa->spa_alloc_count; i++) {
        ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
            mca_alloc_slots));
        ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
            mca_alloc_slots));
        ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
            mca_alloc_slots));
        normal->mc_allocator[i].mca_alloc_max_slots =
            slots_per_allocator;
        special->mc_allocator[i].mca_alloc_max_slots =
            slots_per_allocator;
        dedup->mc_allocator[i].mca_alloc_max_slots =
            slots_per_allocator;
    }
    normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
    special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
    dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
    metaslab_class_balance(spa_normal_class(spa), B_TRUE);
    metaslab_class_balance(spa_special_class(spa), B_TRUE);
    metaslab_class_balance(spa_dedup_class(spa), B_TRUE);
}

static void
@@ -10156,12 +10105,6 @@ spa_sync(spa_t *spa, uint64_t txg)
    spa->spa_syncing_txg = txg;
    spa->spa_sync_pass = 0;

    for (int i = 0; i < spa->spa_alloc_count; i++) {
        mutex_enter(&spa->spa_allocs[i].spaa_lock);
        VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
        mutex_exit(&spa->spa_allocs[i].spaa_lock);
    }

    /*
     * If there are any pending vdev state changes, convert them
     * into config changes that go out with this transaction group.
@@ -10274,12 +10217,6 @@ spa_sync(spa_t *spa, uint64_t txg)

    dsl_pool_sync_done(dp, txg);

    for (int i = 0; i < spa->spa_alloc_count; i++) {
        mutex_enter(&spa->spa_allocs[i].spaa_lock);
        VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
        mutex_exit(&spa->spa_allocs[i].spaa_lock);
    }

    /*
     * Update usable space statistics.
     */
@@ -759,14 +759,6 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
    spa->spa_alloc_count = MAX(MIN(spa_num_allocators,
        boot_ncpus / MAX(spa_cpus_per_allocator, 1)), 1);

    spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
        sizeof (spa_alloc_t), KM_SLEEP);
    for (int i = 0; i < spa->spa_alloc_count; i++) {
        mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
            NULL);
        avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
            sizeof (zio_t), offsetof(zio_t, io_queue_node.a));
    }
    if (spa->spa_alloc_count > 1) {
        spa->spa_allocs_use = kmem_zalloc(offsetof(spa_allocs_use_t,
            sau_inuse[spa->spa_alloc_count]), KM_SLEEP);
@@ -862,12 +854,6 @@ spa_remove(spa_t *spa)
        kmem_free(dp, sizeof (spa_config_dirent_t));
    }

    for (int i = 0; i < spa->spa_alloc_count; i++) {
        avl_destroy(&spa->spa_allocs[i].spaa_tree);
        mutex_destroy(&spa->spa_allocs[i].spaa_lock);
    }
    kmem_free(spa->spa_allocs, spa->spa_alloc_count *
        sizeof (spa_alloc_t));
    if (spa->spa_alloc_count > 1) {
        mutex_destroy(&spa->spa_allocs_use->sau_lock);
        kmem_free(spa->spa_allocs_use, offsetof(spa_allocs_use_t,
@@ -1318,11 +1304,11 @@ spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
    /*
     * Verify the metaslab classes.
     */
    ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
    ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
    ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
    ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
    ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
    metaslab_class_validate(spa_normal_class(spa));
    metaslab_class_validate(spa_log_class(spa));
    metaslab_class_validate(spa_embedded_log_class(spa));
    metaslab_class_validate(spa_special_class(spa));
    metaslab_class_validate(spa_dedup_class(spa));

    spa_config_exit(spa, SCL_ALL, spa);
@@ -149,7 +149,7 @@ static uint_t zfs_vdev_sync_write_max_active = 10;
static uint_t zfs_vdev_async_read_min_active = 1;
/* */ uint_t zfs_vdev_async_read_max_active = 3;
static uint_t zfs_vdev_async_write_min_active = 2;
/* */ uint_t zfs_vdev_async_write_max_active = 10;
static uint_t zfs_vdev_async_write_max_active = 10;
static uint_t zfs_vdev_scrub_min_active = 1;
static uint_t zfs_vdev_scrub_max_active = 3;
static uint_t zfs_vdev_removal_min_active = 1;
@@ -204,31 +204,6 @@ static uint_t zfs_vdev_aggregation_limit_non_rotating = SPA_OLD_MAXBLOCKSIZE;
static uint_t zfs_vdev_read_gap_limit = 32 << 10;
static uint_t zfs_vdev_write_gap_limit = 4 << 10;

/*
 * Define the queue depth percentage for each top-level. This percentage is
 * used in conjunction with zfs_vdev_async_max_active to determine how many
 * allocations a specific top-level vdev should handle. Once the queue depth
 * reaches zfs_vdev_queue_depth_pct * zfs_vdev_async_write_max_active / 100
 * then allocator will stop allocating blocks on that top-level device.
 * The default kernel setting is 1000% which will yield 100 allocations per
 * device. For userland testing, the default setting is 300% which equates
 * to 30 allocations per device.
 */
#ifdef _KERNEL
uint_t zfs_vdev_queue_depth_pct = 1000;
#else
uint_t zfs_vdev_queue_depth_pct = 300;
#endif

/*
 * When performing allocations for a given metaslab, we want to make sure that
 * there are enough IOs to aggregate together to improve throughput. We want to
 * ensure that there are at least 128k worth of IOs that can be aggregated, and
 * we assume that the average allocation size is 4k, so we need the queue depth
 * to be 32 per allocator to get good aggregation of sequential writes.
 */
uint_t zfs_vdev_def_queue_depth = 32;

static int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
@@ -1168,9 +1143,3 @@ ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_credit, UINT, ZMOD_RW,

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_delay, UINT, ZMOD_RW,
    "Number of non-interactive I/Os before _max_active");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, queue_depth_pct, UINT, ZMOD_RW,
    "Queue depth percentage for each top-level vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, def_queue_depth, UINT, ZMOD_RW,
    "Default queue depth for each allocator");
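For scale, the two tunables removed above bounded the old request-based throttle: with the former defaults each top-level vdev could accept zfs_vdev_async_write_max_active * zfs_vdev_queue_depth_pct / 100 = 10 * 1000 / 100 = 100 queued allocations. At the 16 MiB maximum block size that is the roughly 1.6 GB of queued writes per device mentioned in the commit message, while at 4 KiB blocks it is only about 400 KiB, which is why a fixed request count was a poor proxy for the amount of queued work.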
@@ -1172,10 +1172,10 @@ spa_vdev_copy_segment(vdev_t *vd, zfs_range_tree_t *segs,
    if (mc->mc_groups == 0)
        mc = spa_normal_class(spa);
    int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg,
        METASLAB_DONT_THROTTLE, zal, 0);
        0, zal, 0);
    if (error == ENOSPC && mc != spa_normal_class(spa)) {
        error = metaslab_alloc_dva(spa, spa_normal_class(spa), size,
            &dst, 0, NULL, txg, METASLAB_DONT_THROTTLE, zal, 0);
            &dst, 0, NULL, txg, 0, zal, 0);
    }
    if (error != 0)
        return (error);
module/zfs/zio.c
@@ -3134,8 +3134,7 @@ zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
    abd_t *gbh_abd;
    uint64_t txg = pio->io_txg;
    uint64_t resid = pio->io_size;
    uint64_t lsize;
    int copies = gio->io_prop.zp_copies;
    uint64_t psize;
    zio_prop_t zp;
    int error;
    boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
@@ -3150,47 +3149,18 @@ zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
    ASSERT3S(gbh_copies, <=, SPA_DVAS_PER_BP);

    ASSERT(ZIO_HAS_ALLOCATOR(pio));
    int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
    int flags = METASLAB_GANG_HEADER;
    if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
        ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
        ASSERT(has_data);

        flags |= METASLAB_ASYNC_ALLOC;
        VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
            mca_alloc_slots, pio));

        /*
         * The logical zio has already placed a reservation for
         * 'copies' allocation slots but gang blocks may require
         * additional copies. These additional copies
         * (i.e. gbh_copies - copies) are guaranteed to succeed
         * since metaslab_class_throttle_reserve() always allows
         * additional reservations for gang blocks.
         */
        ASSERT3U(gbh_copies, >=, copies);
        VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
            pio->io_allocator, pio, flags));
    }

    error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
        bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
        &pio->io_alloc_list, pio, pio->io_allocator);
        &pio->io_alloc_list, pio->io_allocator, pio);
    if (error) {
        if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
            ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
            ASSERT(has_data);

            /*
             * If we failed to allocate the gang block header then
             * we remove any additional allocation reservations that
             * we placed here. The original reservation will
             * be removed when the logical I/O goes to the ready
             * stage.
             */
            metaslab_class_throttle_unreserve(mc,
                gbh_copies - copies, pio->io_allocator, pio);
        }

        pio->io_error = error;
        return (pio);
    }
@@ -3215,14 +3185,20 @@ zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
        ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

    zio_gang_inherit_allocator(pio, zio);
    if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
        boolean_t more;
        VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies,
            zio, B_TRUE, &more));
    }

    /*
     * Create and nowait the gang children.
     */
    for (int g = 0; resid != 0; resid -= lsize, g++) {
        lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
            SPA_MINBLOCKSIZE);
        ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
    for (int g = 0; resid != 0; resid -= psize, g++) {
        psize = zio_roundup_alloc_size(spa,
            resid / (SPA_GBH_NBLKPTRS - g));
        psize = MIN(resid, psize);
        ASSERT3U(psize, >=, SPA_MINBLOCKSIZE);

        zp.zp_checksum = gio->io_prop.zp_checksum;
        zp.zp_compress = ZIO_COMPRESS_OFF;
@@ -3243,25 +3219,20 @@ zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)

        zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
            has_data ? abd_get_offset(pio->io_abd, pio->io_size -
            resid) : NULL, lsize, lsize, &zp,
            resid) : NULL, psize, psize, &zp,
            zio_write_gang_member_ready, NULL,
            zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
            ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

        zio_gang_inherit_allocator(zio, cio);
        /*
         * We do not reserve for the child writes, since we already
         * reserved for the parent. Unreserve though will be called
         * for individual children. We can do this since sum of all
         * child's physical sizes is equal to parent's physical size.
         * It would not work for potentially bigger allocation sizes.
         */

        if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
            ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
            ASSERT(has_data);

            /*
             * Gang children won't throttle but we should
             * account for their work, so reserve an allocation
             * slot for them here.
             */
            VERIFY(metaslab_class_throttle_reserve(mc,
                zp.zp_copies, cio->io_allocator, cio, flags));
        }
        zio_nowait(cio);
    }
@@ -4029,15 +4000,17 @@ zio_ddt_free(zio_t *zio)
 */

static zio_t *
zio_io_to_allocate(spa_t *spa, int allocator)
zio_io_to_allocate(metaslab_class_allocator_t *mca, boolean_t *more)
{
    zio_t *zio;

    ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
    ASSERT(MUTEX_HELD(&mca->mca_lock));

    zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
    if (zio == NULL)
    zio = avl_first(&mca->mca_tree);
    if (zio == NULL) {
        *more = B_FALSE;
        return (NULL);
    }

    ASSERT(IO_IS_ALLOCATING(zio));
    ASSERT(ZIO_HAS_ALLOCATOR(zio));
@@ -4046,15 +4019,16 @@ zio_io_to_allocate(spa_t *spa, int allocator)
     * Try to place a reservation for this zio. If we're unable to
     * reserve then we throttle.
     */
    ASSERT3U(zio->io_allocator, ==, allocator);
    if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
        zio->io_prop.zp_copies, allocator, zio, 0)) {
        zio->io_prop.zp_copies, zio, B_FALSE, more)) {
        return (NULL);
    }

    avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
    avl_remove(&mca->mca_tree, zio);
    ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);

    if (avl_is_empty(&mca->mca_tree))
        *more = B_FALSE;
    return (zio);
}

@@ -4064,9 +4038,14 @@ zio_dva_throttle(zio_t *zio)
    spa_t *spa = zio->io_spa;
    zio_t *nio;
    metaslab_class_t *mc;
    boolean_t more;

    /* locate an appropriate allocation class */
    mc = spa_preferred_class(spa, zio);
    /*
     * If not already chosen, choose an appropriate allocation class.
     */
    mc = zio->io_metaslab_class;
    if (mc == NULL)
        mc = spa_preferred_class(spa, zio);

    if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
        !mc->mc_alloc_throttle_enabled ||
@@ -4081,29 +4060,33 @@ zio_dva_throttle(zio_t *zio)
    ASSERT3U(zio->io_queued_timestamp, >, 0);
    ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);

    int allocator = zio->io_allocator;
    zio->io_metaslab_class = mc;
    mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
    avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
    nio = zio_io_to_allocate(spa, allocator);
    mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
    metaslab_class_allocator_t *mca = &mc->mc_allocator[zio->io_allocator];
    mutex_enter(&mca->mca_lock);
    avl_add(&mca->mca_tree, zio);
    nio = zio_io_to_allocate(mca, &more);
    mutex_exit(&mca->mca_lock);
    return (nio);
}

static void
zio_allocate_dispatch(spa_t *spa, int allocator)
zio_allocate_dispatch(metaslab_class_t *mc, int allocator)
{
    metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
    zio_t *zio;
    boolean_t more;

    mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
    zio = zio_io_to_allocate(spa, allocator);
    mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
    if (zio == NULL)
        return;
    do {
        mutex_enter(&mca->mca_lock);
        zio = zio_io_to_allocate(mca, &more);
        mutex_exit(&mca->mca_lock);
        if (zio == NULL)
            return;

    ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
    ASSERT0(zio->io_error);
    zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
        ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
        ASSERT0(zio->io_error);
        zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
    } while (more);
}
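The reworked zio_allocate_dispatch() above no longer hands off a single zio: it keeps pulling queued zios from the per-allocator tree for as long as reservations keep succeeding. A toy, self-contained illustration of that drain-until-blocked pattern against a byte budget (invented names, not the ZFS code):

#include <stdint.h>
#include <stdio.h>

/* Toy FIFO of pending allocation sizes (0 terminates) and a byte budget. */
static uint64_t pending[] = { 4096, 131072, 1048576, 524288, 0 };
static int head;
static uint64_t reserved;
static const uint64_t budget = 1048576;     /* 1 MiB may be in flight at once */

/* Issue queued requests while their byte reservations still fit the budget. */
static void
dispatch_all(void)
{
    while (pending[head] != 0 && reserved + pending[head] <= budget) {
        reserved += pending[head];
        printf("issued %llu bytes, %llu reserved\n",
            (unsigned long long)pending[head],
            (unsigned long long)reserved);
        head++;
    }
}

int
main(void)
{
    dispatch_all();     /* issues 4 KiB and 128 KiB; 1 MiB would overshoot */
    reserved -= 4096;   /* a completion returns its bytes to the budget */
    dispatch_all();     /* still blocked: 131072 + 1048576 > budget */
    return (0);
}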
static zio_t *
@@ -4126,15 +4109,13 @@ zio_dva_allocate(zio_t *zio)
    ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
    ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

    if (zio->io_flags & ZIO_FLAG_NODATA)
        flags |= METASLAB_DONT_THROTTLE;
    if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
        flags |= METASLAB_GANG_CHILD;
    if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
        flags |= METASLAB_ASYNC_ALLOC;

    /*
     * if not already chosen, locate an appropriate allocation class
     * If not already chosen, choose an appropriate allocation class.
     */
    mc = zio->io_metaslab_class;
    if (mc == NULL) {
@@ -4143,6 +4124,7 @@ zio_dva_allocate(zio_t *zio)
    }
    ZIOSTAT_BUMP(ziostat_total_allocations);

again:
    /*
     * Try allocating the block in the usual metaslab class.
     * If that's full, allocate it in the normal class.
@@ -4157,7 +4139,7 @@ zio_dva_allocate(zio_t *zio)
    ASSERT(ZIO_HAS_ALLOCATOR(zio));
    error = metaslab_alloc(spa, mc, zio->io_size, bp,
        zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
        &zio->io_alloc_list, zio, zio->io_allocator);
        &zio->io_alloc_list, zio->io_allocator, zio);

    /*
     * Fallback to normal class when an alloc class is full
@@ -4184,36 +4166,42 @@ zio_dva_allocate(zio_t *zio)
    }

    /*
     * If throttling, transfer reservation over to normal class.
     * The io_allocator slot can remain the same even though we
     * are switching classes.
     * If we are holding old class reservation, drop it.
     * Dispatch the next ZIO(s) there if some are waiting.
     */
    if (mc->mc_alloc_throttle_enabled &&
        (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
        metaslab_class_throttle_unreserve(mc,
            zio->io_prop.zp_copies, zio->io_allocator, zio);
    if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
        if (metaslab_class_throttle_unreserve(mc,
            zio->io_prop.zp_copies, zio)) {
            zio_allocate_dispatch(zio->io_metaslab_class,
                zio->io_allocator);
        }
        zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;

        VERIFY(metaslab_class_throttle_reserve(
            spa_normal_class(spa),
            zio->io_prop.zp_copies, zio->io_allocator, zio,
            flags | METASLAB_MUST_RESERVE));
    }
    zio->io_metaslab_class = mc = spa_normal_class(spa);

    if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
        zfs_dbgmsg("%s: metaslab allocation failure, "
            "trying normal class: zio %px, size %llu, error %d",
            spa_name(spa), zio, (u_longlong_t)zio->io_size,
            error);
    }

    zio->io_metaslab_class = mc = spa_normal_class(spa);
    ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
    error = metaslab_alloc(spa, mc, zio->io_size, bp,
        zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
        &zio->io_alloc_list, zio, zio->io_allocator);

    /*
     * If normal class uses throttling, return to that pipeline
     * stage. Otherwise just do another allocation attempt.
     */
    if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
        mc->mc_alloc_throttle_enabled &&
        zio->io_child_type != ZIO_CHILD_GANG &&
        !(zio->io_flags & ZIO_FLAG_NODATA)) {
        zio->io_stage = ZIO_STAGE_DVA_THROTTLE >> 1;
        return (zio);
    }
    goto again;
    }

    if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
    if (error == ENOSPC && zio->io_size > spa->spa_min_alloc) {
        if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
            zfs_dbgmsg("%s: metaslab allocation failure, "
                "trying ganging: zio %px, size %llu, error %d",
@@ -4316,18 +4304,18 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
        % spa->spa_alloc_count;
    ZIOSTAT_BUMP(ziostat_total_allocations);
    error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
        txg, NULL, flags, &io_alloc_list, NULL, allocator);
        txg, NULL, flags, &io_alloc_list, allocator, NULL);
    *slog = (error == 0);
    if (error != 0) {
        error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
            new_bp, 1, txg, NULL, flags,
            &io_alloc_list, NULL, allocator);
            new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
            NULL);
    }
    if (error != 0) {
        ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
        error = metaslab_alloc(spa, spa_normal_class(spa), size,
            new_bp, 1, txg, NULL, flags,
            &io_alloc_list, NULL, allocator);
            new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
            NULL);
    }
    metaslab_trace_fini(&io_alloc_list);
@@ -5155,10 +5143,12 @@ zio_ready(zio_t *zio)
             * We were unable to allocate anything, unreserve and
             * issue the next I/O to allocate.
             */
            metaslab_class_throttle_unreserve(
            if (metaslab_class_throttle_unreserve(
                zio->io_metaslab_class, zio->io_prop.zp_copies,
                zio->io_allocator, zio);
            zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
                zio)) {
                zio_allocate_dispatch(zio->io_metaslab_class,
                    zio->io_allocator);
            }
        }
    }

@@ -5201,10 +5191,10 @@ zio_ready(zio_t *zio)
static void
zio_dva_throttle_done(zio_t *zio)
{
    zio_t *lio __maybe_unused = zio->io_logical;
    zio_t *pio = zio_unique_parent(zio);
    vdev_t *vd = zio->io_vd;
    int flags = METASLAB_ASYNC_ALLOC;
    const void *tag = pio;

    ASSERT3P(zio->io_bp, !=, NULL);
    ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
@@ -5215,48 +5205,33 @@ zio_dva_throttle_done(zio_t *zio)
    ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
    ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
    ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
    ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
    ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));

    /*
     * Parents of gang children can have two flavors -- ones that
     * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
     * and ones that allocated the constituent blocks. The allocation
     * throttle needs to know the allocating parent zio so we must find
     * it here.
     * Parents of gang children can have two flavors -- ones that allocated
     * the gang header (will have ZIO_FLAG_IO_REWRITE set) and ones that
     * allocated the constituent blocks. The first use their parent as tag.
     */
    if (pio->io_child_type == ZIO_CHILD_GANG) {
        /*
         * If our parent is a rewrite gang child then our grandparent
         * would have been the one that performed the allocation.
         */
        if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
            pio = zio_unique_parent(pio);
        flags |= METASLAB_GANG_CHILD;
    }
    if (pio->io_child_type == ZIO_CHILD_GANG &&
        (pio->io_flags & ZIO_FLAG_IO_REWRITE))
        tag = zio_unique_parent(pio);

    ASSERT(IO_IS_ALLOCATING(pio));
    ASSERT(IO_IS_ALLOCATING(pio) || (pio->io_child_type == ZIO_CHILD_GANG &&
        (pio->io_flags & ZIO_FLAG_IO_REWRITE)));
    ASSERT(ZIO_HAS_ALLOCATOR(pio));
    ASSERT3P(zio, !=, zio->io_logical);
    ASSERT(zio->io_logical != NULL);
    ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
    ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
    ASSERT(zio->io_metaslab_class != NULL);
    ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);

    mutex_enter(&pio->io_lock);
    metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
        pio->io_allocator, B_TRUE);
    mutex_exit(&pio->io_lock);
    metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id,
        pio->io_allocator, flags, pio->io_size, tag);

    metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
        pio->io_allocator, pio);

    /*
     * Call into the pipeline to see if there is more work that
     * needs to be done. If there is work to be done it will be
     * dispatched to another taskq thread.
     */
    zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
    if (metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1, pio)) {
        zio_allocate_dispatch(zio->io_metaslab_class,
            pio->io_allocator);
    }
}

static zio_t *
@@ -5285,28 +5260,8 @@ zio_done(zio_t *zio)
     * by the logical I/O but the actual write is done by child I/Os.
     */
    if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
        zio->io_child_type == ZIO_CHILD_VDEV) {
        ASSERT(zio->io_metaslab_class != NULL);
        ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
        zio->io_child_type == ZIO_CHILD_VDEV)
        zio_dva_throttle_done(zio);
    }

    /*
     * If the allocation throttle is enabled, verify that
     * we have decremented the refcounts for every I/O that was throttled.
     */
    if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
        ASSERT(zio->io_type == ZIO_TYPE_WRITE);
        ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
        ASSERT(zio->io_bp != NULL);
        ASSERT(ZIO_HAS_ALLOCATOR(zio));

        metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
            zio->io_allocator);
        VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
            mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
    }

    for (int c = 0; c < ZIO_CHILD_TYPES; c++)
        for (int w = 0; w < ZIO_WAIT_TYPES; w++)