blk-throttle: carry over directly

Now ->carryover_bytes[] and ->carryover_ios[] only covers limit/config
update.

Actually the carryover bytes/ios can be carried to ->bytes_disp[] and
->io_disp[] directly, since the carryover is a one-shot thing and is only
valid in the current slice.

Then we can remove the two fields and simplify the code considerably.

The type of ->bytes_disp[] and ->io_disp[] has to be changed to signed
because the two fields may become negative when updating limits or config,
but both are big enough to hold the bytes/ios dispatched in a single slice.

Cc: Tejun Heo <tj@kernel.org>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20250305043123.3938491-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Ming Lei 2025-03-05 12:31:21 +08:00 committed by Jens Axboe
parent a9fc8868b3
commit 6cc477c368
2 changed files with 23 additions and 30 deletions

View File

@ -478,8 +478,6 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
{ {
tg->bytes_disp[rw] = 0; tg->bytes_disp[rw] = 0;
tg->io_disp[rw] = 0; tg->io_disp[rw] = 0;
tg->carryover_bytes[rw] = 0;
tg->carryover_ios[rw] = 0;
/* /*
* Previous slice has expired. We must have trimmed it after last * Previous slice has expired. We must have trimmed it after last
@ -498,16 +496,14 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
} }
static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw, static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
bool clear_carryover) bool clear)
{ {
if (clear) {
tg->bytes_disp[rw] = 0; tg->bytes_disp[rw] = 0;
tg->io_disp[rw] = 0; tg->io_disp[rw] = 0;
}
tg->slice_start[rw] = jiffies; tg->slice_start[rw] = jiffies;
tg->slice_end[rw] = jiffies + tg->td->throtl_slice; tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
if (clear_carryover) {
tg->carryover_bytes[rw] = 0;
tg->carryover_ios[rw] = 0;
}
throtl_log(&tg->service_queue, throtl_log(&tg->service_queue,
"[%c] new slice start=%lu end=%lu jiffies=%lu", "[%c] new slice start=%lu end=%lu jiffies=%lu",
@ -617,20 +613,16 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
*/ */
time_elapsed -= tg->td->throtl_slice; time_elapsed -= tg->td->throtl_slice;
bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw), bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
time_elapsed) + time_elapsed);
tg->carryover_bytes[rw]; io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed);
io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
tg->carryover_ios[rw];
if (bytes_trim <= 0 && io_trim <= 0) if (bytes_trim <= 0 && io_trim <= 0)
return; return;
tg->carryover_bytes[rw] = 0;
if ((long long)tg->bytes_disp[rw] >= bytes_trim) if ((long long)tg->bytes_disp[rw] >= bytes_trim)
tg->bytes_disp[rw] -= bytes_trim; tg->bytes_disp[rw] -= bytes_trim;
else else
tg->bytes_disp[rw] = 0; tg->bytes_disp[rw] = 0;
tg->carryover_ios[rw] = 0;
if ((int)tg->io_disp[rw] >= io_trim) if ((int)tg->io_disp[rw] >= io_trim)
tg->io_disp[rw] -= io_trim; tg->io_disp[rw] -= io_trim;
else else
@ -645,7 +637,8 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
jiffies); jiffies);
} }
static void __tg_update_carryover(struct throtl_grp *tg, bool rw) static void __tg_update_carryover(struct throtl_grp *tg, bool rw,
long long *bytes, int *ios)
{ {
unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw]; unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
u64 bps_limit = tg_bps_limit(tg, rw); u64 bps_limit = tg_bps_limit(tg, rw);
@ -658,26 +651,28 @@ static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
* configuration. * configuration.
*/ */
if (bps_limit != U64_MAX) if (bps_limit != U64_MAX)
tg->carryover_bytes[rw] += *bytes = calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
tg->bytes_disp[rw]; tg->bytes_disp[rw];
if (iops_limit != UINT_MAX) if (iops_limit != UINT_MAX)
tg->carryover_ios[rw] += *ios = calculate_io_allowed(iops_limit, jiffy_elapsed) -
calculate_io_allowed(iops_limit, jiffy_elapsed) -
tg->io_disp[rw]; tg->io_disp[rw];
tg->bytes_disp[rw] -= *bytes;
tg->io_disp[rw] -= *ios;
} }
static void tg_update_carryover(struct throtl_grp *tg) static void tg_update_carryover(struct throtl_grp *tg)
{ {
long long bytes[2] = {0};
int ios[2] = {0};
if (tg->service_queue.nr_queued[READ]) if (tg->service_queue.nr_queued[READ])
__tg_update_carryover(tg, READ); __tg_update_carryover(tg, READ, &bytes[READ], &ios[READ]);
if (tg->service_queue.nr_queued[WRITE]) if (tg->service_queue.nr_queued[WRITE])
__tg_update_carryover(tg, WRITE); __tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]);
/* see comments in struct throtl_grp for meaning of these fields. */ /* see comments in struct throtl_grp for meaning of these fields. */
throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__, throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
tg->carryover_bytes[READ], tg->carryover_bytes[WRITE], bytes[READ], bytes[WRITE], ios[READ], ios[WRITE]);
tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
} }
static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio, static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
@ -695,8 +690,7 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio
/* Round up to the next throttle slice, wait time must be nonzero */ /* Round up to the next throttle slice, wait time must be nonzero */
jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) + io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd);
tg->carryover_ios[rw];
if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed) if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
return 0; return 0;
@ -729,8 +723,7 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
jiffy_elapsed_rnd = tg->td->throtl_slice; jiffy_elapsed_rnd = tg->td->throtl_slice;
jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) + bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd);
tg->carryover_bytes[rw];
if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed) if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
return 0; return 0;

View File

@ -102,9 +102,9 @@ struct throtl_grp {
unsigned int iops[2]; unsigned int iops[2];
/* Number of bytes dispatched in current slice */ /* Number of bytes dispatched in current slice */
uint64_t bytes_disp[2]; int64_t bytes_disp[2];
/* Number of bio's dispatched in current slice */ /* Number of bio's dispatched in current slice */
unsigned int io_disp[2]; int io_disp[2];
/* /*
* The following two fields are updated when new configuration is * The following two fields are updated when new configuration is