block: blk-rq-qos: guard rq-qos helpers by static key
Even if blk-rq-qos isn't used or configured, dipping into the queue to
fetch ->rq_qos is a noticeable slowdown and visible in profiles. Add an
unlikely static key around blk-rq-qos, to avoid fetching this cacheline
if blk-iolatency or blk-wbt isn't configured or used.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 9b79f86e06
commit 033b667a82
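For context, here is a minimal sketch of the static-key pattern the patch applies. It is illustrative only: example_key, struct example_queue, apply_qos(), example_attach() and example_detach() are hypothetical stand-ins, not symbols from this patch; only DEFINE_STATIC_KEY_FALSE(), static_branch_unlikely(), static_branch_inc() and static_branch_dec() are the real jump-label APIs it uses. The actual diff follows.

#include <linux/jump_label.h>

/* Key starts disabled, so the guarded branch is patched to fall straight
 * through until the first user enables it. */
DEFINE_STATIC_KEY_FALSE(example_key);

struct example_queue {
	void *example_qos;		/* plays the role of q->rq_qos */
};

static void apply_qos(void *qos)
{
	/* stand-in for the real __rq_qos_*() work */
}

static inline void example_hook(struct example_queue *q)
{
	/*
	 * While the key is disabled the whole body is skipped via the
	 * patched branch, so the queue's ->example_qos cacheline is never
	 * touched on the hot path.
	 */
	if (static_branch_unlikely(&example_key) && q->example_qos)
		apply_qos(q->example_qos);
}

/* Mirrors rq_qos_add()/rq_qos_exit(): the key is reference-counted, so it
 * stays enabled as long as at least one policy is attached somewhere. */
static void example_attach(struct example_queue *q, void *qos)
{
	q->example_qos = qos;
	static_branch_inc(&example_key);
}

static void example_detach(struct example_queue *q)
{
	q->example_qos = NULL;
	static_branch_dec(&example_key);
}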
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -2,6 +2,8 @@
 #include "blk-rq-qos.h"
 
+__read_mostly DEFINE_STATIC_KEY_FALSE(block_rq_qos);
+
 /*
  * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
  * false if 'v' + 1 would be bigger than 'below'.
  */
@@ -317,6 +319,7 @@ void rq_qos_exit(struct request_queue *q)
 		struct rq_qos *rqos = q->rq_qos;
 		q->rq_qos = rqos->next;
 		rqos->ops->exit(rqos);
+		static_branch_dec(&block_rq_qos);
 	}
 	mutex_unlock(&q->rq_qos_mutex);
 }
@@ -343,6 +346,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
 		goto ebusy;
 	rqos->next = q->rq_qos;
 	q->rq_qos = rqos;
+	static_branch_inc(&block_rq_qos);
 
 	blk_mq_unfreeze_queue(q, memflags);
 
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -12,6 +12,7 @@
 #include "blk-mq-debugfs.h"
 
 struct blk_mq_debugfs_attr;
+extern struct static_key_false block_rq_qos;
 
 enum rq_qos_id {
 	RQ_QOS_WBT,
@@ -112,31 +113,33 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
 
 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
 {
-	if (q->rq_qos)
+	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
 		__rq_qos_cleanup(q->rq_qos, bio);
 }
 
 static inline void rq_qos_done(struct request_queue *q, struct request *rq)
 {
-	if (q->rq_qos && !blk_rq_is_passthrough(rq))
+	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos &&
+	    !blk_rq_is_passthrough(rq))
 		__rq_qos_done(q->rq_qos, rq);
 }
 
 static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
 {
-	if (q->rq_qos)
+	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
 		__rq_qos_issue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
 {
-	if (q->rq_qos)
+	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
 		__rq_qos_requeue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_done_bio(struct bio *bio)
 {
-	if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
+	if (static_branch_unlikely(&block_rq_qos) &&
+	    bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
 			     bio_flagged(bio, BIO_QOS_MERGED))) {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 		if (q->rq_qos)
@@ -146,7 +149,7 @@ static inline void rq_qos_done_bio(struct bio *bio)
 
 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
-	if (q->rq_qos) {
+	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_THROTTLED);
 		__rq_qos_throttle(q->rq_qos, bio);
 	}
@@ -155,14 +158,14 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 static inline void rq_qos_track(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (q->rq_qos)
+	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
 		__rq_qos_track(q->rq_qos, rq, bio);
 }
 
 static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (q->rq_qos) {
+	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_MERGED);
 		__rq_qos_merge(q->rq_qos, rq, bio);
 	}
@@ -170,7 +173,7 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 
 static inline void rq_qos_queue_depth_changed(struct request_queue *q)
 {
-	if (q->rq_qos)
+	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
 		__rq_qos_queue_depth_changed(q->rq_qos);
 }
 
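A note on the resulting fast path: because static_branch_inc()/static_branch_dec() maintain a count, the block_rq_qos key stays enabled as long as any rq-qos policy is attached to any queue, while the retained q->rq_qos check still filters per queue. With nothing attached, the helpers reduce to a patched-out branch, avoiding the ->rq_qos cacheline fetch the commit message calls out.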