sched_ext: Take out ->priq and ->flags from scx_dsq_node
struct scx_dsq_node contains two data structure nodes to link the containing task to a DSQ and a flags field that is protected by the lock of the associated DSQ. One reason they are grouped into a struct is so that the type can be used independently as a cursor node when iterating over the tasks on a DSQ. However, when iterating, the cursor only needs to be linked on the FIFO list, and the rb_node part ends up inflating the size of the iterator data structure unnecessarily, making it potentially too expensive to place on the stack.

Take ->priq and ->flags out of scx_dsq_node and put them in sched_ext_entity as ->dsq_priq and ->dsq_flags, respectively. scx_dsq_node is renamed to scx_dsq_list_node and the field names are renamed accordingly. This will help implement a DSQ task iterator that can be allocated on the stack.

No functional change intended.

Signed-off-by: Tejun Heo <tj@kernel.org>
Suggested-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Cc: David Vernet <void@manifault.com>
parent e196c908f9
commit d4af01c373
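To make the motivation concrete, below is a minimal sketch of the kind of on-stack DSQ iterator this split enables: the cursor only needs the FIFO link, so a cursor built around struct scx_dsq_list_node stays small enough to live on the stack. The scx_dsq_iter type and the scx_dsq_iter_init()/scx_dsq_iter_next()/scx_dsq_iter_exit() helpers are hypothetical illustrations, not the iterator that eventually lands; the sketch also assumes the caller holds dsq->lock throughout and that no other cursor is linked on the list.

/*
 * Illustrative sketch only: a stack-allocated cursor threaded through a
 * DSQ's FIFO list. The type and helper names are hypothetical; the sketch
 * assumes dsq->lock is held and no other cursor is on the list.
 */
struct scx_dsq_iter {
	struct scx_dsq_list_node	cursor;		/* linked into dsq->list */
	struct scx_dispatch_q		*dsq;
};

static void scx_dsq_iter_init(struct scx_dsq_iter *it, struct scx_dispatch_q *dsq)
{
	it->dsq = dsq;
	/* start at the head so the first _next() returns the first task */
	list_add(&it->cursor.node, &dsq->list);
}

static struct task_struct *scx_dsq_iter_next(struct scx_dsq_iter *it)
{
	struct list_head *next = it->cursor.node.next;

	if (next == &it->dsq->list)
		return NULL;				/* hit the list head, done */

	/* advance the cursor past the task we are about to return */
	list_move(&it->cursor.node, next);

	return container_of(next, struct task_struct, scx.dsq_list.node);
}

static void scx_dsq_iter_exit(struct scx_dsq_iter *it)
{
	list_del_init(&it->cursor.node);
}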
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -121,10 +121,8 @@ enum scx_kf_mask {
 	__SCX_KF_TERMINAL	= SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
 };
 
-struct scx_dsq_node {
-	struct list_head	list;	/* dispatch order */
-	struct rb_node		priq;	/* p->scx.dsq_vtime order */
-	u32			flags;	/* SCX_TASK_DSQ_* flags */
+struct scx_dsq_list_node {
+	struct list_head	node;
 };
 
 /*
@@ -133,7 +131,9 @@ struct scx_dsq_node {
  */
 struct sched_ext_entity {
 	struct scx_dispatch_q	*dsq;
-	struct scx_dsq_node	dsq_node;	/* protected by dsq lock */
+	struct scx_dsq_list_node dsq_list;	/* dispatch order */
+	struct rb_node		dsq_priq;	/* p->scx.dsq_vtime order */
+	u32			dsq_flags;	/* protected by DSQ lock */
 	u32			flags;		/* protected by rq lock */
 	u32			weight;
 	s32			sticky_cpu;
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -102,7 +102,7 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
 #endif
 #ifdef CONFIG_SCHED_CLASS_EXT
 	.scx		= {
-		.dsq_node.list	= LIST_HEAD_INIT(init_task.scx.dsq_node.list),
+		.dsq_list.node	= LIST_HEAD_INIT(init_task.scx.dsq_list.node),
 		.sticky_cpu	= -1,
 		.holding_cpu	= -1,
 		.runnable_node	= LIST_HEAD_INIT(init_task.scx.runnable_node),
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1360,9 +1360,9 @@ static bool scx_dsq_priq_less(struct rb_node *node_a,
 			      const struct rb_node *node_b)
 {
 	const struct task_struct *a =
-		container_of(node_a, struct task_struct, scx.dsq_node.priq);
+		container_of(node_a, struct task_struct, scx.dsq_priq);
 	const struct task_struct *b =
-		container_of(node_b, struct task_struct, scx.dsq_node.priq);
+		container_of(node_b, struct task_struct, scx.dsq_priq);
 
 	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
 }
@@ -1378,9 +1378,9 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
 {
 	bool is_local = dsq->id == SCX_DSQ_LOCAL;
 
-	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_node.list));
-	WARN_ON_ONCE((p->scx.dsq_node.flags & SCX_TASK_DSQ_ON_PRIQ) ||
-		     !RB_EMPTY_NODE(&p->scx.dsq_node.priq));
+	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
+	WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
+		     !RB_EMPTY_NODE(&p->scx.dsq_priq));
 
 	if (!is_local) {
 		raw_spin_lock(&dsq->lock);
@@ -1419,21 +1419,21 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
 			scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
 				      dsq->id);
 
-		p->scx.dsq_node.flags |= SCX_TASK_DSQ_ON_PRIQ;
-		rb_add(&p->scx.dsq_node.priq, &dsq->priq, scx_dsq_priq_less);
+		p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
+		rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
 
 		/*
 		 * Find the previous task and insert after it on the list so
 		 * that @dsq->list is vtime ordered.
 		 */
-		rbp = rb_prev(&p->scx.dsq_node.priq);
+		rbp = rb_prev(&p->scx.dsq_priq);
 		if (rbp) {
 			struct task_struct *prev =
 				container_of(rbp, struct task_struct,
-					     scx.dsq_node.priq);
-			list_add(&p->scx.dsq_node.list, &prev->scx.dsq_node.list);
+					     scx.dsq_priq);
+			list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
 		} else {
-			list_add(&p->scx.dsq_node.list, &dsq->list);
+			list_add(&p->scx.dsq_list.node, &dsq->list);
 		}
 	} else {
 		/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
@@ -1442,9 +1442,9 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
 				      dsq->id);
 
 		if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
-			list_add(&p->scx.dsq_node.list, &dsq->list);
+			list_add(&p->scx.dsq_list.node, &dsq->list);
 		else
-			list_add_tail(&p->scx.dsq_node.list, &dsq->list);
+			list_add_tail(&p->scx.dsq_list.node, &dsq->list);
 	}
 
 	dsq_mod_nr(dsq, 1);
@@ -1487,18 +1487,18 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
 static void task_unlink_from_dsq(struct task_struct *p,
 				 struct scx_dispatch_q *dsq)
 {
-	if (p->scx.dsq_node.flags & SCX_TASK_DSQ_ON_PRIQ) {
-		rb_erase(&p->scx.dsq_node.priq, &dsq->priq);
-		RB_CLEAR_NODE(&p->scx.dsq_node.priq);
-		p->scx.dsq_node.flags &= ~SCX_TASK_DSQ_ON_PRIQ;
+	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
+		rb_erase(&p->scx.dsq_priq, &dsq->priq);
+		RB_CLEAR_NODE(&p->scx.dsq_priq);
+		p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
 	}
 
-	list_del_init(&p->scx.dsq_node.list);
+	list_del_init(&p->scx.dsq_list.node);
 }
 
 static bool task_linked_on_dsq(struct task_struct *p)
 {
-	return !list_empty(&p->scx.dsq_node.list);
+	return !list_empty(&p->scx.dsq_list.node);
 }
 
 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
@@ -1523,8 +1523,8 @@ static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
 	raw_spin_lock(&dsq->lock);
 
 	/*
-	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_node
-	 * can't change underneath us.
+	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
+	 * change underneath us.
 	 */
 	if (p->scx.holding_cpu < 0) {
 		/* @p must still be on @dsq, dequeue */
@@ -2034,7 +2034,7 @@ static void consume_local_task(struct rq *rq, struct scx_dispatch_q *dsq,
 	/* @dsq is locked and @p is on this rq */
 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
 	task_unlink_from_dsq(p, dsq);
-	list_add_tail(&p->scx.dsq_node.list, &rq->scx.local_dsq.list);
+	list_add_tail(&p->scx.dsq_list.node, &rq->scx.local_dsq.list);
 	dsq_mod_nr(dsq, -1);
 	dsq_mod_nr(&rq->scx.local_dsq, 1);
 	p->scx.dsq = &rq->scx.local_dsq;
@@ -2109,7 +2109,7 @@ retry:
 
 	raw_spin_lock(&dsq->lock);
 
-	list_for_each_entry(p, &dsq->list, scx.dsq_node.list) {
+	list_for_each_entry(p, &dsq->list, scx.dsq_list.node) {
 		struct rq *task_rq = task_rq(p);
 
 		if (rq == task_rq) {
@@ -2628,7 +2628,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p)
 static struct task_struct *first_local_task(struct rq *rq)
 {
 	return list_first_entry_or_null(&rq->scx.local_dsq.list,
-					struct task_struct, scx.dsq_node.list);
+					struct task_struct, scx.dsq_list.node);
 }
 
 static struct task_struct *pick_next_task_scx(struct rq *rq)
@@ -3309,8 +3309,8 @@ void init_scx_entity(struct sched_ext_entity *scx)
 	 */
 	memset(scx, 0, offsetof(struct sched_ext_entity, tasks_node));
 
-	INIT_LIST_HEAD(&scx->dsq_node.list);
-	RB_CLEAR_NODE(&scx->dsq_node.priq);
+	INIT_LIST_HEAD(&scx->dsq_list.node);
+	RB_CLEAR_NODE(&scx->dsq_priq);
 	scx->sticky_cpu = -1;
 	scx->holding_cpu = -1;
 	INIT_LIST_HEAD(&scx->runnable_node);
@@ -4160,7 +4160,7 @@ static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
 		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
 	dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
 		  scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
-		  p->scx.dsq_node.flags, ops_state & SCX_OPSS_STATE_MASK,
+		  p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
 		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
 	dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu",
 		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,