Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson (synced 2025-08-27 06:50:37 +00:00)

With the goal of deprecating / removing VOLUNTARY preempt, live-patch
needs to stop relying on cond_resched() to make forward progress.

Instead, rely on schedule() with TASK_FREEZABLE set. Just like
live-patching, the freezer needs to be able to stop tasks in a
safe / known state.

[bigeasy: use likely() in __klp_sched_try_switch() and update comments]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Tested-by: Petr Mladek <pmladek@suse.com>
Tested-by: Miroslav Benes <mbenes@suse.cz>
Acked-by: Miroslav Benes <mbenes@suse.cz>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Link: https://lore.kernel.org/r/20250509113659.wkP_HJ5z@linutronix.de
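For context, the "safe / known state" the freezer and live patching share is a task sleeping with TASK_FREEZABLE in its state mask. Below is a minimal sketch (not from the patch) of a kernel thread parking itself in such a state; example_worker and do_some_work() are hypothetical names used only for illustration:

/*
 * Illustrative only: a kthread that blocks in a freezable sleep.
 * While it sits in schedule() here, its __state includes
 * TASK_FREEZABLE, so both the freezer and the livepatch transition
 * code can treat it as parked at a safe point, with no need for it
 * to pass through cond_resched().
 */
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_worker(void *unused)
{
	while (!kthread_should_stop()) {
		do_some_work();		/* hypothetical work item */

		set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE);
		if (!kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	}
	return 0;
}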
26 lines
670 B
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_LIVEPATCH_SCHED_H_
#define _LINUX_LIVEPATCH_SCHED_H_

#include <linux/jump_label.h>
#include <linux/sched.h>

#ifdef CONFIG_LIVEPATCH

void __klp_sched_try_switch(void);

DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);

static __always_inline void klp_sched_try_switch(struct task_struct *curr)
{
	if (static_branch_unlikely(&klp_sched_try_switch_key) &&
	    READ_ONCE(curr->__state) & TASK_FREEZABLE)
		__klp_sched_try_switch();
}

#else /* !CONFIG_LIVEPATCH */
static inline void klp_sched_try_switch(struct task_struct *curr) {}
#endif /* CONFIG_LIVEPATCH */

#endif /* _LINUX_LIVEPATCH_SCHED_H_ */
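As a usage sketch: per the commit message, the inline helper above is meant to be reached via schedule() rather than cond_resched(). The stand-in function below assumes a hook on the blocking path that receives the departing task; example_block_hook is a hypothetical name, not the real call site:

/*
 * Illustrative stand-in for the real call site somewhere on the
 * blocking path of __schedule().  The departing task is offered to
 * livepatch; when klp_sched_try_switch_key is disabled (no livepatch
 * transition in progress), the static branch falls through and the
 * call costs essentially nothing.
 */
#include <linux/livepatch_sched.h>

static void example_block_hook(struct task_struct *prev)
{
	klp_sched_try_switch(prev);
}

The static key is what keeps this cheap in the common case: only while a livepatch transition is pending does the branch get patched in, and even then only tasks blocking with TASK_FREEZABLE set take the out-of-line __klp_sched_try_switch() path.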