linux-loongson/include/linux/sched/idle.h
Ingo Molnar cac5cefbad sched/smp: Make SMP unconditional
Simplify the scheduler by making CONFIG_SMP=y primitives and data
structures unconditional.

Introduce transitory wrappers for functionality not yet converted to SMP.

Note that this patch is pretty large because there's no clear separation
between the various aspects of the SMP scheduler; it's basically a huge
block of #ifdef CONFIG_SMP. A fair amount of it has to be switched on for
the kernel to boot and work on UP systems.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20250528080924.2273858-21-mingo@kernel.org
2025-06-13 08:47:18 +02:00

117 lines
2.5 KiB
C

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H
#include <linux/sched.h>
/*
 * Idle-state classification for a CPU. The __CPU_NOT_IDLE name is
 * double-underscored to discourage direct use outside the scheduler core.
 */
enum cpu_idle_type {
	__CPU_NOT_IDLE		= 0,	/* CPU is busy */
	CPU_IDLE		= 1,	/* CPU is idle */
	CPU_NEWLY_IDLE		= 2,	/* CPU just became idle */
	CPU_MAX_IDLE_TYPES	= 3,	/* number of idle types above */
};
extern void wake_up_if_idle(int cpu);
/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 *
 * When the architecture defines TIF_POLLING_NRFLAG, the idle loop polls
 * the thread flags for TIF_NEED_RESCHED instead of requiring an IPI to
 * wake it; the helpers below maintain that polling bit and pair it with
 * the required memory barriers against resched_curr().
 */
#ifdef TIF_POLLING_NRFLAG
#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
/*
 * The instrumented atomic-bitops header is in effect: use the raw
 * arch_set_bit()/arch_clear_bit() primitives directly, presumably to
 * bypass the instrumentation wrappers from this low-level idle path --
 * NOTE(review): confirm against the instrumented-bitops documentation.
 */
static __always_inline void __current_set_polling(void)
{
arch_set_bit(TIF_POLLING_NRFLAG,
(unsigned long *)(&current_thread_info()->flags));
}
static __always_inline void __current_clr_polling(void)
{
arch_clear_bit(TIF_POLLING_NRFLAG,
(unsigned long *)(&current_thread_info()->flags));
}
#else
/* Plain atomic bitops when the instrumented header is not in effect. */
static __always_inline void __current_set_polling(void)
{
set_bit(TIF_POLLING_NRFLAG,
(unsigned long *)(&current_thread_info()->flags));
}
static __always_inline void __current_clr_polling(void)
{
clear_bit(TIF_POLLING_NRFLAG,
(unsigned long *)(&current_thread_info()->flags));
}
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
/*
 * Set the polling bit, then re-check TIF_NEED_RESCHED.
 *
 * Returns true if a reschedule was already pending when the bit was set,
 * in which case the caller must not enter the polling idle wait.
 */
static __always_inline bool __must_check current_set_polling_and_test(void)
{
__current_set_polling();
/*
 * Polling state must be visible before we test NEED_RESCHED,
 * paired by resched_curr()
 */
smp_mb__after_atomic();
return unlikely(tif_need_resched());
}
/*
 * Clear the polling bit, then re-check TIF_NEED_RESCHED.
 *
 * Returns true if a reschedule is pending; the barrier orders the clear
 * against the flag test, mirroring current_set_polling_and_test().
 */
static __always_inline bool __must_check current_clr_polling_and_test(void)
{
__current_clr_polling();
/*
 * Polling state must be visible before we test NEED_RESCHED,
 * paired by resched_curr()
 */
smp_mb__after_atomic();
return unlikely(tif_need_resched());
}
/*
 * Unconditionally leave polling state and fold any pending need-resched
 * into the preempt state (no return value, unlike the _and_test variant).
 */
static __always_inline void current_clr_polling(void)
{
__current_clr_polling();
/*
 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
 * Once the bit is cleared, we'll get IPIs with every new
 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
 * fold.
 */
smp_mb__after_atomic(); /* paired with resched_curr() */
preempt_fold_need_resched();
}
#else
/*
 * !TIF_POLLING_NRFLAG: the architecture never polls for need_resched,
 * so there is no polling bit to maintain -- set/clear are no-ops and
 * only the TIF_NEED_RESCHED test remains.
 */
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }
static inline bool __must_check current_set_polling_and_test(void)
{
return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
return unlikely(tif_need_resched());
}
static __always_inline void current_clr_polling(void)
{
__current_clr_polling();
smp_mb(); /* paired with resched_curr() */
preempt_fold_need_resched();
}
#endif
#endif /* _LINUX_SCHED_IDLE_H */