mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-10-31 22:26:12 +00:00

commit fa4062e7ea
kernel_lock.c emits a warning because a raw spinlock function is used with a spinlock. Convert BKL to raw_spinlock.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
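For context on the warning this commit fixes: after the spinlock_t/raw_spinlock_t split, the do_raw_spin_*() helpers take a raw_spinlock_t, so a BKL declared with DEFINE_SPINLOCK() makes every such call a pointer-type mismatch. A minimal before/after sketch (the warning text is illustrative, not a quoted compiler message):

/* Before: the BKL as a spinlock_t, which merely wraps raw_spinlock_t */
static DEFINE_SPINLOCK(kernel_flag);

/* The low-level helpers operate on the raw lock type ... */
int do_raw_spin_trylock(raw_spinlock_t *lock);

/*
 * ... so this call warns, roughly: "passing argument 1 of
 * 'do_raw_spin_trylock' from incompatible pointer type".
 */
do_raw_spin_trylock(&kernel_flag);

/* After: declare the raw lock type, matching the helpers used below. */
static DEFINE_RAW_SPINLOCK(kernel_flag);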
		
			
				
	
	
		
lib/kernel_lock.c · 144 lines · 3.2 KiB · C
	
	
	
	
			
		
		
	
	
/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#include <linux/smp_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bkl.h>

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel().  It is transparently dropped and reacquired
 * over schedule().  It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static  __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);


/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - do_raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!do_raw_spin_trylock(&kernel_flag)) {
		if (need_resched())
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}

void __lockfunc __release_kernel_lock(void)
{
	do_raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}

/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * do_raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			do_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (raw_spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!do_raw_spin_trylock(&kernel_flag));
	}
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	do_raw_spin_lock(&kernel_flag);
}
#endif

static inline void __unlock_kernel(void)
{
	/*
	 * the BKL is not covered by lockdep, so we open-code the
	 * unlocking sequence (and thus avoid the dep-chain ops):
	 */
	do_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPU's.
 */
void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
	int depth = current->lock_depth + 1;

	trace_lock_kernel(func, file, line);

	if (likely(!depth)) {
		might_sleep();
		__lock_kernel();
	}
	current->lock_depth = depth;
}

void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();

	trace_unlock_kernel(func, file, line);
}

EXPORT_SYMBOL(_lock_kernel);
EXPORT_SYMBOL(_unlock_kernel);
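For reference, a minimal usage sketch of how legacy code reached the exported entry points above. The lock_kernel()/unlock_kernel() wrappers lived in <linux/smp_lock.h> and forwarded the call site for the bkl trace events; the wrapper expansion and the example_open() caller below are reconstructions for illustration, not code from this tree:

#include <linux/fs.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

/*
 * That era's smp_lock.h expanded the wrappers roughly as:
 *
 *	#define lock_kernel()	_lock_kernel(__func__, __FILE__, __LINE__)
 *	#define unlock_kernel()	_unlock_kernel(__func__, __FILE__, __LINE__)
 */
static int example_open(struct inode *inode, struct file *filp)
{
	/* Recursive: only the outermost call (lock_depth -1 -> 0) spins. */
	lock_kernel();

	/* ... touch legacy state still serialized by the BKL ... */

	/* Only the unlock that drops lock_depth back to -1 releases it. */
	unlock_kernel();
	return 0;
}

Note that lock_kernel() calls might_sleep(), and the scheduler transparently drops and retakes kernel_flag across schedule() via __release_kernel_lock()/__reacquire_kernel_lock(), so a BKL holder that sleeps does not deadlock against itself.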