mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-10-31 05:14:38 +00:00

commit 21d71f513b
allyesconfig vmlinux size delta:

    text     data      bss       dec  filename
20736884  6073834  3075176  29885894  vmlinux.before
20721009  6073966  3075176  29870151  vmlinux.after

~18 bytes per callsite, 15K of text size (~0.1%) saved.
(As an added bonus this also removes a lockdep annotation.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

254 lines · 7.3 KiB · C
	
			
		
		
	
	

/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

void init_waitqueue_head(wait_queue_head_t *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->task_list);
}

EXPORT_SYMBOL(init_waitqueue_head);

void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
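
/*
 * Usage sketch (illustrative, not part of this file): the classic
 * open-coded wait built from the bare add/remove primitives above.
 * "my_wq" and "my_condition" are hypothetical names.
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&my_wq, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		schedule();
 *	}
 *	set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&my_wq, &wait);
 */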

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void fastcall
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void fastcall
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);
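
/*
 * Usage sketch (illustrative, not part of this file): the preferred
 * prepare_to_wait()/finish_wait() idiom.  DEFINE_WAIT() builds an
 * on-stack wait_queue_t whose wake function is
 * autoremove_wake_function() below.  "my_wq" and "my_condition" are
 * hypothetical names.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */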

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wait, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the actions passed to __wait_on_bit() and __wait_on_bit_lock()
 * are permitted to return nonzero codes; a nonzero return halts the wait
 * and is passed back to the caller.
 */
int __sched fastcall
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(q->key.flags);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
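
/*
 * Usage sketch (illustrative, not part of this file): waiting for a bit
 * to clear via the wait_on_bit() wrapper in <linux/wait.h>, which
 * fast-paths on !test_bit() and otherwise calls
 * out_of_line_wait_on_bit().  The action callback chooses how to sleep;
 * "my_flags" and MY_BIT_BUSY are hypothetical names.
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	wait_on_bit(&my_flags, MY_BIT_BUSY, my_bit_wait,
 *			TASK_UNINTERRUPTIBLE);
 */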

int __sched fastcall
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags)) {
			if ((ret = (*action)(q->key.flags)))
				break;
		}
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
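
/*
 * Usage sketch (illustrative, not part of this file): taking a bit
 * "lock" with the wait_on_bit_lock() wrapper, which loops on
 * test_and_set_bit() as above until the bit is won.  The release side
 * is sketched after wake_up_bit() below; names are hypothetical, and
 * the -EINTR assumes the action returns nonzero on a signal.
 *
 *	if (wait_on_bit_lock(&my_flags, MY_BIT_LOCK, my_bit_wait,
 *				TASK_INTERRUPTIBLE))
 *		return -EINTR;
 *	... critical section, with MY_BIT_LOCK held ...
 */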

void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void fastcall wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
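
/*
 * Usage sketch (illustrative, not part of this file): the waker side of
 * the bit-wait protocol described above.  The barrier after clearing
 * the bit is what makes the waitqueue_active() check in __wake_up_bit()
 * safe; "my_flags" and MY_BIT_LOCK are hypothetical names.
 *
 *	clear_bit(MY_BIT_LOCK, &my_flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&my_flags, MY_BIT_LOCK);
 */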

/*
 * The hashed waitqueues live in each memory zone's wait table: hash
 * the word's address mixed with the bit number to pick a queue.
 */
fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	const struct zone *zone = page_zone(virt_to_page(word));
	unsigned long val = (unsigned long)word << shift | bit;

	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);