Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson (synced 2025-10-31 16:38:31 +00:00)

Commit f68e148050
When the mm being switched to matches the active mm, we don't need to
increment and then drop the mm count.  In a simple benchmark this
happens about 50% of the time.  Making that conditional reduces
contention on that cacheline on SMP systems.

Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
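For contrast, here is a minimal sketch of what the fast path looks like
without the conditional, reconstructed from the description above rather
than taken from the actual pre-patch source: the reference count is bumped
and dropped on every call, even when tsk->active_mm is already the target
mm, so every switch touches the shared mm_count cacheline.

void use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	task_lock(tsk);
	active_mm = tsk->active_mm;
	atomic_inc(&mm->mm_count);	/* unconditional: always dirties the cacheline... */
	tsk->mm = mm;
	tsk->active_mm = mm;
	switch_mm(active_mm, mm, tsk);
	task_unlock(tsk);

	mmdrop(active_mm);		/* ...and always drops the reference again */
}

In the patched version below, both atomic operations are skipped whenever
active_mm == mm, which the benchmark in the commit message says is about
half of all calls.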
		
			
				
	
	
		
59 lines · 1.3 KiB · C
/* Copyright (C) 2009 Red Hat, Inc.
 *
 * See ../COPYING for licensing terms.
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>

/*
 * use_mm
 *	Makes the calling kernel thread take on the specified
 *	mm context.
 *	Called by the retry thread to execute retries within the
 *	iocb issuer's mm context, so that copy_from/to_user
 *	operations work seamlessly for aio.
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
void use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	task_lock(tsk);
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		atomic_inc(&mm->mm_count);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	switch_mm(active_mm, mm, tsk);
	task_unlock(tsk);

	if (active_mm != mm)
		mmdrop(active_mm);
}

/*
 * unuse_mm
 *	Reverses the effect of use_mm, i.e. releases the
 *	specified mm context which was earlier taken on
 *	by the calling kernel thread
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
void unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	task_unlock(tsk);
}
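To illustrate the intended calling pattern, here is a hypothetical caller
in the spirit of the aio retry thread mentioned in the comment above. The
function name retry_in_issuer_context and the assumption that the issuer's
mm was saved with a reference held at submission time are illustrative,
not part of this file:

/* Hypothetical caller: 'issuer_mm' is assumed to have been saved, with
 * a reference held, when the request was submitted by the user task. */
static void retry_in_issuer_context(struct mm_struct *issuer_mm)
{
	use_mm(issuer_mm);	/* copy_from/to_user() now resolve in issuer_mm */

	/* ... perform the deferred copy_from_user()/copy_to_user() work ... */

	unuse_mm(issuer_mm);	/* return to lazy-TLB kernel-thread state */
}

Note that unuse_mm() only clears tsk->mm and enters lazy TLB; the mm
remains this thread's active_mm, and the extra reference taken by
use_mm() (if any) is dropped on the next use_mm() of a different mm.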