mirror of
				https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
				synced 2025-10-31 20:42:39 +00:00 
			
		
		
		
	 3e4d3af501
			
		
	
	
		3e4d3af501
		
	
	
	
	
		
			
			Keep the current interface but ignore the KM_type and use a stack based approach. The advantage is that we get rid of crappy code like: #define __KM_PTE \ (in_nmi() ? KM_NMI_PTE : \ in_irq() ? KM_IRQ_PTE : \ KM_PTE0) and in general can stop worrying about what context we're in and what kmap slots might be appropriate for that. The downside is that FRV kmap_atomic() gets more expensive. For now we use a CPP trick suggested by Andrew: #define kmap_atomic(page, args...) __kmap_atomic(page) to avoid having to touch all kmap_atomic() users in a single patch. [ not compiled on: - mn10300: the arch doesn't actually build with highmem to begin with ] [akpm@linux-foundation.org: coding-style fixes] [akpm@linux-foundation.org: fix up drivers/gpu/drm/i915/intel_overlay.c] Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Chris Metcalf <cmetcalf@tilera.com> Cc: David Howells <dhowells@redhat.com> Cc: Hugh Dickins <hughd@google.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Russell King <rmk@arm.linux.org.uk> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: David Miller <davem@davemloft.net> Cc: Paul Mackerras <paulus@samba.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Dave Airlie <airlied@linux.ie> Cc: Li Zefan <lizf@cn.fujitsu.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
		
			
				
	
	
		
			82 lines
		
	
	
		
			2.1 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			82 lines
		
	
	
		
			2.1 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
| /*
 | |
|  * highmem.h: virtual kernel memory mappings for high memory
 | |
|  *
 | |
|  * Used in CONFIG_HIGHMEM systems for memory pages which
 | |
|  * are not addressable by direct kernel virtual addresses.
 | |
|  *
 | |
|  * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 | |
|  *		      Gerhard.Wichert@pdb.siemens.de
 | |
|  *
 | |
|  *
 | |
|  * Redesigned the x86 32-bit VM architecture to deal with
|  * up to 16 Terabyte physical memory. With current x86 CPUs
 | |
|  * we now support up to 64 Gigabytes physical RAM.
 | |
|  *
 | |
|  * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 | |
|  */
 | |
| 
 | |
| #ifndef _ASM_HIGHMEM_H
 | |
| #define _ASM_HIGHMEM_H
 | |
| 
 | |
| #ifdef __KERNEL__
 | |
| 
 | |
| #include <linux/interrupt.h>
 | |
| #include <asm/fixmap.h>
 | |
| #include <asm/vaddrs.h>
 | |
| #include <asm/kmap_types.h>
 | |
| #include <asm/pgtable.h>
 | |
| 
 | |
| /* declarations for highmem.c */
| extern unsigned long highstart_pfn, highend_pfn;	/* pfn bounds of the highmem range — defined in highmem.c */
| 
| extern pte_t *kmap_pte;		/* pte used for atomic kmaps — presumably points at the fixmap window; see highmem.c */
| extern pgprot_t kmap_prot;	/* page protection applied to kmap mappings */
| extern pte_t *pkmap_page_table;	/* pte table backing the persistent pkmap region */
| 
| /* One-time setup of the kmap machinery, called during early boot. */
| extern void kmap_init(void) __init;
 | |
| /*
|  * Right now we initialize only a single pte table. It can be extended
|  * easily, subsequent pte tables have to be allocated in one physical
|  * chunk of RAM.  Currently the simplest way to do this is to align the
|  * pkmap region on a pagetable boundary (4MB).
|  */
| #define LAST_PKMAP 1024
| #define PKMAP_SIZE (LAST_PKMAP << PAGE_SHIFT)
| /* Place the pkmap area just past the SRMMU no-cache region, PMD (4MB) aligned. */
| #define PKMAP_BASE PMD_ALIGN(SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))
| 
| #define LAST_PKMAP_MASK (LAST_PKMAP - 1)
| /* Parenthesize the argument so PKMAP_NR(a + b) expands correctly. */
| #define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
| #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
| 
| #define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
| 
| /* Persistent (sleeping) kmap implementation, defined in mm/highmem.c. */
| extern void *kmap_high(struct page *page);
| extern void kunmap_high(struct page *page);
 | |
| 
 | |
| /*
|  * kmap - get a kernel virtual address for @page.
|  *
|  * Lowmem pages are permanently mapped, so their linear address is
|  * returned directly; highmem pages go through kmap_high(), which may
|  * sleep — hence the interrupt-context check.
|  */
| static inline void *kmap(struct page *page)
| {
| 	/* kmap_high() can block, so this must never run in interrupt context. */
| 	BUG_ON(in_interrupt());
| 
| 	return PageHighMem(page) ? kmap_high(page) : page_address(page);
| }
 | |
| 
 | |
| /*
|  * kunmap - release a mapping obtained with kmap().
|  *
|  * Only highmem pages hold a pkmap slot that needs dropping; for lowmem
|  * pages this is a no-op.
|  */
| static inline void kunmap(struct page *page)
| {
| 	/* Mirror of kmap(): kunmap_high() may sleep. */
| 	BUG_ON(in_interrupt());
| 
| 	if (PageHighMem(page))
| 		kunmap_high(page);
| }
 | |
| 
 | |
| /*
|  * Atomic (non-sleeping) kmap entry points. Callers use the generic
|  * kmap_atomic() wrapper, which forwards to __kmap_atomic() (see the
|  * stack-based kmap_atomic rework this file belongs to).
|  */
| extern void *__kmap_atomic(struct page *page);
| extern void __kunmap_atomic(void *kvaddr);
| extern struct page *kmap_atomic_to_page(void *vaddr);
| 
| /* sparc32 has virtually-indexed caches: flush everything around kmap teardown. */
| #define flush_cache_kmaps()	flush_cache_all()
 | |
| 
 | |
| #endif /* __KERNEL__ */
 | |
| 
 | |
| #endif /* _ASM_HIGHMEM_H */
 |