Merge tag 'core-mm-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull kmap updates from Thomas Gleixner:
"The new preemtible kmap_local() implementation:
 - Consolidate all kmap_atomic() internals into a generic
   implementation which builds the base for the kmap_local() API, and
   turn the kmap_atomic() interface into wrappers which handle the
   disabling/enabling of preemption and pagefaults.

 - Switch the storage from per-CPU to per-task and provide scheduler
   support for clearing mappings when scheduling out and restoring
   them when scheduling back in.

 - Merge the migrate_disable/enable() code, which is also part of the
   scheduler pull request. This was required to make the kmap_local()
   interface available, which does not disable preemption when a
   mapping is established. It has to disable migration instead to
   guarantee that the virtual address of the mapped slot is the same
   across preemption.

 - Provide better debug facilities: guard pages and enforced
   utilization of the mapping mechanics on 64bit systems when the
   architecture allows it.

 - Provide the new kmap_local() API which can now be used to clean up
   the kmap_atomic() usage sites all over the place. Most of the usage
   sites do not require the implicit disabling of preemption and
   pagefaults, so the penalty on 64bit and 32bit non-highmem systems
   is removed and quite a bit of the code can be simplified. A
   wholesale conversion is not possible because some usages depend on
   the implicit side effects and some need to be cleaned up because
   they work around these side effects.

   The migrate-disable side effect is only effective on highmem
   systems and when enforced debugging is enabled. On 64bit and 32bit
   non-highmem systems the overhead is completely avoided"
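
A minimal sketch of the kind of conversion this enables (a hypothetical
helper for illustration only, not code from this series; it assumes the
usage site needs neither disabled preemption nor disabled pagefaults):

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Before: kmap_atomic() implicitly disables preemption and pagefaults. */
    static void copy_into_page_atomic(struct page *page, const void *src,
                                      size_t len)
    {
            void *dst = kmap_atomic(page);

            memcpy(dst, src, len);
            kunmap_atomic(dst);
    }

    /*
     * After: the section stays preemptible; migration is disabled instead,
     * and only on highmem systems or with forced debugging enabled.
     */
    static void copy_into_page_local(struct page *page, const void *src,
                                     size_t len)
    {
            void *dst = kmap_local_page(page);

            memcpy(dst, src, len);
            kunmap_local(dst);
    }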
* tag 'core-mm-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
ARM: highmem: Fix cache_is_vivt() reference
x86/crashdump/32: Simplify copy_oldmem_page()
io-mapping: Provide iomap_local variant
mm/highmem: Provide kmap_local*
sched: highmem: Store local kmaps in task struct
x86: Support kmap_local() forced debugging
mm/highmem: Provide CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
mm/highmem: Provide and use CONFIG_DEBUG_KMAP_LOCAL
microblaze/mm/highmem: Add dropped #ifdef back
xtensa/mm/highmem: Make generic kmap_atomic() work correctly
mm/highmem: Take kmap_high_get() properly into account
highmem: Hide implementation details and document API
Documentation/io-mapping: Remove outdated blurb
io-mapping: Cleanup atomic iomap
mm/highmem: Remove the old kmap_atomic cruft
highmem: Get rid of kmap_types.h
xtensa/mm/highmem: Switch to generic kmap atomic
sparc/mm/highmem: Switch to generic kmap atomic
powerpc/mm/highmem: Switch to generic kmap atomic
nds32/mm/highmem: Switch to generic kmap atomic
...
/*
 * arch/xtensa/mm/init.c
 *
 * Derived from MIPS, PPC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier
 * Kevin Chea
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/dma-map-ops.h>

#include <asm/bootparam.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/sysmem.h>

/*
 * Initialize the bootmem system and give it all low memory we have available.
 */

void __init bootmem_init(void)
{
        /* Reserve all memory below PHYS_OFFSET, as memory
         * accounting doesn't work for pages below that address.
         *
         * If PHYS_OFFSET is zero reserve page at address 0:
         * successful allocations should never return NULL.
         */
        memblock_reserve(0, PHYS_OFFSET ? PHYS_OFFSET : 1);

        early_init_fdt_scan_reserved_mem();

        if (!memblock_phys_mem_size())
                panic("No memory found!\n");

        min_low_pfn = PFN_UP(memblock_start_of_DRAM());
        min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
        max_pfn = PFN_DOWN(memblock_end_of_DRAM());
        max_low_pfn = min(max_pfn, MAX_LOW_PFN);

        early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
                      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

        memblock_set_current_limit(PFN_PHYS(max_low_pfn));
        dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

        memblock_dump_all();
}

void __init zones_init(void)
{
        /* All low memory goes into ZONE_NORMAL; with CONFIG_HIGHMEM the
         * remaining pages go into ZONE_HIGHMEM.
         */
        unsigned long max_zone_pfn[MAX_NR_ZONES] = {
                [ZONE_NORMAL] = max_low_pfn,
#ifdef CONFIG_HIGHMEM
                [ZONE_HIGHMEM] = max_pfn,
#endif
        };
        free_area_init(max_zone_pfn);
}
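
/*
 * Hand every free page above the lowmem limit back to the page allocator
 * as highmem. PFN_UP()/PFN_DOWN() round each memblock range inward so that
 * only pages fully contained in a free range are released.
 */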
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn;
        phys_addr_t range_start, range_end;
        u64 i;

        /* set highmem page free */
        for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
                                &range_start, &range_end, NULL) {
                unsigned long start = PFN_UP(range_start);
                unsigned long end = PFN_DOWN(range_end);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                for (; start < end; start++)
                        free_highmem_page(pfn_to_page(start));
        }
#endif
}

/*
 * Initialize memory pages.
 */

void __init mem_init(void)
{
        free_highpages();

        max_mapnr = max_pfn - ARCH_PFN_OFFSET;
        high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);

        memblock_free_all();

        mem_init_print_info(NULL);
        pr_info("virtual kernel memory layout:\n"
#ifdef CONFIG_KASAN
                "    kasan   : 0x%08lx - 0x%08lx  (%5lu MB)\n"
#endif
#ifdef CONFIG_MMU
                "    vmalloc : 0x%08lx - 0x%08lx  (%5lu MB)\n"
#endif
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
                "    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
#endif
                "    lowmem  : 0x%08lx - 0x%08lx  (%5lu MB)\n"
                "    .text   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
                "    .rodata : 0x%08lx - 0x%08lx  (%5lu kB)\n"
                "    .data   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
                "    .init   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
                "    .bss    : 0x%08lx - 0x%08lx  (%5lu kB)\n",
#ifdef CONFIG_KASAN
                KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
                KASAN_SHADOW_SIZE >> 20,
#endif
#ifdef CONFIG_MMU
                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,
#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
                FIXADDR_START, FIXADDR_END,
                (FIXADDR_END - FIXADDR_START) >> 10,
#endif
                PAGE_OFFSET, PAGE_OFFSET +
                (max_low_pfn - min_low_pfn) * PAGE_SIZE,
#else
                min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
#endif
                ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
                (unsigned long)_text, (unsigned long)_etext,
                (unsigned long)(_etext - _text) >> 10,
                (unsigned long)__start_rodata, (unsigned long)__end_rodata,
                (unsigned long)(__end_rodata - __start_rodata) >> 10,
                (unsigned long)_sdata, (unsigned long)_edata,
                (unsigned long)(_edata - _sdata) >> 10,
                (unsigned long)__init_begin, (unsigned long)__init_end,
                (unsigned long)(__init_end - __init_begin) >> 10,
                (unsigned long)__bss_start, (unsigned long)__bss_stop,
                (unsigned long)(__bss_stop - __bss_start) >> 10);
}
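
/*
 * "memmap=" early parameter, as handled by the parser below:
 *
 *   memmap=<size>[KMG]@<start>   add a RAM region of <size> bytes at <start>
 *   memmap=<size>[KMG]$<start>   reserve the region so the kernel won't use it
 *   memmap=<size>[KMG]           limit usable memory by reserving everything
 *                                above <size>
 *
 * Several comma-separated entries may be given in a single option; see
 * parse_memmap_opt().
 */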
static void __init parse_memmap_one(char *p)
{
        char *oldp;
        unsigned long start_at, mem_size;

        if (!p)
                return;

        oldp = p;
        mem_size = memparse(p, &p);
        if (p == oldp)
                return;

        switch (*p) {
        case '@':
                start_at = memparse(p + 1, &p);
                memblock_add(start_at, mem_size);
                break;

        case '$':
                start_at = memparse(p + 1, &p);
                memblock_reserve(start_at, mem_size);
                break;

        case 0:
                memblock_reserve(mem_size, -mem_size);
                break;

        default:
                pr_warn("Unrecognized memmap syntax: %s\n", p);
                break;
        }
}

static int __init parse_memmap_opt(char *str)
{
        while (str) {
                char *k = strchr(str, ',');

                if (k)
                        *k++ = 0;

                parse_memmap_one(str);
                str = k;
        }

        return 0;
}
early_param("memmap", parse_memmap_opt);