mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-08-28 18:10:32 +00:00
s390: Optimize __pa/__va when RANDOMIZE_IDENTITY_BASE is off
Use a zero identity base when CONFIG_RANDOMIZE_IDENTITY_BASE is off, slightly optimizing __pa/__va calculations. Signed-off-by: Vasily Gorbik <gor@linux.ibm.com> Acked-by: Alexander Gordeev <agordeev@linux.ibm.com> Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
This commit is contained in:
parent
9af310ef10
commit
d7bebcb4a8
@@ -372,8 +372,9 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
|
|||||||
BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
|
BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
|
||||||
max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
|
max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
|
||||||
max_mappable = min(max_mappable, vmemmap_start);
|
max_mappable = min(max_mappable, vmemmap_start);
|
||||||
if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE))
|
#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
|
||||||
__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
|
__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
|
||||||
|
#endif
|
||||||
boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base,
|
boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base,
|
||||||
__identity_base + ident_map_size);
|
__identity_base + ident_map_size);
|
||||||
|
|
||||||
|
@@ -184,7 +184,11 @@ extern struct vm_layout vm_layout;
|
|||||||
|
|
||||||
#define __kaslr_offset vm_layout.kaslr_offset
|
#define __kaslr_offset vm_layout.kaslr_offset
|
||||||
#define __kaslr_offset_phys vm_layout.kaslr_offset_phys
|
#define __kaslr_offset_phys vm_layout.kaslr_offset_phys
|
||||||
|
#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
|
||||||
#define __identity_base vm_layout.identity_base
|
#define __identity_base vm_layout.identity_base
|
||||||
|
#else
|
||||||
|
#define __identity_base 0UL
|
||||||
|
#endif
|
||||||
#define ident_map_size vm_layout.identity_size
|
#define ident_map_size vm_layout.identity_size
|
||||||
|
|
||||||
static inline unsigned long kaslr_offset(void)
|
static inline unsigned long kaslr_offset(void)
|
||||||
|
@@ -740,7 +740,7 @@ static void __init reserve_lowcore(void)
|
|||||||
void *lowcore_end = lowcore_start + sizeof(struct lowcore);
|
void *lowcore_end = lowcore_start + sizeof(struct lowcore);
|
||||||
void *start, *end;
|
void *start, *end;
|
||||||
|
|
||||||
if ((void *)__identity_base < lowcore_end) {
|
if (absolute_pointer(__identity_base) < lowcore_end) {
|
||||||
start = max(lowcore_start, (void *)__identity_base);
|
start = max(lowcore_start, (void *)__identity_base);
|
||||||
end = min(lowcore_end, (void *)(__identity_base + ident_map_size));
|
end = min(lowcore_end, (void *)(__identity_base + ident_map_size));
|
||||||
memblock_reserve(__pa(start), __pa(end));
|
memblock_reserve(__pa(start), __pa(end));
|
||||||
|
Loading…
Reference in New Issue
Block a user