s390/boot: Add startup debugging support

Add boot_debug() calls to log various memory layout decisions and
randomization details during early startup, improving debugging
capabilities.

Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
This commit is contained in:
Vasily Gorbik 2024-12-11 16:57:21 +01:00 committed by Alexander Gordeev
parent 418b4d5e61
commit ec6f9f7e5b

View File

@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "startup: " fmt
#include <linux/string.h> #include <linux/string.h>
#include <linux/elf.h> #include <linux/elf.h>
#include <asm/page-states.h> #include <asm/page-states.h>
@@ -223,12 +224,16 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
if (oldmem_data.start) { if (oldmem_data.start) {
__kaslr_enabled = 0; __kaslr_enabled = 0;
ident_map_size = min(ident_map_size, oldmem_data.size); ident_map_size = min(ident_map_size, oldmem_data.size);
boot_debug("kdump memory limit: 0x%016lx\n", oldmem_data.size);
} else if (ipl_block_valid && is_ipl_block_dump()) { } else if (ipl_block_valid && is_ipl_block_dump()) {
__kaslr_enabled = 0; __kaslr_enabled = 0;
if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) {
ident_map_size = min(ident_map_size, hsa_size); ident_map_size = min(ident_map_size, hsa_size);
boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size);
}
} }
#endif #endif
boot_debug("Identity map size: 0x%016lx\n", ident_map_size);
} }
#define FIXMAP_SIZE round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore)) #define FIXMAP_SIZE round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))
@@ -266,6 +271,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE)); BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE); BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE); vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
boot_debug("vmem size estimated: 0x%016lx\n", vsize);
if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE || if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
(vsize > _REGION2_SIZE && kaslr_enabled())) { (vsize > _REGION2_SIZE && kaslr_enabled())) {
asce_limit = _REGION1_SIZE; asce_limit = _REGION1_SIZE;
@@ -289,8 +295,10 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
* otherwise asce_limit and rte_size would have been adjusted. * otherwise asce_limit and rte_size would have been adjusted.
*/ */
vmax = adjust_to_uv_max(asce_limit); vmax = adjust_to_uv_max(asce_limit);
boot_debug("%d level paging 0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax);
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START); BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
boot_debug("KASAN shadow area: 0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END);
/* force vmalloc and modules below kasan shadow */ /* force vmalloc and modules below kasan shadow */
vmax = min(vmax, KASAN_SHADOW_START); vmax = min(vmax, KASAN_SHADOW_START);
#endif #endif
@@ -304,19 +312,27 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
pos = 0; pos = 0;
kernel_end = vmax - pos * THREAD_SIZE; kernel_end = vmax - pos * THREAD_SIZE;
kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE); kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
boot_debug("kernel image: 0x%016lx-0x%016lx (kaslr)\n", kernel_start,
kernel_start + kernel_size);
} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) { } else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
kernel_start = round_down(vmax - kernel_size, THREAD_SIZE); kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
boot_debug("The kernel base address is forced to %lx\n", kernel_start); boot_debug("kernel image: 0x%016lx-0x%016lx (constrained)\n", kernel_start,
kernel_start + kernel_size);
} else { } else {
kernel_start = __NO_KASLR_START_KERNEL; kernel_start = __NO_KASLR_START_KERNEL;
boot_debug("kernel image: 0x%016lx-0x%016lx (nokaslr)\n", kernel_start,
kernel_start + kernel_size);
} }
__kaslr_offset = kernel_start; __kaslr_offset = kernel_start;
boot_debug("__kaslr_offset: 0x%016lx\n", __kaslr_offset);
MODULES_END = round_down(kernel_start, _SEGMENT_SIZE); MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
MODULES_VADDR = MODULES_END - MODULES_LEN; MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR; VMALLOC_END = MODULES_VADDR;
if (IS_ENABLED(CONFIG_KMSAN)) if (IS_ENABLED(CONFIG_KMSAN))
VMALLOC_END -= MODULES_LEN * 2; VMALLOC_END -= MODULES_LEN * 2;
boot_debug("modules area: 0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END);
/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */ /* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
vsize = (VMALLOC_END - FIXMAP_SIZE) / 2; vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
@@ -328,10 +344,15 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
VMALLOC_END -= vmalloc_size * 2; VMALLOC_END -= vmalloc_size * 2;
} }
VMALLOC_START = VMALLOC_END - vmalloc_size; VMALLOC_START = VMALLOC_END - vmalloc_size;
boot_debug("vmalloc area: 0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END);
__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE); __memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
boot_debug("memcpy real area: 0x%016lx-0x%016lx\n", __memcpy_real_area,
__memcpy_real_area + MEMCPY_REAL_SIZE);
__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE, __abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
sizeof(struct lowcore)); sizeof(struct lowcore));
boot_debug("abs lowcore: 0x%016lx-0x%016lx\n", __abs_lowcore,
__abs_lowcore + ABS_LOWCORE_MAP_SIZE);
/* split remaining virtual space between 1:1 mapping & vmemmap array */ /* split remaining virtual space between 1:1 mapping & vmemmap array */
pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page)); pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
@@ -353,6 +374,8 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
max_mappable = min(max_mappable, vmemmap_start); max_mappable = min(max_mappable, vmemmap_start);
if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE)) if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE))
__identity_base = round_down(vmemmap_start - max_mappable, rte_size); __identity_base = round_down(vmemmap_start - max_mappable, rte_size);
boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base,
__identity_base + ident_map_size);
return asce_limit; return asce_limit;
} }
@@ -542,5 +565,6 @@ void startup_kernel(void)
*/ */
psw.addr = __kaslr_offset + vmlinux.entry; psw.addr = __kaslr_offset + vmlinux.entry;
psw.mask = PSW_KERNEL_BITS; psw.mask = PSW_KERNEL_BITS;
boot_debug("Starting kernel at: 0x%016lx\n", psw.addr);
__load_psw(psw); __load_psw(psw);
} }