path: root/arch/s390/boot/startup.c
author		Alexander Gordeev <agordeev@linux.ibm.com>	2023-09-26 15:58:51 +0200
committer	Alexander Gordeev <agordeev@linux.ibm.com>	2024-04-17 13:38:02 +0200
commit		54f2ecc3188f78723267826f634e0747169f8685 (patch)
tree		8359599359b4c4730842f3f1dd5ef33e87cb55fa	/arch/s390/boot/startup.c
parent		c98d2ecae08f02bd2dccd24e7e485e9f0211db65 (diff)
s390: Map kernel at fixed location when KASLR is disabled
Since kernel virtual and physical address spaces are uncoupled, the kernel is
mapped at the top of the virtual address space when KASLR is disabled.

That does not pose any issue with regard to kernel boot and operation, but it
makes it difficult to use a generated vmlinux with some debugging tools
(e.g. gdb), because the exact location of the kernel image in virtual memory
is unknown.

Make that location known and introduce the CONFIG_KERNEL_IMAGE_BASE
configuration option. A custom CONFIG_KERNEL_IMAGE_BASE value that would
break the virtual memory layout leads to a build error.

The kernel image size is defined by the KERNEL_IMAGE_SIZE macro and set to
512 MB, by analogy with x86.

Suggested-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
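For orientation, here is a minimal sketch of how the placement macros used in
the diff below could be derived from the new option; the header location and
exact spelling are assumptions of this note, not part of the patch text shown
here:

	/* Assumed definitions (sketch), e.g. in arch/s390/include/asm/page.h:
	 * the fixed no-KASLR kernel window follows from the new config option
	 * plus the 512 MB image size cap mentioned above.
	 */
	#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
	#define __NO_KASLR_START_KERNEL	CONFIG_KERNEL_IMAGE_BASE
	#define __NO_KASLR_END_KERNEL	(__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)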
Diffstat (limited to 'arch/s390/boot/startup.c')
-rw-r--r--	arch/s390/boot/startup.c | 33 ++++++++++++++++++++++++---------
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 3efa1f457451..fc9c1d51ef8d 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -281,8 +281,8 @@ static unsigned long get_vmem_size(unsigned long identity_size,
 
 static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 {
-	unsigned long kernel_start, kernel_end;
 	unsigned long vmemmap_start;
+	unsigned long kernel_start;
 	unsigned long asce_limit;
 	unsigned long rte_size;
 	unsigned long pages;
@@ -294,11 +294,18 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
 
 	/* choose kernel address space layout: 4 or 3 levels. */
+	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
+	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
 	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
-	if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
+	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
+	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
 		asce_limit = _REGION1_SIZE;
-		rte_size = _REGION2_SIZE;
-		vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
+		if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
+			rte_size = _REGION2_SIZE;
+			vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
+		} else {
+			rte_size = _REGION3_SIZE;
+		}
 	} else {
 		asce_limit = _REGION2_SIZE;
 		rte_size = _REGION3_SIZE;
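(Aside, not part of the patch: the hunk above decides between three and four
page table levels. A worked example, assuming the usual s390 layout constants,
which are not quoted in this diff:)

	/* Assuming _REGION3_SIZE = 2 GB, _REGION2_SIZE = 4 TB and
	 * _REGION1_SIZE = 8 PB: with, say, CONFIG_KERNEL_IMAGE_BASE =
	 * 0x3ffe0000000, __NO_KASLR_END_KERNEL is exactly 4 TB, so the test
	 * __NO_KASLR_END_KERNEL > _REGION2_SIZE is false and three levels
	 * (asce_limit = _REGION2_SIZE) still suffice; any higher base forces
	 * the four-level layout even with KASLR disabled.
	 */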
@@ -308,24 +315,32 @@
 	/*
 	 * Forcing modules and vmalloc area under the ultravisor
 	 * secure storage limit, so that any vmalloc allocation
 	 * we do could be used to back secure guest storage.
+	 *
+	 * Assume the secure storage limit always exceeds _REGION2_SIZE,
+	 * otherwise asce_limit and rte_size would have been adjusted.
 	 */
 	vmax = adjust_to_uv_max(asce_limit);
 #ifdef CONFIG_KASAN
+	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
 	/* force vmalloc and modules below kasan shadow */
 	vmax = min(vmax, KASAN_SHADOW_START);
 #endif
-	kernel_end = vmax;
+	vsize = min(vsize, vmax);
 	if (kaslr_enabled()) {
-		unsigned long kaslr_len, slots, pos;
+		unsigned long kernel_end, kaslr_len, slots, pos;
 
-		vsize = min(vsize, vmax);
 		kaslr_len = max(KASLR_LEN, vmax - vsize);
 		slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
 		if (get_random(slots, &pos))
 			pos = 0;
-		kernel_end -= pos * THREAD_SIZE;
+		kernel_end = vmax - pos * THREAD_SIZE;
+		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
+	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
+		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
+		decompressor_printk("The kernel base address is forced to %lx\n", kernel_start);
+	} else {
+		kernel_start = __NO_KASLR_START_KERNEL;
 	}
-	kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
 	__kaslr_offset = kernel_start;
 	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
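Read together, the hunks amount to the following placement policy. Below is a
condensed restatement in plain C for readability; pick_kernel_start() is a
hypothetical helper invented for this summary, not a function added by the
patch:

	static unsigned long pick_kernel_start(unsigned long vmax, unsigned long vsize,
					       unsigned long kernel_size)
	{
		unsigned long kernel_end, kaslr_len, slots, pos;

		if (kaslr_enabled()) {
			/* random THREAD_SIZE-aligned slot below vmax */
			kaslr_len = max(KASLR_LEN, vmax - vsize);
			slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
			if (get_random(slots, &pos))
				pos = 0;
			kernel_end = vmax - pos * THREAD_SIZE;
			return round_down(kernel_end - kernel_size, THREAD_SIZE);
		}
		/* fixed window does not fit: fall back to the top and report it */
		if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL)
			return round_down(vmax - kernel_size, THREAD_SIZE);
		/* otherwise use the known, debugger-friendly fixed location */
		return __NO_KASLR_START_KERNEL;
	}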