 arch/riscv/Kconfig              | 14
 arch/riscv/Makefile             |  7
 arch/riscv/kernel/vmlinux.lds.S | 17
 arch/riscv/mm/Makefile          |  4
 arch/riscv/mm/init.c            | 54
 5 files changed, 91 insertions(+), 5 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index c5e42cc37604..ccbb05118ca0 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -561,6 +561,20 @@ config COMPAT
If you want to execute 32-bit userspace applications, say Y.
+config RELOCATABLE
+ bool "Build a relocatable kernel"
+ depends on MMU && 64BIT && !XIP_KERNEL
+ help
+ This builds a kernel as a Position Independent Executable (PIE),
+ which retains all relocation metadata required to relocate the
+ kernel binary at runtime to a different virtual address than the
+ address it was linked at.
+ Since RISCV uses the RELA relocation format, this requires a
+ relocation pass at runtime even if the kernel is loaded at the
+ same address it was linked at.
+
+ If unsure, say N.
+
endmenu # "Kernel features"
menu "Boot options"
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 6203c3378922..860b09e409c7 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -7,9 +7,12 @@
#
OBJCOPYFLAGS := -O binary
-LDFLAGS_vmlinux :=
+ifeq ($(CONFIG_RELOCATABLE),y)
+ LDFLAGS_vmlinux += -shared -Bsymbolic -z notext -z norelro
+ KBUILD_CFLAGS += -fPIE
+endif
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
- LDFLAGS_vmlinux := --no-relax
+ LDFLAGS_vmlinux += --no-relax
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
ifeq ($(CONFIG_RISCV_ISA_C),y)
CC_FLAGS_FTRACE := -fpatchable-function-entry=4
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index e05e6df44225..615ff5842690 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -122,10 +122,23 @@ SECTIONS
*(.sdata*)
}
- .rela.dyn : {
- *(.rela*)
+ .rela.dyn : ALIGN(8) {
+ __rela_dyn_start = .;
+ *(.rela .rela*)
+ __rela_dyn_end = .;
}
+#ifdef CONFIG_RELOCATABLE
+ .data.rel : { *(.data.rel*) }
+ .got : { *(.got*) }
+ .plt : { *(.plt) }
+ .dynamic : { *(.dynamic) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+#endif
+
#ifdef CONFIG_EFI
.pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
__pecoff_data_raw_size = ABSOLUTE(. - __pecoff_text_end);
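
The __rela_dyn_start and __rela_dyn_end symbols added above bracket the
.rela.dyn payload so that early C code can walk it as a plain array of
Elf64_Rela records. For reference, an ELF64 RELA record has the following
shape (the kernel's definition lives in include/uapi/linux/elf.h; the
field comments here are paraphrased):

typedef struct elf64_rela {
	Elf64_Addr   r_offset;	/* link-time address of the word to patch */
	Elf64_Xword  r_info;	/* relocation type and symbol index */
	Elf64_Sxword r_addend;	/* explicit addend: the link-time value */
} Elf64_Rela;

R_RISCV_RELATIVE entries carry no symbol reference, so each one is
self-contained, which is why relocate_kernel() below can process them
this early in boot.
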
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 2ac177c05352..b85e9e82f082 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
CFLAGS_init.o := -mcmodel=medany
+ifdef CONFIG_RELOCATABLE
+CFLAGS_init.o += -fno-pie
+endif
+
ifdef CONFIG_FTRACE
CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_cacheflush.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 478d6763a01a..029a844d4ad8 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -20,6 +20,9 @@
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
+#ifdef CONFIG_RELOCATABLE
+#include <linux/elf.h>
+#endif
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
@@ -146,7 +149,7 @@ static void __init print_vm_layout(void)
print_ml("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
- print_ml("kernel", (unsigned long)KERNEL_LINK_ADDR,
+ print_ml("kernel", (unsigned long)kernel_map.virt_addr,
(unsigned long)ADDRESS_SPACE_END);
}
}
@@ -820,6 +823,44 @@ retry:
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
+#ifdef CONFIG_RELOCATABLE
+extern unsigned long __rela_dyn_start, __rela_dyn_end;
+
+static void __init relocate_kernel(void)
+{
+ Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
+ /*
+ * This holds the offset between the linked virtual address and the
+ * relocated virtual address.
+ */
+ uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
+ /*
+ * This holds the offset between kernel linked virtual address and
+ * physical address.
+ */
+ uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
+
+ for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
+ Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
+ Elf64_Addr relocated_addr = rela->r_addend;
+
+ if (rela->r_info != R_RISCV_RELATIVE)
+ continue;
+
+ /*
+ * Make sure to not relocate vdso symbols like rt_sigreturn
+ * which are linked from the address 0 in vmlinux since
+ * vdso symbol addresses are actually used as an offset from
+ * mm->context.vdso in VDSO_OFFSET macro.
+ */
+ if (relocated_addr >= KERNEL_LINK_ADDR)
+ relocated_addr += reloc_offset;
+
+ *(Elf64_Addr *)addr = relocated_addr;
+ }
+}
+#endif /* CONFIG_RELOCATABLE */
+
#ifdef CONFIG_XIP_KERNEL
static void __init create_kernel_page_table(pgd_t *pgdir,
__always_unused bool early)
@@ -1007,6 +1048,17 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif
+#ifdef CONFIG_RELOCATABLE
+ /*
+ * Early page table uses only one PUD, which makes it possible
+ * to map PUD_SIZE aligned on PUD_SIZE: if the relocation offset
+ * makes the kernel cross over a PUD_SIZE boundary, raise a bug
+ * since a part of the kernel would not get mapped.
+ */
+ BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
+ relocate_kernel();
+#endif
+
apply_early_boot_alternatives();
pt_ops_set_early();
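
The BUG_ON() added to setup_vm() above enforces that the relocated image
still fits within a single PUD-sized window, because the early page table
maps the kernel through only one PUD: if the relocation offset pushed the
image across a PUD_SIZE boundary, its tail would simply not be mapped.
A hypothetical helper mirroring the same check (illustrative only, not
part of the patch; on riscv64, PUD_SIZE is 1 GiB):

#include <linux/pgtable.h>
#include <linux/types.h>

/* Hypothetical: would the image straddle a PUD_SIZE boundary? */
static bool kernel_crosses_pud(uintptr_t virt_addr, uintptr_t size)
{
	/* Bytes left between virt_addr and the next PUD_SIZE boundary. */
	uintptr_t room = PUD_SIZE - (virt_addr & (PUD_SIZE - 1));

	/* true => part of the kernel would be left unmapped */
	return room < size;
}

For example, with a 32 MiB kernel_map.size and a relocation that lands
kernel_map.virt_addr 16 MiB below a 1 GiB boundary, room is 16 MiB,
kernel_crosses_pud() returns true, and setup_vm() hits the BUG_ON().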