Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--  arch/riscv/kernel/Makefile      |  15
-rw-r--r--  arch/riscv/kernel/alternative.c | 118
-rw-r--r--  arch/riscv/kernel/cpu.c         |   1
-rw-r--r--  arch/riscv/kernel/cpufeature.c  |  80
-rw-r--r--  arch/riscv/kernel/module.c      |  29
-rw-r--r--  arch/riscv/kernel/setup.c       |   2
-rw-r--r--  arch/riscv/kernel/smpboot.c     |   4
-rw-r--r--  arch/riscv/kernel/traps.c       |   2
8 files changed, 245 insertions, 6 deletions
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 87adbe47bc15..bf3876a77ed7 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -14,10 +14,25 @@ ifdef CONFIG_KEXEC
 AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
 endif
 
+# cmodel=medany and notrace when patching early
+ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
+CFLAGS_alternative.o := -mcmodel=medany
+CFLAGS_cpufeature.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE)
+endif
+ifdef CONFIG_KASAN
+KASAN_SANITIZE_alternative.o := n
+KASAN_SANITIZE_cpufeature.o := n
+endif
+endif
+
 extra-y += head.o
 extra-y += vmlinux.lds
 
 obj-y += soc.o
+obj-$(CONFIG_RISCV_ALTERNATIVE) += alternative.o
 obj-y += cpu.o
 obj-y += cpufeature.o
 obj-y += entry.o
diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c
new file mode 100644
index 000000000000..c9d0d3c53223
--- /dev/null
+++ b/arch/riscv/kernel/alternative.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * alternative runtime patching
+ * inspired by the ARM64 and x86 version
+ *
+ * Copyright (C) 2021 Sifive.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/uaccess.h>
+#include <asm/alternative.h>
+#include <asm/sections.h>
+#include <asm/vendorid_list.h>
+#include <asm/sbi.h>
+#include <asm/csr.h>
+
+struct cpu_manufacturer_info_t {
+	unsigned long vendor_id;
+	unsigned long arch_id;
+	unsigned long imp_id;
+	void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
+				  unsigned long archid, unsigned long impid,
+				  unsigned int stage);
+};
+
+static void __init_or_module riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
+{
+#ifdef CONFIG_RISCV_M_MODE
+	cpu_mfr_info->vendor_id = csr_read(CSR_MVENDORID);
+	cpu_mfr_info->arch_id = csr_read(CSR_MARCHID);
+	cpu_mfr_info->imp_id = csr_read(CSR_MIMPID);
+#else
+	cpu_mfr_info->vendor_id = sbi_get_mvendorid();
+	cpu_mfr_info->arch_id = sbi_get_marchid();
+	cpu_mfr_info->imp_id = sbi_get_mimpid();
+#endif
+
+	switch (cpu_mfr_info->vendor_id) {
+#ifdef CONFIG_ERRATA_SIFIVE
+	case SIFIVE_VENDOR_ID:
+		cpu_mfr_info->vendor_patch_func = sifive_errata_patch_func;
+		break;
+#endif
+#ifdef CONFIG_ERRATA_THEAD
+	case THEAD_VENDOR_ID:
+		cpu_mfr_info->vendor_patch_func = thead_errata_patch_func;
+		break;
+#endif
+	default:
+		cpu_mfr_info->vendor_patch_func = NULL;
+	}
+}
+
+/*
+ * This is called very early in the boot process (directly after we run
+ * a feature detect on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+static void __init_or_module _apply_alternatives(struct alt_entry *begin,
+						 struct alt_entry *end,
+						 unsigned int stage)
+{
+	struct cpu_manufacturer_info_t cpu_mfr_info;
+
+	riscv_fill_cpu_mfr_info(&cpu_mfr_info);
+
+	riscv_cpufeature_patch_func(begin, end, stage);
+
+	if (!cpu_mfr_info.vendor_patch_func)
+		return;
+
+	cpu_mfr_info.vendor_patch_func(begin, end,
+				       cpu_mfr_info.arch_id,
+				       cpu_mfr_info.imp_id,
+				       stage);
+}
+
+void __init apply_boot_alternatives(void)
+{
+	/* If called on non-boot cpu things could go wrong */
+	WARN_ON(smp_processor_id() != 0);
+
+	_apply_alternatives((struct alt_entry *)__alt_start,
+			    (struct alt_entry *)__alt_end,
+			    RISCV_ALTERNATIVES_BOOT);
+}
+
+/*
+ * apply_early_boot_alternatives() is called from setup_vm() with MMU-off.
+ *
+ * Following requirements should be honoured for it to work correctly:
+ * 1) It should use PC-relative addressing for accessing kernel symbols.
+ *    To achieve this we always use GCC cmodel=medany.
+ * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
+ *    so disable compiler instrumentation when FTRACE is enabled.
+ *
+ * Currently, the above requirements are honoured by using custom CFLAGS
+ * for alternative.o in kernel/Makefile.
+ */
+void __init apply_early_boot_alternatives(void)
+{
+#ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
+	_apply_alternatives((struct alt_entry *)__alt_start,
+			    (struct alt_entry *)__alt_end,
+			    RISCV_ALTERNATIVES_EARLY_BOOT);
+#endif
+}
+
+#ifdef CONFIG_MODULES
+void apply_module_alternatives(void *start, size_t length)
+{
+	_apply_alternatives((struct alt_entry *)start,
+			    (struct alt_entry *)(start + length),
+			    RISCV_ALTERNATIVES_MODULE);
+}
+#endif
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index ccb617791e56..40c8776aec12 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -88,6 +88,7 @@ int riscv_of_parent_hartid(struct device_node *node)
  */
 static struct riscv_isa_ext_data isa_ext_arr[] = {
 	__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
+	__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
 	__RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
 };
 
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 1b2d42d7f589..dea3ea19deee 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -8,9 +8,15 @@
 
 #include <linux/bitmap.h>
 #include <linux/ctype.h>
+#include <linux/libfdt.h>
+#include <linux/module.h>
 #include <linux/of.h>
-#include <asm/processor.h>
+#include <asm/alternative.h>
+#include <asm/errata_list.h>
 #include <asm/hwcap.h>
+#include <asm/patch.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
 #include <asm/smp.h>
 #include <asm/switch_to.h>
 
@@ -192,6 +198,7 @@ void __init riscv_fill_hwcap(void)
 				set_bit(*ext - 'a', this_isa);
 			} else {
 				SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
+				SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT);
 			}
 #undef SET_ISA_EXT_MAP
 		}
@@ -237,3 +244,74 @@ void __init riscv_fill_hwcap(void)
 		static_branch_enable(&cpu_hwcap_fpu);
 #endif
 }
+
+#ifdef CONFIG_RISCV_ALTERNATIVE
+struct cpufeature_info {
+	char name[ERRATA_STRING_LENGTH_MAX];
+	bool (*check_func)(unsigned int stage);
+};
+
+static bool __init_or_module cpufeature_svpbmt_check_func(unsigned int stage)
+{
+#ifdef CONFIG_RISCV_ISA_SVPBMT
+	switch (stage) {
+	case RISCV_ALTERNATIVES_EARLY_BOOT:
+		return false;
+	default:
+		return riscv_isa_extension_available(NULL, SVPBMT);
+	}
+#endif
+
+	return false;
+}
+
+static const struct cpufeature_info __initdata_or_module
+cpufeature_list[CPUFEATURE_NUMBER] = {
+	{
+		.name = "svpbmt",
+		.check_func = cpufeature_svpbmt_check_func
+	},
+};
+
+static u32 __init_or_module cpufeature_probe(unsigned int stage)
+{
+	const struct cpufeature_info *info;
+	u32 cpu_req_feature = 0;
+	int idx;
+
+	for (idx = 0; idx < CPUFEATURE_NUMBER; idx++) {
+		info = &cpufeature_list[idx];
+
+		if (info->check_func(stage))
+			cpu_req_feature |= (1U << idx);
+	}
+
+	return cpu_req_feature;
+}
+
+void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
+						  struct alt_entry *end,
+						  unsigned int stage)
+{
+	u32 cpu_req_feature = cpufeature_probe(stage);
+	u32 cpu_apply_feature = 0;
+	struct alt_entry *alt;
+	u32 tmp;
+
+	for (alt = begin; alt < end; alt++) {
+		if (alt->vendor_id != 0)
+			continue;
+		if (alt->errata_id >= CPUFEATURE_NUMBER) {
+			WARN(1, "This feature id:%d is not in kernel cpufeature list",
+			     alt->errata_id);
+			continue;
+		}
+
+		tmp = (1U << alt->errata_id);
+		if (cpu_req_feature & tmp) {
+			patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
+			cpu_apply_feature |= tmp;
+		}
+	}
+}
+#endif
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index c29cef90d1dd..91fe16bfaa07 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -11,6 +11,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sizes.h>
 #include <linux/pgtable.h>
+#include <asm/alternative.h>
 #include <asm/sections.h>
 
 /*
@@ -427,3 +428,31 @@ void *module_alloc(unsigned long size)
 				    __builtin_return_address(0));
 }
 #endif
+
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+				    const Elf_Shdr *sechdrs,
+				    const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+	}
+
+	return NULL;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *me)
+{
+	const Elf_Shdr *s;
+
+	s = find_section(hdr, sechdrs, ".alternative");
+	if (s)
+		apply_module_alternatives((void *)s->sh_addr, s->sh_size);
+
+	return 0;
+}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 834eb652a7b9..e0373a3056e8 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -21,6 +21,7 @@
 #include <linux/efi.h>
 #include <linux/crash_dump.h>
 
+#include <asm/alternative.h>
 #include <asm/cpu_ops.h>
 #include <asm/early_ioremap.h>
 #include <asm/pgtable.h>
@@ -295,6 +296,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 	riscv_fill_hwcap();
+	apply_boot_alternatives();
 }
 
 static int __init topology_init(void)
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 622f226454d5..f1e4948a4b52 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -32,7 +32,6 @@
 #include <asm/sections.h>
 #include <asm/sbi.h>
 #include <asm/smp.h>
-#include <asm/alternative.h>
 
 #include "head.h"
 
@@ -41,9 +40,6 @@ static DECLARE_COMPLETION(cpu_running);
 void __init smp_prepare_boot_cpu(void)
 {
 	init_cpu_topology();
-#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
-	apply_boot_alternatives();
-#endif
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index fe92e119e6a3..b40426509244 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -86,7 +86,7 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
 	}
 }
 
-#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
+#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
 #define __trap_section __section(".xip.traps")
 #else
 #define __trap_section
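
Note: the struct alt_entry records that riscv_cpufeature_patch_func() walks above are emitted at their use sites by the ALTERNATIVE() assembler macro from asm/alternative-macros.h; those use sites are not part of this diff. The sketch below is illustrative only: the helper name and the "li" instruction strings are placeholders, and it assumes CPUFEATURE_SVPBMT is the feature id backing index 0 of cpufeature_list (defined in a header outside this diff). The actual in-tree Svpbmt user is the ALT_SVPBMT() helper in asm/errata_list.h.

/*
 * Illustrative sketch only -- not part of this diff.
 *
 * ALTERNATIVE(old, new, vendor_id, errata_id, CONFIG_k) emits "old" into
 * .text plus a struct alt_entry into the .alternative section.  Entries
 * with vendor_id == 0 are handled by riscv_cpufeature_patch_func(): when
 * cpufeature_probe() has set bit errata_id (here CPUFEATURE_SVPBMT), the
 * old instructions are replaced via patch_text_nosync().
 */
#include <asm/alternative.h>

static inline unsigned long example_svpbmt_supported(void)
{
	unsigned long ret;

	asm(ALTERNATIVE("li %0, 0", "li %0, 1", 0,
			CPUFEATURE_SVPBMT, CONFIG_RISCV_ISA_SVPBMT)
	    : "=r" (ret));

	return ret;
}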