author     Andre Przywara <andre.przywara@arm.com>    2014-11-28 13:40:45 +0000
committer  Will Deacon <will.deacon@arm.com>          2014-12-04 10:28:24 +0000
commit     932ded4b0b9bf111fbf9d176ec12152a0d29b0fd (patch)
tree       72ae4cdbea21713d55576bf12fa8c095d3b48679 /arch/arm64/kernel
parent     cbbf2e6ed7c2adabfa5cc64901c7b89e029d1e20 (diff)
arm64: add module support for alternatives fixups
Currently the kernel patches all necessary instructions once at boot time, so modules are not covered by this.

Change the apply_alternatives() function to take a beginning and an end pointer and introduce a new variant (apply_alternatives_all()) to cover the existing use case for the static kernel image section.

Add a module_finalize() function to arm64 to check for an alternatives section in a module and patch only the instructions from that specific area. Since that module code is not touched before the module initialization has ended, we don't need to halt the machine before doing the patching in the module's code.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
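For quick reference, the interface after this patch boils down to the two entry points sketched below. This is only a summary of the definitions visible in the diff that follows; the header that declares them (assumed to be asm/alternative.h) is not part of this diff.

/* Sketch of the patching entry points after this patch; the declaring
 * header (assumed: asm/alternative.h) is outside this diff. */
void apply_alternatives_all(void);                    /* whole kernel image, run under stop_machine() */
void apply_alternatives(void *start, size_t length);  /* a single region, e.g. a module's .altinstructions */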
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/alternative.c | 29
-rw-r--r--  arch/arm64/kernel/module.c      | 18
-rw-r--r--  arch/arm64/kernel/smp.c         |  2
3 files changed, 44 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 1a3badab800a..ad7821d64a1d 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -28,12 +28,18 @@
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-static int __apply_alternatives(void *dummy)
+struct alt_region {
+ struct alt_instr *begin;
+ struct alt_instr *end;
+};
+
+static int __apply_alternatives(void *alt_region)
{
struct alt_instr *alt;
+ struct alt_region *region = alt_region;
u8 *origptr, *replptr;
- for (alt = __alt_instructions; alt < __alt_instructions_end; alt++) {
+ for (alt = region->begin; alt < region->end; alt++) {
if (!cpus_have_cap(alt->cpufeature))
continue;
@@ -51,10 +57,25 @@ static int __apply_alternatives(void *dummy)
return 0;
}
-void apply_alternatives(void)
+void apply_alternatives_all(void)
{
+ struct alt_region region = {
+ .begin = __alt_instructions,
+ .end = __alt_instructions_end,
+ };
+
/* better not try code patching on a live SMP system */
- stop_machine(__apply_alternatives, NULL, NULL);
+ stop_machine(__apply_alternatives, &region, NULL);
+}
+
+void apply_alternatives(void *start, size_t length)
+{
+ struct alt_region region = {
+ .begin = start,
+ .end = start + length,
+ };
+
+ __apply_alternatives(&region);
}
void free_alternatives_memory(void)
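The loop in __apply_alternatives() above walks an array of struct alt_instr entries between region->begin and region->end. The structure itself is defined in a header that is not part of this diff; the sketch below is an assumption about its layout (only the cpufeature field is visible in the hunk above) and is included purely to make the iteration easier to follow.

/* Assumed layout of one .altinstructions entry; the real definition lives
 * outside this diff, and field names other than cpufeature are assumptions. */
struct alt_instr {
	s32 orig_offset;   /* location of the original instruction(s)      */
	s32 alt_offset;    /* location of the replacement instruction(s)   */
	u16 cpufeature;    /* capability bit that enables this replacement */
	u8  orig_len;      /* size of the original sequence, in bytes      */
	u8  alt_len;       /* size of the replacement, <= orig_len         */
};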
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 1eb1cc955139..fd027b101de5 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -26,6 +26,7 @@
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/insn.h>
+#include <asm/sections.h>
#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
@@ -394,3 +395,20 @@ overflow:
me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
return -ENOEXEC;
}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ const Elf_Shdr *s, *se;
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
+ apply_alternatives((void *)s->sh_addr, s->sh_size);
+ return 0;
+ }
+ }
+
+ return 0;
+}
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 0ef87896e4ae..7ae6ee085261 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -310,7 +310,7 @@ void cpu_die(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
- apply_alternatives();
+ apply_alternatives_all();
}
void __init smp_prepare_boot_cpu(void)