author    Linus Torvalds <torvalds@linux-foundation.org>  2021-04-27 10:16:46 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-04-27 10:16:46 -0700
commit    57fa2369ab17d67e6232f85b868652fbf4407206 (patch)
tree      ec828e9ff3059efb1a8cae8940cf62b9161d8d26 /arch/arm64/kernel
parent    2fbc66c7838a7fbfb91bf8e119e51e73b4a8a0bc (diff)
parent    9186ad8e66bab6a1edc66a939dd56c4638cda948 (diff)
Merge tag 'cfi-v5.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull CFI on arm64 support from Kees Cook:
 "This builds on last cycle's LTO work, and allows the arm64 kernels to
  be built with Clang's Control Flow Integrity feature. This feature has
  happily lived in Android kernels for almost 3 years[1], so I'm excited
  to have it ready for upstream.

  The wide diffstat is mainly due to the treewide fixing of mismatched
  list_sort prototypes. Other things in core kernel are to address
  various CFI corner cases. The largest code portion is the CFI runtime
  implementation itself (which will be shared by all architectures
  implementing support for CFI). The arm64 pieces are Acked by arm64
  maintainers rather than coming through the arm64 tree since carrying
  this tree over there was going to be awkward.

  CFI support for x86 is still under development, but is pretty close.
  There are a handful of corner cases on x86 that need some improvements
  to Clang and objtool, but otherwise works well.

  Summary:

   - Clean up list_sort prototypes (Sami Tolvanen)

   - Introduce CONFIG_CFI_CLANG for arm64 (Sami Tolvanen)"

* tag 'cfi-v5.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  arm64: allow CONFIG_CFI_CLANG to be selected
  KVM: arm64: Disable CFI for nVHE
  arm64: ftrace: use function_nocfi for ftrace_call
  arm64: add __nocfi to __apply_alternatives
  arm64: add __nocfi to functions that jump to a physical address
  arm64: use function_nocfi with __pa_symbol
  arm64: implement function_nocfi
  psci: use function_nocfi for cpu_resume
  lkdtm: use function_nocfi
  treewide: Change list_sort to use const pointers
  bpf: disable CFI in dispatcher functions
  kallsyms: strip ThinLTO hashes from static functions
  kthread: use WARN_ON_FUNCTION_MISMATCH
  workqueue: use WARN_ON_FUNCTION_MISMATCH
  module: ensure __cfi_check alignment
  mm: add generic function_nocfi macro
  cfi: add __cficanonical
  add support for Clang CFI
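Note on the recurring function_nocfi()/__nocfi pattern in the hunks below: with
CONFIG_CFI_CLANG, the compiler replaces a function address reference with the
address of that function's CFI jump-table entry, which breaks code that needs
the real symbol address (for example to hand a physical entry point to firmware,
or to call code through the idmap). The following is a minimal sketch of the
idea behind the helper this series introduces, assuming the generic no-op
fallback plus an arm64-style override that resolves the symbol with adrp/add;
it is an illustration, not the exact header text merged here.

  #include <linux/stringify.h>

  /*
   * arm64-style override (sketch, assuming CONFIG_CFI_CLANG): resolve the
   * symbol's address directly so callers get the real function body rather
   * than its CFI jump-table entry.
   */
  #ifdef CONFIG_CFI_CLANG
  #define function_nocfi(x) ({					\
  	void *addr;						\
  	asm("adrp %0, " __stringify(x) "\n\t"			\
  	    "add  %0, %0, :lo12:" __stringify(x)		\
  	    : "=r" (addr));					\
  	addr;							\
  })
  #endif

  /* Generic fallback: on configurations without an override, a no-op. */
  #ifndef function_nocfi
  #define function_nocfi(x)	(x)
  #endif

Callers that go on to branch through such a raw address (cpu_soft_restart(),
kpti_install_ng_mappings(), __apply_alternatives() in the hunks below) are
additionally marked __nocfi so the indirect call is not rejected by the CFI
check, since the target is no longer a jump-table entry.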
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/acpi_parking_protocol.c   3
-rw-r--r--  arch/arm64/kernel/alternative.c             4
-rw-r--r--  arch/arm64/kernel/cpu-reset.h              10
-rw-r--r--  arch/arm64/kernel/cpufeature.c              4
-rw-r--r--  arch/arm64/kernel/ftrace.c                  2
-rw-r--r--  arch/arm64/kernel/psci.c                    3
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c          3
7 files changed, 16 insertions, 13 deletions
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index e7c941d8340d..bfeeb5319abf 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -99,7 +99,8 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
* that read this address need to convert this address to the
* Boot-Loader's endianness before jumping.
*/
- writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point);
+ writeq_relaxed(__pa_symbol(function_nocfi(secondary_entry)),
+ &mailbox->entry_point);
writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 1184c44ea2c7..abc84636af07 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -133,8 +133,8 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
} while (cur += d_size, cur < end);
}
-static void __apply_alternatives(void *alt_region, bool is_module,
- unsigned long *feature_mask)
+static void __nocfi __apply_alternatives(void *alt_region, bool is_module,
+ unsigned long *feature_mask)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index ed50e9587ad8..9a7b1262ef17 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -13,16 +13,16 @@
void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
unsigned long arg0, unsigned long arg1, unsigned long arg2);
-static inline void __noreturn cpu_soft_restart(unsigned long entry,
- unsigned long arg0,
- unsigned long arg1,
- unsigned long arg2)
+static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry,
+ unsigned long arg0,
+ unsigned long arg1,
+ unsigned long arg2)
{
typeof(__cpu_soft_restart) *restart;
unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
is_hyp_mode_available();
- restart = (void *)__pa_symbol(__cpu_soft_restart);
+ restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart));
cpu_install_idmap();
restart(el2_switch, entry, arg0, arg1, arg2);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 76c60b3cda53..30c82d38c189 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1451,7 +1451,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
}
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-static void
+static void __nocfi
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
typedef void (kpti_remap_fn)(int, int, phys_addr_t);
@@ -1468,7 +1468,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
if (arm64_use_ng_mappings)
return;
- remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+ remap_fn = (void *)__pa_symbol(function_nocfi(idmap_kpti_install_ng_mappings));
cpu_install_idmap();
remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 86a5cf9bc19a..b5d3ddaf69d9 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -55,7 +55,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
unsigned long pc;
u32 new;
- pc = (unsigned long)&ftrace_call;
+ pc = (unsigned long)function_nocfi(ftrace_call);
new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
AARCH64_INSN_BRANCH_LINK);
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 62d2bda7adb8..ab7f4c476104 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -38,7 +38,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
static int cpu_psci_cpu_boot(unsigned int cpu)
{
- int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
+ phys_addr_t pa_secondary_entry = __pa_symbol(function_nocfi(secondary_entry));
+ int err = psci_ops.cpu_on(cpu_logical_map(cpu), pa_secondary_entry);
if (err)
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 056772c26098..c45a83512805 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -66,6 +66,7 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
__le64 __iomem *release_addr;
+ phys_addr_t pa_holding_pen = __pa_symbol(function_nocfi(secondary_holding_pen));
if (!cpu_release_addr[cpu])
return -ENODEV;
@@ -88,7 +89,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
* boot-loader's endianness before jumping. This is mandated by
* the boot protocol.
*/
- writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
+ writeq_relaxed(pa_holding_pen, release_addr);
__flush_dcache_area((__force void *)release_addr,
sizeof(*release_addr));