From e9ab7a2e333615497b3fc426c379c330230c2b50 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Thu, 31 Jan 2019 14:58:52 +0000
Subject: arm64: alternative: Allow alternative status checking per cpufeature

In preparation for the application of alternatives at different points
during the boot process, provide the possibility to check whether the
alternatives for a feature of interest have already been applied,
instead of having a single global boolean for all alternatives.

Make the VHE enablement code check for the VHE feature instead of
considering all alternatives.

Signed-off-by: Julien Thierry
Acked-by: Marc Zyngier
Cc: Will Deacon
Cc: Suzuki K Poulose
Cc: Marc Zyngier
Cc: Christoffer Dall
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/alternative.c | 21 +++++++++++++++++----
 arch/arm64/kernel/cpufeature.c  |  2 +-
 2 files changed, 18 insertions(+), 5 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index b5d603992d40..c947d2246017 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -32,13 +32,23 @@
 #define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
 
-int alternatives_applied;
+static int all_alternatives_applied;
+
+static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);
 
 struct alt_region {
 	struct alt_instr *begin;
 	struct alt_instr *end;
 };
 
+bool alternative_is_applied(u16 cpufeature)
+{
+	if (WARN_ON(cpufeature >= ARM64_NCAPS))
+		return false;
+
+	return test_bit(cpufeature, applied_alternatives);
+}
+
 /*
  * Check if the target PC is within an alternative block.
  */
@@ -192,6 +202,9 @@ static void __apply_alternatives(void *alt_region, bool is_module)
 		dsb(ish);
 		__flush_icache_all();
 		isb();
+
+		/* We applied all that was available */
+		bitmap_copy(applied_alternatives, cpu_hwcaps, ARM64_NCAPS);
 	}
 }
 
@@ -208,14 +221,14 @@ static int __apply_alternatives_multi_stop(void *unused)
 
 	/* We always have a CPU 0 at this point (__init) */
 	if (smp_processor_id()) {
-		while (!READ_ONCE(alternatives_applied))
+		while (!READ_ONCE(all_alternatives_applied))
 			cpu_relax();
 		isb();
 	} else {
-		BUG_ON(alternatives_applied);
+		BUG_ON(all_alternatives_applied);
 		__apply_alternatives(&region, false);
 		/* Barriers provided by the cache flushing */
-		WRITE_ONCE(alternatives_applied, 1);
+		WRITE_ONCE(all_alternatives_applied, 1);
 	}
 
 	return 0;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6f56e0ab63a1..d607ea33228c 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1118,7 +1118,7 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
 	 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
	 * do anything here.
	 */
-	if (!alternatives_applied)
+	if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
 		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
 }
 #endif
-- cgit
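
For readers unfamiliar with the bitmap bookkeeping the patch relies on, the
stand-alone C sketch below models the same idea in user space: one bit per
capability, set once that capability's alternatives have been applied, with a
bounds-checked query. All names here (NCAPS, mark_applied, is_applied) are
invented for the illustration; in the kernel this is done with
DECLARE_BITMAP(), bitmap_copy() and test_bit() as shown in the diff above.

	/* Illustrative user-space sketch; not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	#define NCAPS 64				/* stand-in for ARM64_NCAPS */

	static unsigned char applied[NCAPS / 8];	/* one bit per capability */

	/* Record that the alternatives for capability 'cap' were applied. */
	static void mark_applied(unsigned int cap)
	{
		if (cap < NCAPS)
			applied[cap / 8] |= 1u << (cap % 8);
	}

	/* Bounds-checked query, mirroring the role of alternative_is_applied(). */
	static bool is_applied(unsigned int cap)
	{
		if (cap >= NCAPS)
			return false;
		return applied[cap / 8] & (1u << (cap % 8));
	}

	int main(void)
	{
		mark_applied(3);	/* pretend capability 3 has been patched */
		printf("cap 3 applied: %d\n", is_applied(3));	/* prints 1 */
		printf("cap 4 applied: %d\n", is_applied(4));	/* prints 0 */
		return 0;
	}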