From 4399e6cdf3e920ab99c5f935ecbae88e60682596 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 31 Jan 2020 17:51:06 -0800 Subject: arm64: fix NUMA Kconfig typos Fix typos in arch/arm64/Kconfig: - spell Numa as NUMA - add hyphenation to Non-Uniform Signed-off-by: Randy Dunlap Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0b30e884e088..d86d05db3250 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -952,11 +952,11 @@ config HOTPLUG_CPU # Common NUMA Features config NUMA - bool "Numa Memory Allocation and Scheduler Support" + bool "NUMA Memory Allocation and Scheduler Support" select ACPI_NUMA if ACPI select OF_NUMA help - Enable NUMA (Non Uniform Memory Access) support. + Enable NUMA (Non-Uniform Memory Access) support. The kernel will try to allocate memory used by a CPU on the local memory of the CPU and add some more -- cgit From 25b92693a1b67a47b0c64a3410009d09e9658412 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 13 Feb 2020 12:14:52 +0000 Subject: arm64: mm: convert cpu_do_switch_mm() to C There's no reason that cpu_do_switch_mm() needs to be written as an assembly function, and having it as a C function would make it easier to maintain. This patch converts cpu_do_switch_mm() to C, removing code that this change makes redundant (e.g. the mmid macro). Since the header comment was stale and the prototype now implies all the necessary information, this comment is removed. The 'pgd_phys' argument is made a phys_addr_t to match the return type of virt_to_phys(). At the same time, post_ttbr_update_workaround() is updated to use IS_ENABLED(), which allows the compiler to figure out it can elide calls for !CONFIG_CAVIUM_ERRATUM_27456 builds. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Will Deacon [catalin.marinas@arm.com: change comments from asm-style to C-style] Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 6 ------ arch/arm64/include/asm/mmu_context.h | 2 ++ arch/arm64/include/asm/proc-fns.h | 2 -- arch/arm64/mm/context.c | 32 ++++++++++++++++++++++++++++++-- arch/arm64/mm/proc.S | 28 ---------------------------- 5 files changed, 32 insertions(+), 38 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index aca337d79d12..af03001293c6 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -256,12 +256,6 @@ alternative_endif ldr \rd, [\rn, #VMA_VM_MM] .endm -/* - * mmid - get context id from mm pointer (mm->context.id) - */ - .macro mmid, rd, rn - ldr \rd, [\rn, #MM_CONTEXT_ID] - .endm /* * read_ctr - read CTR_EL0. 
If the system has mismatched register fields, * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 3827ff4040a3..ab46187c6300 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -46,6 +46,8 @@ static inline void cpu_set_reserved_ttbr0(void) isb(); } +void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); + static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) { BUG_ON(pgd == swapper_pg_dir); diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index a2ce65a0c1fa..0d5d1f0525eb 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -13,11 +13,9 @@ #include -struct mm_struct; struct cpu_suspend_ctx; extern void cpu_do_idle(void); -extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 8ef73e89d514..8524f03d629c 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -6,6 +6,7 @@ * Copyright (C) 2012 ARM Ltd. */ +#include #include #include #include @@ -254,10 +255,37 @@ switch_mm_fastpath: /* Errata workaround post TTBRx_EL1 update. */ asmlinkage void post_ttbr_update_workaround(void) { + if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) + return; + asm(ALTERNATIVE("nop; nop; nop", "ic iallu; dsb nsh; isb", - ARM64_WORKAROUND_CAVIUM_27456, - CONFIG_CAVIUM_ERRATUM_27456)); + ARM64_WORKAROUND_CAVIUM_27456)); +} + +void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm) +{ + unsigned long ttbr1 = read_sysreg(ttbr1_el1); + unsigned long asid = ASID(mm); + unsigned long ttbr0 = phys_to_ttbr(pgd_phys); + + /* Skip CNP for the reserved ASID */ + if (system_supports_cnp() && asid) + ttbr0 |= TTBR_CNP_BIT; + + /* SW PAN needs a copy of the ASID in TTBR0 for entry */ + if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN)) + ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid); + + /* Set ASID in TTBR1 since TCR.A1 is set */ + ttbr1 &= ~TTBR_ASID_MASK; + ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid); + + write_sysreg(ttbr1, ttbr1_el1); + isb(); + write_sysreg(ttbr0, ttbr0_el1); + isb(); + post_ttbr_update_workaround(); } static int asids_init(void) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index aafed6902411..76899c6eee2b 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -142,34 +142,6 @@ SYM_FUNC_END(cpu_do_resume) .popsection #endif -/* - * cpu_do_switch_mm(pgd_phys, tsk) - * - * Set the translation table base pointer to be pgd_phys. - * - * - pgd_phys - physical address of new TTB - */ -SYM_FUNC_START(cpu_do_switch_mm) - mrs x2, ttbr1_el1 - mmid x1, x1 // get mm->context.id - phys_to_ttbr x3, x0 - -alternative_if ARM64_HAS_CNP - cbz x1, 1f // skip CNP for reserved ASID - orr x3, x3, #TTBR_CNP_BIT -1: -alternative_else_nop_endif -#ifdef CONFIG_ARM64_SW_TTBR0_PAN - bfi x3, x1, #48, #16 // set the ASID field in TTBR0 -#endif - bfi x2, x1, #48, #16 // set the ASID - msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set) - isb - msr ttbr0_el1, x3 // now update TTBR0 - isb - b post_ttbr_update_workaround // Back to C code... 
-SYM_FUNC_END(cpu_do_switch_mm) - .pushsection ".idmap.text", "awx" .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 -- cgit From 6ded0b61cf638bf9f8efe60ab8ba23db60ea9763 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 21 Feb 2020 16:35:06 +0000 Subject: firmware: arm_sdei: fix double-lock on hibernate with shared events SDEI has private events that must be registered on each CPU. When CPUs come and go they must re-register and re-enable their private events. Each event has flags to indicate whether this should happen to protect against an event being registered on a CPU coming online, while all the others are unregistering the event. These flags are protected by the sdei_list_lock spinlock, because the cpuhp callbacks can't take the mutex. Hibernate needs to unregister all events, but keep the in-memory re-register and re-enable as they are. sdei_unregister_shared() takes the spinlock to walk the list, then calls _sdei_event_unregister() on each shared event. _sdei_event_unregister() tries to take the same spinlock to update re-register and re-enable. This doesn't go so well. Push the re-register and re-enable updates out to their callers. sdei_unregister_shared() doesn't want these values updated, so doesn't need to do anything. This also fixes shared events getting lost over hibernate as this path made them look unregistered. Fixes: da351827240e ("firmware: arm_sdei: Add support for CPU and system power states") Reported-by: Liguang Zhang Signed-off-by: James Morse Signed-off-by: Catalin Marinas --- drivers/firmware/arm_sdei.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index a479023fa036..77eaa9a2fd15 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -491,11 +491,6 @@ static int _sdei_event_unregister(struct sdei_event *event) { lockdep_assert_held(&sdei_events_lock); - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - if (event->type == SDEI_EVENT_TYPE_SHARED) return sdei_api_event_unregister(event->event_num); @@ -518,6 +513,11 @@ int sdei_event_unregister(u32 event_num) break; } + spin_lock(&sdei_list_lock); + event->reregister = false; + event->reenable = false; + spin_unlock(&sdei_list_lock); + err = _sdei_event_unregister(event); if (err) break; @@ -585,26 +585,15 @@ static int _sdei_event_register(struct sdei_event *event) lockdep_assert_held(&sdei_events_lock); - spin_lock(&sdei_list_lock); - event->reregister = true; - spin_unlock(&sdei_list_lock); - if (event->type == SDEI_EVENT_TYPE_SHARED) return sdei_api_event_register(event->event_num, sdei_entry_point, event->registered, SDEI_EVENT_REGISTER_RM_ANY, 0); - err = sdei_do_cross_call(_local_event_register, event); - if (err) { - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - + if (err) sdei_do_cross_call(_local_event_unregister, event); - } return err; } @@ -632,8 +621,17 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg) break; } + spin_lock(&sdei_list_lock); + event->reregister = true; + spin_unlock(&sdei_list_lock); + err = _sdei_event_register(event); if (err) { + spin_lock(&sdei_list_lock); + event->reregister = false; + event->reenable = false; + spin_unlock(&sdei_list_lock); + sdei_event_destroy(event); pr_warn("Failed to register event %u: %d\n", event_num, err); -- cgit From 
c66d52b1026717135c5030c65e344750161d159b Mon Sep 17 00:00:00 2001 From: Liguang Zhang Date: Fri, 21 Feb 2020 16:35:07 +0000 Subject: firmware: arm_sdei: fix possible double-lock on hibernate error path We call sdei_reregister_event() with sdei_list_lock held, if the register fails we call sdei_event_destroy() which also acquires sdei_list_lock thus creating A-A deadlock. Add '_llocked' to sdei_reregister_event(), to indicate the list lock is held, and add a _llocked variant of sdei_event_destroy(). Fixes: da351827240e ("firmware: arm_sdei: Add support for CPU and system power states") Signed-off-by: Liguang Zhang [expanded subject, added wrappers instead of duplicating contents] Signed-off-by: James Morse Signed-off-by: Catalin Marinas --- drivers/firmware/arm_sdei.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 77eaa9a2fd15..f15f459e9df0 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -280,13 +280,12 @@ static struct sdei_event *sdei_event_create(u32 event_num, return event; } -static void sdei_event_destroy(struct sdei_event *event) +static void sdei_event_destroy_llocked(struct sdei_event *event) { lockdep_assert_held(&sdei_events_lock); + lockdep_assert_held(&sdei_list_lock); - spin_lock(&sdei_list_lock); list_del(&event->list); - spin_unlock(&sdei_list_lock); if (event->type == SDEI_EVENT_TYPE_SHARED) kfree(event->registered); @@ -296,6 +295,13 @@ static void sdei_event_destroy(struct sdei_event *event) kfree(event); } +static void sdei_event_destroy(struct sdei_event *event) +{ + spin_lock(&sdei_list_lock); + sdei_event_destroy_llocked(event); + spin_unlock(&sdei_list_lock); +} + static int sdei_api_get_version(u64 *version) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version); @@ -643,16 +649,17 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg) } EXPORT_SYMBOL(sdei_event_register); -static int sdei_reregister_event(struct sdei_event *event) +static int sdei_reregister_event_llocked(struct sdei_event *event) { int err; lockdep_assert_held(&sdei_events_lock); + lockdep_assert_held(&sdei_list_lock); err = _sdei_event_register(event); if (err) { pr_err("Failed to re-register event %u\n", event->event_num); - sdei_event_destroy(event); + sdei_event_destroy_llocked(event); return err; } @@ -681,7 +688,7 @@ static int sdei_reregister_shared(void) continue; if (event->reregister) { - err = sdei_reregister_event(event); + err = sdei_reregister_event_llocked(event); if (err) break; } -- cgit From 54f529a6806c9710947a4f2cdc15d6ea54121ccd Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 21 Feb 2020 16:35:08 +0000 Subject: firmware: arm_sdei: Use cpus_read_lock() to avoid races with cpuhp SDEI has private events that need registering and enabling on each CPU. CPUs can come and go while we are trying to do this. SDEI tries to avoid these problems by setting the reregister flag before the register call, so any CPUs that come online register the event too. Sticking plaster like this doesn't work, as if the register call fails, a CPU that subsequently comes online will register the event before reregister is cleared. Take cpus_read_lock() around the register and enable calls. We don't want surprise CPUs to do the wrong thing if they race with these calls failing. 
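The shape of the fix, condensed from the sdei_event_enable() hunk in the diff below (sdei_event_register() follows the same pattern): the cpuhp read lock brackets the firmware call, and the re-enable flag is only set once that call has succeeded, so a CPU coming online can no longer act on a flag for an event that was never actually enabled.

	cpus_read_lock();

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_enable(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_enable, event);

	if (!err) {
		/* Mark for re-enable only after the call has succeeded */
		spin_lock(&sdei_list_lock);
		event->reenable = true;
		spin_unlock(&sdei_list_lock);
	}
	cpus_read_unlock();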
Signed-off-by: James Morse Signed-off-by: Catalin Marinas --- drivers/firmware/arm_sdei.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index f15f459e9df0..45536408a8c1 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -418,14 +418,19 @@ int sdei_event_enable(u32 event_num) return -ENOENT; } - spin_lock(&sdei_list_lock); - event->reenable = true; - spin_unlock(&sdei_list_lock); + cpus_read_lock(); if (event->type == SDEI_EVENT_TYPE_SHARED) err = sdei_api_event_enable(event->event_num); else err = sdei_do_cross_call(_local_event_enable, event); + + if (!err) { + spin_lock(&sdei_list_lock); + event->reenable = true; + spin_unlock(&sdei_list_lock); + } + cpus_read_unlock(); mutex_unlock(&sdei_events_lock); return err; @@ -627,21 +632,18 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg) break; } - spin_lock(&sdei_list_lock); - event->reregister = true; - spin_unlock(&sdei_list_lock); - + cpus_read_lock(); err = _sdei_event_register(event); if (err) { - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - sdei_event_destroy(event); pr_warn("Failed to register event %u: %d\n", event_num, err); + } else { + spin_lock(&sdei_list_lock); + event->reregister = true; + spin_unlock(&sdei_list_lock); } + cpus_read_unlock(); } while (0); mutex_unlock(&sdei_events_lock); -- cgit From f7d5ef0c654e827ba09e6eaadabc5a121c8a1cf4 Mon Sep 17 00:00:00 2001 From: Liguang Zhang Date: Fri, 21 Feb 2020 16:35:09 +0000 Subject: firmware: arm_sdei: clean up sdei_event_create() Function sdei_event_find() is always called in sdei_event_create(), but it is already called in sdei_event_register(). This code is trying to avoid a double-create of the same event, which can't happen as we still hold the sdei_events_lock. We can remove this needless sdei_event_find() call. Signed-off-by: Liguang Zhang [expanded commit message] Signed-off-by: James Morse Signed-off-by: Catalin Marinas --- drivers/firmware/arm_sdei.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 45536408a8c1..334c8be0c11f 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -267,15 +267,9 @@ static struct sdei_event *sdei_event_create(u32 event_num, event->private_registered = regs; } - if (sdei_event_find(event_num)) { - kfree(event->registered); - kfree(event); - event = ERR_PTR(-EBUSY); - } else { - spin_lock(&sdei_list_lock); - list_add(&event->list, &sdei_list); - spin_unlock(&sdei_list_lock); - } + spin_lock(&sdei_list_lock); + list_add(&event->list, &sdei_list); + spin_unlock(&sdei_list_lock); return event; } -- cgit From 90765f745b08fdf069e4562f6985bdba0fefad8d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 28 Feb 2020 12:43:55 +0000 Subject: arm64: Update comment for ASID() macro Commit 25b92693a1b6 ("arm64: mm: convert cpu_do_switch_mm() to C") added a new use of the ASID() macro, so update the comment in asm/mmu.h which reasons about why an atomic reload of 'mm->context.id.counter' is not required. 
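For reference, the new C user added by that commit reads the ASID with the plain macro; a condensed excerpt from cpu_do_switch_mm() earlier in this series:

	/* Low-level switch_mm(): cannot race with an ASID change */
	unsigned long asid = ASID(mm);	/* non-atomic read of context.id.counter */

	ttbr1 &= ~TTBR_ASID_MASK;
	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);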
Cc: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/mmu.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index e4d862420bb4..21a4bcfdb378 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -23,9 +23,9 @@ typedef struct { } mm_context_t; /* - * This macro is only used by the TLBI code, which cannot race with an - * ASID change and therefore doesn't need to reload the counter using - * atomic64_read. + * This macro is only used by the TLBI and low-level switch_mm() code, + * neither of which can race with an ASID change. We therefore don't + * need to reload the counter using atomic64_read(). */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) -- cgit From aaa19727159ef5616219a4b9dad7a84e693aebf5 Mon Sep 17 00:00:00 2001 From: luanshi Date: Wed, 26 Feb 2020 12:25:06 +0800 Subject: perf: arm_spe: Remove unnecessary zero check on 'nr_pages' We already check that the 'nr_pages' is > 2, so there's no need to check that it's != 0 later on. Signed-off-by: Liguang Zhang Signed-off-by: Will Deacon --- drivers/perf/arm_spe_pmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 4e4984a55cd1..b72c04852599 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -831,7 +831,7 @@ static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages, * parts and give userspace a fighting chance of getting some * useful data out of it. */ - if (!nr_pages || (snapshot && (nr_pages & 1))) + if (snapshot && (nr_pages & 1)) return NULL; if (cpu == -1) -- cgit From e424b17985262359181cce1553a1ea2cdf263941 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 21 Feb 2020 19:35:31 +0000 Subject: arm64: perf: Refactor PMU init callbacks The PMU init callbacks are already drowning in boilerplate, so before doubling the number of supported PMU models, give it a sensible refactor to significantly reduce the bloat, both in source and object code. Although nobody uses non-default sysfs attributes today, there's minimal impact to preserving the notion that maybe, some day, somebody might, so we may as well keep up appearances. Acked-by: Mark Rutland Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/kernel/perf_event.c | 124 +++++++++-------------------------------- 1 file changed, 27 insertions(+), 97 deletions(-) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index e40b65645c86..1e0b04da2f3a 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -953,7 +953,10 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) return probe.present ? 0 : -ENODEV; } -static int armv8_pmu_init(struct arm_pmu *cpu_pmu) +static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name, + int (*map_event)(struct perf_event *event), + const struct attribute_group *events, + const struct attribute_group *format) { int ret = armv8pmu_probe_pmu(cpu_pmu); if (ret) @@ -972,135 +975,62 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->set_event_filter = armv8pmu_set_event_filter; cpu_pmu->filter_match = armv8pmu_filter_match; + cpu_pmu->name = name; + cpu_pmu->map_event = map_event; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ? + events : &armv8_pmuv3_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ? 
+ format : &armv8_pmuv3_format_attr_group; + return 0; } static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_pmuv3"; - cpu_pmu->map_event = armv8_pmuv3_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_pmuv3", + armv8_pmuv3_map_event, NULL, NULL); } static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cortex_a35"; - cpu_pmu->map_event = armv8_a53_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35", + armv8_a53_map_event, NULL, NULL); } static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cortex_a53"; - cpu_pmu->map_event = armv8_a53_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a53", + armv8_a53_map_event, NULL, NULL); } static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cortex_a57"; - cpu_pmu->map_event = armv8_a57_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57", + armv8_a57_map_event, NULL, NULL); } static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cortex_a72"; - cpu_pmu->map_event = armv8_a57_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72", + armv8_a57_map_event, NULL, NULL); } static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cortex_a73"; - cpu_pmu->map_event = armv8_a73_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a73", + armv8_a73_map_event, NULL, NULL); } static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - cpu_pmu->name = "armv8_cavium_thunder"; - cpu_pmu->map_event = armv8_thunder_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder", + armv8_thunder_map_event, NULL, NULL); } static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) { - int ret = armv8_pmu_init(cpu_pmu); - if (ret) - return ret; - - 
cpu_pmu->name = "armv8_brcm_vulcan"; - cpu_pmu->map_event = armv8_vulcan_map_event; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = - &armv8_pmuv3_events_attr_group; - cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = - &armv8_pmuv3_format_attr_group; - - return 0; + return armv8_pmu_init(cpu_pmu, "armv8_brcm_vulcan", + armv8_vulcan_map_event, NULL, NULL); } static const struct of_device_id armv8_pmu_of_device_ids[] = { -- cgit From 29cc4ceeac1274ab8363a11b81ebd99f3b023985 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 21 Feb 2020 19:35:32 +0000 Subject: arm64: perf: Support new DT compatibles Add support for matching the new PMUs. For now, this just wires them up as generic PMUv3 such that people writing DTs for new SoCs can do the right thing, and at least have architectural and raw events be usable. We can come back and fill in event maps for sysfs and/or perf tools at a later date. Acked-by: Mark Rutland Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/kernel/perf_event.c | 56 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 1e0b04da2f3a..726cd8bda025 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -991,6 +991,12 @@ static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) armv8_pmuv3_map_event, NULL, NULL); } +static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a34", + armv8_pmuv3_map_event, NULL, NULL); +} + static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35", @@ -1003,12 +1009,24 @@ static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) armv8_a53_map_event, NULL, NULL); } +static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a55", + armv8_pmuv3_map_event, NULL, NULL); +} + static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57", armv8_a57_map_event, NULL, NULL); } +static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a65", + armv8_pmuv3_map_event, NULL, NULL); +} + static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72", @@ -1021,6 +1039,36 @@ static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu) armv8_a73_map_event, NULL, NULL); } +static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a75", + armv8_pmuv3_map_event, NULL, NULL); +} + +static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a76", + armv8_pmuv3_map_event, NULL, NULL); +} + +static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_cortex_a77", + armv8_pmuv3_map_event, NULL, NULL); +} + +static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_neoverse_e1", + armv8_pmuv3_map_event, NULL, NULL); +} + +static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu) +{ + return armv8_pmu_init(cpu_pmu, "armv8_neoverse_n1", + armv8_pmuv3_map_event, NULL, NULL); +} + static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder", @@ -1035,11 +1083,19 @@ static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) static const struct of_device_id armv8_pmu_of_device_ids[] = { {.compatible = "arm,armv8-pmuv3", .data = 
armv8_pmuv3_init}, + {.compatible = "arm,cortex-a34-pmu", .data = armv8_a34_pmu_init}, {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init}, {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init}, + {.compatible = "arm,cortex-a55-pmu", .data = armv8_a55_pmu_init}, {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init}, + {.compatible = "arm,cortex-a65-pmu", .data = armv8_a65_pmu_init}, {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init}, {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init}, + {.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init}, + {.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init}, + {.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init}, + {.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init}, + {.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init}, {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init}, {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init}, {}, -- cgit From 857a141d7fb74237a7a47be40f18e477027f1457 Mon Sep 17 00:00:00 2001 From: Remi Denis-Courmont Date: Wed, 4 Mar 2020 11:35:46 +0200 Subject: arm64: remove gratuitious/stray .ltorg stanzas There are no applicable literals above them. Acked-by: Mark Rutland Signed-off-by: Remi Denis-Courmont Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 1 - arch/arm64/kernel/hibernate-asm.S | 2 -- 2 files changed, 3 deletions(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 989b1944cb71..f79023c9b374 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -404,7 +404,6 @@ __create_page_tables: ret x28 ENDPROC(__create_page_tables) - .ltorg /* * The following fragment of code is executed with the MMU enabled. diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S index 38bcd4d4e43b..6532105b3e32 100644 --- a/arch/arm64/kernel/hibernate-asm.S +++ b/arch/arm64/kernel/hibernate-asm.S @@ -110,8 +110,6 @@ ENTRY(swsusp_arch_suspend_exit) cbz x24, 3f /* Do we need to re-initialise EL2? */ hvc #0 3: ret - - .ltorg ENDPROC(swsusp_arch_suspend_exit) /* -- cgit From 2c9d45b43c39e26fd2a73f2203321cdaee98b58b Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 5 Mar 2020 09:06:21 +0000 Subject: arm64: add support for the AMU extension v1 The activity monitors extension is an optional extension introduced by the ARMv8.4 CPU architecture. This implements basic support for version 1 of the activity monitors architecture, AMUv1. 
This support includes: - Extension detection on each CPU (boot, secondary, hotplugged) - Register interface for AMU aarch64 registers Signed-off-by: Ionela Voinescu Reviewed-by: Valentin Schneider Cc: Mark Rutland Cc: Marc Zyngier Cc: Suzuki K Poulose Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 27 +++++++++++++++ arch/arm64/include/asm/cpucaps.h | 3 +- arch/arm64/include/asm/cpufeature.h | 5 +++ arch/arm64/include/asm/sysreg.h | 38 +++++++++++++++++++++ arch/arm64/kernel/cpufeature.c | 66 +++++++++++++++++++++++++++++++++++++ 5 files changed, 138 insertions(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0b30e884e088..fa4e3737149c 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1517,6 +1517,33 @@ config ARM64_PTR_AUTH endmenu +menu "ARMv8.4 architectural features" + +config ARM64_AMU_EXTN + bool "Enable support for the Activity Monitors Unit CPU extension" + default y + help + The activity monitors extension is an optional extension introduced + by the ARMv8.4 CPU architecture. This enables support for version 1 + of the activity monitors architecture, AMUv1. + + To enable the use of this extension on CPUs that implement it, say Y. + + Note that for architectural reasons, firmware _must_ implement AMU + support when running on CPUs that present the activity monitors + extension. The required support is present in: + * Version 1.5 and later of the ARM Trusted Firmware + + For kernels that have this configuration enabled but boot with broken + firmware, you may need to say N here until the firmware is fixed. + Otherwise you may experience firmware panics or lockups when + accessing the counter registers. Even if you are not observing these + symptoms, the values returned by the register reads might not + correctly reflect reality. Most commonly, the value read will be 0, + indicating that the counter is not enabled. 
+ +endmenu + menu "ARMv8.5 architectural features" config ARM64_E0PD diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 865e0253fc1e..185e44aa2713 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -58,7 +58,8 @@ #define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE 48 #define ARM64_HAS_E0PD 49 #define ARM64_HAS_RNG 50 +#define ARM64_HAS_AMU_EXTN 51 -#define ARM64_NCAPS 51 +#define ARM64_NCAPS 52 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 92ef9539874a..485e069d8768 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -678,6 +678,11 @@ static inline bool cpu_has_hw_af(void) ID_AA64MMFR1_HADBS_SHIFT); } +#ifdef CONFIG_ARM64_AMU_EXTN +/* Check whether the cpu supports the Activity Monitors Unit (AMU) */ +extern bool cpu_has_amu_feat(int cpu); +#endif + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b91570ff9db1..085d248f824e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -386,6 +386,42 @@ #define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2) #define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3) +/* Definitions for system register interface to AMU for ARMv8.4 onwards */ +#define SYS_AM_EL0(crm, op2) sys_reg(3, 3, 13, (crm), (op2)) +#define SYS_AMCR_EL0 SYS_AM_EL0(2, 0) +#define SYS_AMCFGR_EL0 SYS_AM_EL0(2, 1) +#define SYS_AMCGCR_EL0 SYS_AM_EL0(2, 2) +#define SYS_AMUSERENR_EL0 SYS_AM_EL0(2, 3) +#define SYS_AMCNTENCLR0_EL0 SYS_AM_EL0(2, 4) +#define SYS_AMCNTENSET0_EL0 SYS_AM_EL0(2, 5) +#define SYS_AMCNTENCLR1_EL0 SYS_AM_EL0(3, 0) +#define SYS_AMCNTENSET1_EL0 SYS_AM_EL0(3, 1) + +/* + * Group 0 of activity monitors (architected): + * op0 op1 CRn CRm op2 + * Counter: 11 011 1101 010:n<3> n<2:0> + * Type: 11 011 1101 011:n<3> n<2:0> + * n: 0-15 + * + * Group 1 of activity monitors (auxiliary): + * op0 op1 CRn CRm op2 + * Counter: 11 011 1101 110:n<3> n<2:0> + * Type: 11 011 1101 111:n<3> n<2:0> + * n: 0-15 + */ + +#define SYS_AMEVCNTR0_EL0(n) SYS_AM_EL0(4 + ((n) >> 3), (n) & 7) +#define SYS_AMEVTYPE0_EL0(n) SYS_AM_EL0(6 + ((n) >> 3), (n) & 7) +#define SYS_AMEVCNTR1_EL0(n) SYS_AM_EL0(12 + ((n) >> 3), (n) & 7) +#define SYS_AMEVTYPE1_EL0(n) SYS_AM_EL0(14 + ((n) >> 3), (n) & 7) + +/* AMU v1: Fixed (architecturally defined) activity monitors */ +#define SYS_AMEVCNTR0_CORE_EL0 SYS_AMEVCNTR0_EL0(0) +#define SYS_AMEVCNTR0_CONST_EL0 SYS_AMEVCNTR0_EL0(1) +#define SYS_AMEVCNTR0_INST_RET_EL0 SYS_AMEVCNTR0_EL0(2) +#define SYS_AMEVCNTR0_MEM_STALL SYS_AMEVCNTR0_EL0(3) + #define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0) #define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0) @@ -598,6 +634,7 @@ #define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_CSV2_SHIFT 56 #define ID_AA64PFR0_DIT_SHIFT 48 +#define ID_AA64PFR0_AMU_SHIFT 44 #define ID_AA64PFR0_SVE_SHIFT 32 #define ID_AA64PFR0_RAS_SHIFT 28 #define ID_AA64PFR0_GIC_SHIFT 24 @@ -608,6 +645,7 @@ #define ID_AA64PFR0_EL1_SHIFT 4 #define ID_AA64PFR0_EL0_SHIFT 0 +#define ID_AA64PFR0_AMU 0x1 #define ID_AA64PFR0_SVE 0x1 #define ID_AA64PFR0_RAS_V1 0x1 #define ID_AA64PFR0_FP_NI 0xf diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0b6715625cf6..301538d3a197 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -163,6 +163,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 
ID_AA64PFR0_CSV3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0), @@ -1222,6 +1223,53 @@ static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap, #endif +#ifdef CONFIG_ARM64_AMU_EXTN + +/* + * The "amu_cpus" cpumask only signals that the CPU implementation for the + * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide + * information regarding all the events that it supports. When a CPU bit is + * set in the cpumask, the user of this feature can only rely on the presence + * of the 4 fixed counters for that CPU. But this does not guarantee that the + * counters are enabled or access to these counters is enabled by code + * executed at higher exception levels (firmware). + */ +static struct cpumask amu_cpus __read_mostly; + +bool cpu_has_amu_feat(int cpu) +{ + return cpumask_test_cpu(cpu, &amu_cpus); +} + +static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) +{ + if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) { + pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", + smp_processor_id()); + cpumask_set_cpu(smp_processor_id(), &amu_cpus); + } +} + +static bool has_amu(const struct arm64_cpu_capabilities *cap, + int __unused) +{ + /* + * The AMU extension is a non-conflicting feature: the kernel can + * safely run a mix of CPUs with and without support for the + * activity monitors extension. Therefore, unconditionally enable + * the capability to allow any late CPU to use the feature. + * + * With this feature unconditionally enabled, the cpu_enable + * function will be called for all CPUs that match the criteria, + * including secondary and hotplugged, marking this feature as + * present on that respective CPU. The enable function will also + * print a detection message. + */ + + return true; +} +#endif + #ifdef CONFIG_ARM64_VHE static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused) { @@ -1499,6 +1547,24 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_clear_disr, }, #endif /* CONFIG_ARM64_RAS_EXTN */ +#ifdef CONFIG_ARM64_AMU_EXTN + { + /* + * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y. + * Therefore, don't provide .desc as we don't want the detection + * message to be shown until at least one CPU is detected to + * support the feature. + */ + .capability = ARM64_HAS_AMU_EXTN, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + .matches = has_amu, + .sys_reg = SYS_ID_AA64PFR0_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64PFR0_AMU_SHIFT, + .min_field_value = ID_AA64PFR0_AMU, + .cpu_enable = cpu_amu_enable, + }, +#endif /* CONFIG_ARM64_AMU_EXTN */ { .desc = "Data cache clean to the PoU not required for I/D coherence", .capability = ARM64_HAS_CACHE_IDC, -- cgit From 87a1f063464afd934f0f22aac710ca65bef77af3 Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 5 Mar 2020 09:06:22 +0000 Subject: arm64: trap to EL1 accesses to AMU counters from EL0 The activity monitors extension is an optional extension introduced by the ARMv8.4 CPU architecture. 
In order to access the activity monitors counters safely, if desired, the kernel should detect the presence of the extension through the feature register, and mediate the access. Therefore, disable direct accesses to activity monitors counters from EL0 (userspace) and trap them to EL1 (kernel). To be noted that the ARM64_AMU_EXTN kernel config does not have an effect on this code. Given that the amuserenr_el0 resets to an UNKNOWN value, setting the trap of EL0 accesses to EL1 is always attempted for safety and security considerations. Therefore firmware should still ensure accesses to AMU registers are not trapped in EL2/EL3 as this code cannot be bypassed if the CPU implements the Activity Monitors Unit. Signed-off-by: Ionela Voinescu Reviewed-by: James Morse Reviewed-by: Valentin Schneider Reviewed-by: Suzuki K Poulose Cc: Steve Capper Cc: Mark Rutland Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 10 ++++++++++ arch/arm64/mm/proc.S | 3 +++ 2 files changed, 13 insertions(+) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index aca337d79d12..c5487806273f 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -430,6 +430,16 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU 9000: .endm +/* + * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present + */ + .macro reset_amuserenr_el0, tmpreg + mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1 + ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4 + cbz \tmpreg, .Lskip_\@ // Skip if no AMU present + msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0 +.Lskip_\@: + .endm /* * copy_page - copy src to dest using temp registers t1-t8 */ diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index aafed6902411..7103027b4e64 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -131,6 +131,7 @@ alternative_endif ubfx x11, x11, #1, #1 msr oslar_el1, x11 reset_pmuserenr_el0 x0 // Disable PMU access from EL0 + reset_amuserenr_el0 x0 // Disable AMU access from EL0 alternative_if ARM64_HAS_RAS_EXTN msr_s SYS_DISR_EL1, xzr @@ -423,6 +424,8 @@ SYM_FUNC_START(__cpu_setup) isb // Unmask debug exceptions now, enable_dbg // since this is per-cpu reset_pmuserenr_el0 x0 // Disable PMU access from EL0 + reset_amuserenr_el0 x0 // Disable AMU access from EL0 + /* * Memory region attributes */ -- cgit From 4fcdf106a4330bb5c2306a1efbb3af3b7c0db537 Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 5 Mar 2020 09:06:23 +0000 Subject: arm64/kvm: disable access to AMU registers from kvm guests Access to the AMU counters should be disabled by default in kvm guests, as information from the counters might reveal activity in other guests or activity on the host. Therefore, disable access to AMU registers from EL0 and EL1 in kvm guests by: - Hiding the presence of the extension in the feature register (SYS_ID_AA64PFR0_EL1) on the VCPU. - Disabling access to the AMU registers before switching to the guest. - Trapping accesses and injecting an undefined instruction into the guest. 
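The trap handler itself is deliberately trivial; condensed from the sys_regs.c hunk below, every trapped guest access to an AMU system register ends up here:

	static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
	{
		/* AMU state is hidden from guests: raise an UNDEF instead */
		kvm_inject_undefined(vcpu);

		return false;
	}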
Signed-off-by: Ionela Voinescu Reviewed-by: Suzuki K Poulose Reviewed-by: Valentin Schneider Acked-by: Marc Zyngier Cc: Will Deacon Cc: Catalin Marinas Cc: Suzuki K Poulose Cc: Julien Thierry Cc: James Morse Cc: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/kvm_arm.h | 1 + arch/arm64/kvm/hyp/switch.c | 14 +++++- arch/arm64/kvm/sys_regs.c | 93 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 105 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 6e5d839f42b5..51c1d9918999 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -267,6 +267,7 @@ /* Hyp Coprocessor Trap Register */ #define CPTR_EL2_TCPAC (1 << 31) +#define CPTR_EL2_TAM (1 << 30) #define CPTR_EL2_TTA (1 << 20) #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) #define CPTR_EL2_TZ (1 << 8) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index dfe8dd172512..46292a370781 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -98,6 +98,18 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) val = read_sysreg(cpacr_el1); val |= CPACR_EL1_TTA; val &= ~CPACR_EL1_ZEN; + + /* + * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to + * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2, + * except for some missing controls, such as TAM. + * In this case, CPTR_EL2.TAM has the same position with or without + * VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM + * shift value for trapping the AMU accesses. + */ + + val |= CPTR_EL2_TAM; + if (update_fp_enabled(vcpu)) { if (vcpu_has_sve(vcpu)) val |= CPACR_EL1_ZEN; @@ -119,7 +131,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) __activate_traps_common(vcpu); val = CPTR_EL2_DEFAULT; - val |= CPTR_EL2_TTA | CPTR_EL2_TZ; + val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM; if (!update_fp_enabled(vcpu)) { val |= CPTR_EL2_TFP; __activate_traps_fpsimd32(vcpu); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3e909b117f0c..44354c812783 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1003,6 +1003,20 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \ access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } +static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + kvm_inject_undefined(vcpu); + + return false; +} + +/* Macro to expand the AMU counter and type registers*/ +#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), access_amu } +#define AMU_AMEVTYPE0_EL0(n) { SYS_DESC(SYS_AMEVTYPE0_EL0(n)), access_amu } +#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), access_amu } +#define AMU_AMEVTYPE1_EL0(n) { SYS_DESC(SYS_AMEVTYPE1_EL0(n)), access_amu } + static bool trap_ptrauth(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) @@ -1078,8 +1092,10 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); u64 val = raz ? 
0 : read_sanitised_ftr_reg(id); - if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) { - val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); + if (id == SYS_ID_AA64PFR0_EL1) { + if (!vcpu_has_sve(vcpu)) + val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); + val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT); } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) { val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) | (0xfUL << ID_AA64ISAR1_API_SHIFT) | @@ -1565,6 +1581,79 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, + { SYS_DESC(SYS_AMCR_EL0), access_amu }, + { SYS_DESC(SYS_AMCFGR_EL0), access_amu }, + { SYS_DESC(SYS_AMCGCR_EL0), access_amu }, + { SYS_DESC(SYS_AMUSERENR_EL0), access_amu }, + { SYS_DESC(SYS_AMCNTENCLR0_EL0), access_amu }, + { SYS_DESC(SYS_AMCNTENSET0_EL0), access_amu }, + { SYS_DESC(SYS_AMCNTENCLR1_EL0), access_amu }, + { SYS_DESC(SYS_AMCNTENSET1_EL0), access_amu }, + AMU_AMEVCNTR0_EL0(0), + AMU_AMEVCNTR0_EL0(1), + AMU_AMEVCNTR0_EL0(2), + AMU_AMEVCNTR0_EL0(3), + AMU_AMEVCNTR0_EL0(4), + AMU_AMEVCNTR0_EL0(5), + AMU_AMEVCNTR0_EL0(6), + AMU_AMEVCNTR0_EL0(7), + AMU_AMEVCNTR0_EL0(8), + AMU_AMEVCNTR0_EL0(9), + AMU_AMEVCNTR0_EL0(10), + AMU_AMEVCNTR0_EL0(11), + AMU_AMEVCNTR0_EL0(12), + AMU_AMEVCNTR0_EL0(13), + AMU_AMEVCNTR0_EL0(14), + AMU_AMEVCNTR0_EL0(15), + AMU_AMEVTYPE0_EL0(0), + AMU_AMEVTYPE0_EL0(1), + AMU_AMEVTYPE0_EL0(2), + AMU_AMEVTYPE0_EL0(3), + AMU_AMEVTYPE0_EL0(4), + AMU_AMEVTYPE0_EL0(5), + AMU_AMEVTYPE0_EL0(6), + AMU_AMEVTYPE0_EL0(7), + AMU_AMEVTYPE0_EL0(8), + AMU_AMEVTYPE0_EL0(9), + AMU_AMEVTYPE0_EL0(10), + AMU_AMEVTYPE0_EL0(11), + AMU_AMEVTYPE0_EL0(12), + AMU_AMEVTYPE0_EL0(13), + AMU_AMEVTYPE0_EL0(14), + AMU_AMEVTYPE0_EL0(15), + AMU_AMEVCNTR1_EL0(0), + AMU_AMEVCNTR1_EL0(1), + AMU_AMEVCNTR1_EL0(2), + AMU_AMEVCNTR1_EL0(3), + AMU_AMEVCNTR1_EL0(4), + AMU_AMEVCNTR1_EL0(5), + AMU_AMEVCNTR1_EL0(6), + AMU_AMEVCNTR1_EL0(7), + AMU_AMEVCNTR1_EL0(8), + AMU_AMEVCNTR1_EL0(9), + AMU_AMEVCNTR1_EL0(10), + AMU_AMEVCNTR1_EL0(11), + AMU_AMEVCNTR1_EL0(12), + AMU_AMEVCNTR1_EL0(13), + AMU_AMEVCNTR1_EL0(14), + AMU_AMEVCNTR1_EL0(15), + AMU_AMEVTYPE1_EL0(0), + AMU_AMEVTYPE1_EL0(1), + AMU_AMEVTYPE1_EL0(2), + AMU_AMEVTYPE1_EL0(3), + AMU_AMEVTYPE1_EL0(4), + AMU_AMEVTYPE1_EL0(5), + AMU_AMEVTYPE1_EL0(6), + AMU_AMEVTYPE1_EL0(7), + AMU_AMEVTYPE1_EL0(8), + AMU_AMEVTYPE1_EL0(9), + AMU_AMEVTYPE1_EL0(10), + AMU_AMEVTYPE1_EL0(11), + AMU_AMEVTYPE1_EL0(12), + AMU_AMEVTYPE1_EL0(13), + AMU_AMEVTYPE1_EL0(14), + AMU_AMEVTYPE1_EL0(15), + { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer }, -- cgit From 6abde90881a5ea3a383c0959fdd7f575f95db4b3 Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 5 Mar 2020 09:06:24 +0000 Subject: Documentation: arm64: document support for the AMU extension The activity monitors extension is an optional extension introduced by the ARMv8.4 CPU architecture. 
Add initial documentation for the AMUv1 extension: - arm64/amu.txt: AMUv1 documentation - arm64/booting.txt: system registers initialisation Signed-off-by: Ionela Voinescu Reviewed-by: Valentin Schneider Cc: Jonathan Corbet Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Catalin Marinas --- Documentation/arm64/amu.rst | 112 ++++++++++++++++++++++++++++++++++++++++ Documentation/arm64/booting.rst | 14 +++++ Documentation/arm64/index.rst | 1 + 3 files changed, 127 insertions(+) create mode 100644 Documentation/arm64/amu.rst diff --git a/Documentation/arm64/amu.rst b/Documentation/arm64/amu.rst new file mode 100644 index 000000000000..5057b11100ed --- /dev/null +++ b/Documentation/arm64/amu.rst @@ -0,0 +1,112 @@ +======================================================= +Activity Monitors Unit (AMU) extension in AArch64 Linux +======================================================= + +Author: Ionela Voinescu + +Date: 2019-09-10 + +This document briefly describes the provision of Activity Monitors Unit +support in AArch64 Linux. + + +Architecture overview +--------------------- + +The activity monitors extension is an optional extension introduced by the +ARMv8.4 CPU architecture. + +The activity monitors unit, implemented in each CPU, provides performance +counters intended for system management use. The AMU extension provides a +system register interface to the counter registers and also supports an +optional external memory-mapped interface. + +Version 1 of the Activity Monitors architecture implements a counter group +of four fixed and architecturally defined 64-bit event counters. + - CPU cycle counter: increments at the frequency of the CPU. + - Constant counter: increments at the fixed frequency of the system + clock. + - Instructions retired: increments with every architecturally executed + instruction. + - Memory stall cycles: counts instruction dispatch stall cycles caused by + misses in the last level cache within the clock domain. + +When in WFI or WFE these counters do not increment. + +The Activity Monitors architecture provides space for up to 16 architected +event counters. Future versions of the architecture may use this space to +implement additional architected event counters. + +Additionally, version 1 implements a counter group of up to 16 auxiliary +64-bit event counters. + +On cold reset all counters reset to 0. + + +Basic support +------------- + +The kernel can safely run a mix of CPUs with and without support for the +activity monitors extension. Therefore, when CONFIG_ARM64_AMU_EXTN is +selected we unconditionally enable the capability to allow any late CPU +(secondary or hotplugged) to detect and use the feature. + +When the feature is detected on a CPU, we flag the availability of the +feature but this does not guarantee the correct functionality of the +counters, only the presence of the extension. + +Firmware (code running at higher exception levels, e.g. arm-tf) support is +needed to: + - Enable access for lower exception levels (EL2 and EL1) to the AMU + registers. + - Enable the counters. If not enabled these will read as 0. + - Save/restore the counters before/after the CPU is being put/brought up + from the 'off' power state. + +When using kernels that have this feature enabled but boot with broken +firmware the user may experience panics or lockups when accessing the +counter registers. Even if these symptoms are not observed, the values +returned by the register reads might not correctly reflect reality. 
Most +commonly, the counters will read as 0, indicating that they are not +enabled. + +If proper support is not provided in firmware it's best to disable +CONFIG_ARM64_AMU_EXTN. To be noted that for security reasons, this does not +bypass the setting of AMUSERENR_EL0 to trap accesses from EL0 (userspace) to +EL1 (kernel). Therefore, firmware should still ensure accesses to AMU registers +are not trapped in EL2/EL3. + +The fixed counters of AMUv1 are accessible though the following system +register definitions: + - SYS_AMEVCNTR0_CORE_EL0 + - SYS_AMEVCNTR0_CONST_EL0 + - SYS_AMEVCNTR0_INST_RET_EL0 + - SYS_AMEVCNTR0_MEM_STALL_EL0 + +Auxiliary platform specific counters can be accessed using +SYS_AMEVCNTR1_EL0(n), where n is a value between 0 and 15. + +Details can be found in: arch/arm64/include/asm/sysreg.h. + + +Userspace access +---------------- + +Currently, access from userspace to the AMU registers is disabled due to: + - Security reasons: they might expose information about code executed in + secure mode. + - Purpose: AMU counters are intended for system management use. + +Also, the presence of the feature is not visible to userspace. + + +Virtualization +-------------- + +Currently, access from userspace (EL0) and kernelspace (EL1) on the KVM +guest side is disabled due to: + - Security reasons: they might expose information about code executed + by other guests or the host. + +Any attempt to access the AMU registers will result in an UNDEFINED +exception being injected into the guest. diff --git a/Documentation/arm64/booting.rst b/Documentation/arm64/booting.rst index 5d78a6f5b0ae..a3f1a47b6f1c 100644 --- a/Documentation/arm64/booting.rst +++ b/Documentation/arm64/booting.rst @@ -248,6 +248,20 @@ Before jumping into the kernel, the following conditions must be met: - HCR_EL2.APK (bit 40) must be initialised to 0b1 - HCR_EL2.API (bit 41) must be initialised to 0b1 + For CPUs with Activity Monitors Unit v1 (AMUv1) extension present: + - If EL3 is present: + CPTR_EL3.TAM (bit 30) must be initialised to 0b0 + CPTR_EL2.TAM (bit 30) must be initialised to 0b0 + AMCNTENSET0_EL0 must be initialised to 0b1111 + AMCNTENSET1_EL0 must be initialised to a platform specific value + having 0b1 set for the corresponding bit for each of the auxiliary + counters present. + - If the kernel is entered at EL1: + AMCNTENSET0_EL0 must be initialised to 0b1111 + AMCNTENSET1_EL0 must be initialised to a platform specific value + having 0b1 set for the corresponding bit for each of the auxiliary + counters present. + The requirements described above for CPU mode, caches, MMUs, architected timers, coherency and system registers apply to all CPUs. All CPUs must enter the kernel in the same exception level. diff --git a/Documentation/arm64/index.rst b/Documentation/arm64/index.rst index 5c0c69dc58aa..09cbb4ed2237 100644 --- a/Documentation/arm64/index.rst +++ b/Documentation/arm64/index.rst @@ -6,6 +6,7 @@ ARM64 Architecture :maxdepth: 1 acpi_object_usage + amu arm-acpi booting cpu-feature-registers -- cgit From bbce8eaa603236bf958b0d24e6377b3f3b623991 Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 5 Mar 2020 09:06:25 +0000 Subject: cpufreq: add function to get the hardware max frequency Add weak function to return the hardware maximum frequency of a CPU, with the default implementation returning cpuinfo.max_freq, which is the best information we can generically get from the cpufreq framework. 
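From a caller's perspective this is a single lookup; for example, the arm64 frequency-invariance patch later in this series uses it as follows (condensed from that diff; cpufreq reports kHz, so the caller converts to Hz):

	max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000;
	if (unlikely(!max_freq_hz)) {
		pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
		return -EINVAL;
	}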
The default can be overwritten by a strong function in platforms that want to provide an alternative implementation, with more accurate information, obtained either from hardware or firmware. Signed-off-by: Ionela Voinescu Reviewed-by: Valentin Schneider Acked-by: Viresh Kumar Cc: Viresh Kumar Cc: Rafael J. Wysocki Signed-off-by: Catalin Marinas --- drivers/cpufreq/cpufreq.c | 20 ++++++++++++++++++++ include/linux/cpufreq.h | 5 +++++ 2 files changed, 25 insertions(+) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index cbe6c94bf158..985228aee46f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1725,6 +1725,26 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu) } EXPORT_SYMBOL(cpufreq_quick_get_max); +/** + * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU + * @cpu: CPU number + * + * The default return value is the max_freq field of cpuinfo. + */ +__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + unsigned int ret_freq = 0; + + if (policy) { + ret_freq = policy->cpuinfo.max_freq; + cpufreq_cpu_put(policy); + } + + return ret_freq; +} +EXPORT_SYMBOL(cpufreq_get_hw_max_freq); + static unsigned int __cpufreq_get(struct cpufreq_policy *policy) { if (unlikely(policy_is_inactive(policy))) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 0fb561d1b524..f7240251a949 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -205,6 +205,7 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy) unsigned int cpufreq_get(unsigned int cpu); unsigned int cpufreq_quick_get(unsigned int cpu); unsigned int cpufreq_quick_get_max(unsigned int cpu); +unsigned int cpufreq_get_hw_max_freq(unsigned int cpu); void disable_cpufreq(void); u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); @@ -232,6 +233,10 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) { return 0; } +static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu) +{ + return 0; +} static inline void disable_cpufreq(void) { } #endif -- cgit From cd0ed03a8903a0b0c6fc36e32d133d1ddfe70cd6 Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 5 Mar 2020 09:06:26 +0000 Subject: arm64: use activity monitors for frequency invariance The Frequency Invariance Engine (FIE) is providing a frequency scaling correction factor that helps achieve more accurate load-tracking. So far, for arm and arm64 platforms, this scale factor has been obtained based on the ratio between the current frequency and the maximum supported frequency recorded by the cpufreq policy. The setting of this scale factor is triggered from cpufreq drivers by calling arch_set_freq_scale. The current frequency used in computation is the frequency requested by a governor, but it may not be the frequency that was implemented by the platform. This correction factor can also be obtained using a core counter and a constant counter to get information on the performance (frequency based only) obtained in a period of time. This will more accurately reflect the actual current frequency of the CPU, compared with the alternative implementation that reflects the request of a performance level from the OS. Therefore, implement arch_scale_freq_tick to use activity monitors, if present, for the computation of the frequency scale factor. 
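In outline, each tick samples the core and constant cycle counters and turns the ratio of their deltas into the scale factor. The simplified sketch below uses only the per-cpu variables and sysreg definitions introduced in the diff; error handling and the exact fixed-point normalisation (SCHED_CAPACITY_SHIFT) in topology_scale_freq_tick() are elided here:

	/*
	 *              delta(core cycles)     const counter freq
	 * freq_scale = ------------------- * --------------------
	 *              delta(const cycles)     cpuinfo max freq
	 *
	 * arch_max_freq_scale pre-computes the second term (see the
	 * comment at the end of the topology.c hunk).
	 */
	u64 core_cnt  = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
	u64 const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
	u64 scale;

	scale  = core_cnt - this_cpu_read(arch_core_cycles_prev);
	scale *= this_cpu_read(arch_max_freq_scale);
	scale /= const_cnt - this_cpu_read(arch_const_cycles_prev);

	this_cpu_write(arch_core_cycles_prev, core_cnt);
	this_cpu_write(arch_const_cycles_prev, const_cnt);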
The use of AMU counters depends on: - CONFIG_ARM64_AMU_EXTN - depents on the AMU extension being present - CONFIG_CPU_FREQ - the current frequency obtained using counter information is divided by the maximum frequency obtained from the cpufreq policy. While it is possible to have a combination of CPUs in the system with and without support for activity monitors, the use of counters for frequency invariance is only enabled for a CPU if all related CPUs (CPUs in the same frequency domain) support and have enabled the core and constant activity monitor counters. In this way, there is a clear separation between the policies for which arch_set_freq_scale (cpufreq based FIE) is used, and the policies for which arch_scale_freq_tick (counter based FIE) is used to set the frequency scale factor. For this purpose, a late_initcall_sync is registered to trigger validation work for policies that will enable or disable the use of AMU counters for frequency invariance. If CONFIG_CPU_FREQ is not defined, the use of counters is enabled on all CPUs only if all possible CPUs correctly support the necessary counters. Signed-off-by: Ionela Voinescu Reviewed-by: Lukasz Luba Acked-by: Sudeep Holla Cc: Sudeep Holla Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/topology.h | 9 ++ arch/arm64/kernel/cpufeature.c | 4 + arch/arm64/kernel/topology.c | 180 ++++++++++++++++++++++++++++++++++++++ drivers/base/arch_topology.c | 12 +++ include/linux/arch_topology.h | 2 + 5 files changed, 207 insertions(+) diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index a4d945db95a2..21d4d40d6243 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -16,6 +16,15 @@ int pcibus_to_node(struct pci_bus *bus); #include +#ifdef CONFIG_ARM64_AMU_EXTN +/* + * Replace task scheduler's default counter-based + * frequency-invariance scale factor setting. 
+ */ +void topology_scale_freq_tick(void); +#define arch_scale_freq_tick topology_scale_freq_tick +#endif /* CONFIG_ARM64_AMU_EXTN */ + /* Replace task scheduler's default frequency-invariant accounting */ #define arch_scale_freq_capacity topology_get_freq_scale diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 301538d3a197..bf98a2386534 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1241,12 +1241,16 @@ bool cpu_has_amu_feat(int cpu) return cpumask_test_cpu(cpu, &amu_cpus); } +/* Initialize the use of AMU counters for frequency invariance */ +extern void init_cpu_freq_invariance_counters(void); + static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) { if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) { pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", smp_processor_id()); cpumask_set_cpu(smp_processor_id(), &amu_cpus); + init_cpu_freq_invariance_counters(); } } diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index fa9528dfd0ce..0801a0f3c156 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -120,4 +121,183 @@ int __init parse_acpi_topology(void) } #endif +#ifdef CONFIG_ARM64_AMU_EXTN +#undef pr_fmt +#define pr_fmt(fmt) "AMU: " fmt + +static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale); +static DEFINE_PER_CPU(u64, arch_const_cycles_prev); +static DEFINE_PER_CPU(u64, arch_core_cycles_prev); +static cpumask_var_t amu_fie_cpus; + +/* Initialize counter reference per-cpu variables for the current CPU */ +void init_cpu_freq_invariance_counters(void) +{ + this_cpu_write(arch_core_cycles_prev, + read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)); + this_cpu_write(arch_const_cycles_prev, + read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)); +} + +static int validate_cpu_freq_invariance_counters(int cpu) +{ + u64 max_freq_hz, ratio; + + if (!cpu_has_amu_feat(cpu)) { + pr_debug("CPU%d: counters are not supported.\n", cpu); + return -EINVAL; + } + + if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) || + !per_cpu(arch_core_cycles_prev, cpu))) { + pr_debug("CPU%d: cycle counters are not enabled.\n", cpu); + return -EINVAL; + } + + /* Convert maximum frequency from KHz to Hz and validate */ + max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000; + if (unlikely(!max_freq_hz)) { + pr_debug("CPU%d: invalid maximum frequency.\n", cpu); + return -EINVAL; + } + + /* + * Pre-compute the fixed ratio between the frequency of the constant + * counter and the maximum frequency of the CPU. + * + * const_freq + * arch_max_freq_scale = ---------------- * SCHED_CAPACITY_SCALE² + * cpuinfo_max_freq + * + * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE² + * in order to ensure a good resolution for arch_max_freq_scale for + * very low arch timer frequencies (down to the KHz range which should + * be unlikely). 
+ */ + ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT); + ratio = div64_u64(ratio, max_freq_hz); + if (!ratio) { + WARN_ONCE(1, "System timer frequency too low.\n"); + return -EINVAL; + } + + per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio; + + return 0; +} + +static inline bool +enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + + if (!policy) { + pr_debug("CPU%d: No cpufreq policy found.\n", cpu); + return false; + } + + if (cpumask_subset(policy->related_cpus, valid_cpus)) + cpumask_or(amu_fie_cpus, policy->related_cpus, + amu_fie_cpus); + + cpufreq_cpu_put(policy); + + return true; +} + +static DEFINE_STATIC_KEY_FALSE(amu_fie_key); +#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key) + +static int __init init_amu_fie(void) +{ + cpumask_var_t valid_cpus; + bool have_policy = false; + int ret = 0; + int cpu; + + if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL)) + return -ENOMEM; + + if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) { + ret = -ENOMEM; + goto free_valid_mask; + } + + for_each_present_cpu(cpu) { + if (validate_cpu_freq_invariance_counters(cpu)) + continue; + cpumask_set_cpu(cpu, valid_cpus); + have_policy |= enable_policy_freq_counters(cpu, valid_cpus); + } + + /* + * If we are not restricted by cpufreq policies, we only enable + * the use of the AMU feature for FIE if all CPUs support AMU. + * Otherwise, enable_policy_freq_counters has already enabled + * policy cpus. + */ + if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask)) + cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus); + + if (!cpumask_empty(amu_fie_cpus)) { + pr_info("CPUs[%*pbl]: counters will be used for FIE.", + cpumask_pr_args(amu_fie_cpus)); + static_branch_enable(&amu_fie_key); + } + +free_valid_mask: + free_cpumask_var(valid_cpus); + + return ret; +} +late_initcall_sync(init_amu_fie); + +bool arch_freq_counters_available(struct cpumask *cpus) +{ + return amu_freq_invariant() && + cpumask_subset(cpus, amu_fie_cpus); +} + +void topology_scale_freq_tick(void) +{ + u64 prev_core_cnt, prev_const_cnt; + u64 core_cnt, const_cnt, scale; + int cpu = smp_processor_id(); + + if (!amu_freq_invariant()) + return; + + if (!cpumask_test_cpu(cpu, amu_fie_cpus)) + return; + + const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0); + core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0); + prev_const_cnt = this_cpu_read(arch_const_cycles_prev); + prev_core_cnt = this_cpu_read(arch_core_cycles_prev); + + if (unlikely(core_cnt <= prev_core_cnt || + const_cnt <= prev_const_cnt)) + goto store_and_exit; + + /* + * /\core arch_max_freq_scale + * scale = ------- * -------------------- + * /\const SCHED_CAPACITY_SCALE + * + * See validate_cpu_freq_invariance_counters() for details on + * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT. 
+ */ + scale = core_cnt - prev_core_cnt; + scale *= this_cpu_read(arch_max_freq_scale); + scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT, + const_cnt - prev_const_cnt); + + scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE); + this_cpu_write(freq_scale, (unsigned long)scale); + +store_and_exit: + this_cpu_write(arch_core_cycles_prev, core_cnt); + this_cpu_write(arch_const_cycles_prev, const_cnt); +} +#endif /* CONFIG_ARM64_AMU_EXTN */ diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 6119e11a9f95..8d63673c1689 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -21,6 +21,10 @@ #include #include +__weak bool arch_freq_counters_available(struct cpumask *cpus) +{ + return false; +} DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, @@ -29,6 +33,14 @@ void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, unsigned long scale; int i; + /* + * If the use of counters for FIE is enabled, just return as we don't + * want to update the scale factor with information from CPUFREQ. + * Instead the scale factor will be updated from arch_scale_freq_tick. + */ + if (arch_freq_counters_available(cpus)) + return; + scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq; for_each_cpu(i, cpus) diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 3015ecbb90b1..1ccdddb541a7 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -33,6 +33,8 @@ unsigned long topology_get_freq_scale(int cpu) return per_cpu(freq_scale, cpu); } +bool arch_freq_counters_available(struct cpumask *cpus); + struct cpu_topology { int thread_id; int core_id; -- cgit From c265861af2af0f667e88b1b70acc234f8c61e0ae Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 5 Mar 2020 09:06:27 +0000 Subject: clocksource/drivers/arm_arch_timer: validate arch_timer_rate Using an arch timer with a frequency of less than 1MHz can potentially result in incorrect functionality in systems that assume a reasonable rate of the arch timer of 1 to 50MHz, described as typical in the architecture specification. Therefore, warn if the arch timer rate is below 1MHz, which is considered atypical and worth emphasizing. Suggested-by: Valentin Schneider Signed-off-by: Ionela Voinescu Acked-by: Marc Zyngier Cc: Marc Zyngier Cc: Mark Rutland Signed-off-by: Catalin Marinas --- drivers/clocksource/arm_arch_timer.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 9a5464c625b4..4faa930eabf8 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -885,6 +885,17 @@ static int arch_timer_starting_cpu(unsigned int cpu) return 0; } +static int validate_timer_rate(void) +{ + if (!arch_timer_rate) + return -EINVAL; + + /* Arch timer frequency < 1MHz can cause trouble */ + WARN_ON(arch_timer_rate < 1000000); + + return 0; +} + /* * For historical reasons, when probing with DT we use whichever (non-zero) * rate was probed first, and don't verify that others match. If the first node @@ -900,7 +911,7 @@ static void arch_timer_of_configure_rate(u32 rate, struct device_node *np) arch_timer_rate = rate; /* Check the timer frequency. 
*/ - if (arch_timer_rate == 0) + if (validate_timer_rate()) pr_warn("frequency not available\n"); } @@ -1594,9 +1605,10 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table) * CNTFRQ value. This *must* be correct. */ arch_timer_rate = arch_timer_get_cntfrq(); - if (!arch_timer_rate) { + ret = validate_timer_rate(); + if (ret) { pr_err(FW_BUG "frequency not available.\n"); - return -EINVAL; + return ret; } arch_timer_uses_ppi = arch_timer_select_ppi(); -- cgit From 27afb236fe5adaa3911e47c91057ba783549226f Mon Sep 17 00:00:00 2001 From: 王程刚 Date: Mon, 9 Mar 2020 15:21:42 +0800 Subject: arch/arm64: fix typo in a comment Fix typo in a comment in arch/arm64/include/asm/esr.h "Unallocted" -> "Unallocated" Signed-off-by: Chenggang Wang Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/esr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index cb29253ae86b..6a395a7e6707 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -60,7 +60,7 @@ #define ESR_ELx_EC_BKPT32 (0x38) /* Unallocated EC: 0x39 */ #define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */ -/* Unallocted EC: 0x3B */ +/* Unallocated EC: 0x3B */ #define ESR_ELx_EC_BRK64 (0x3C) /* Unallocated EC: 0x3D - 0x3F */ #define ESR_ELx_EC_MAX (0x3F) -- cgit From e9c7ddbf8b4b6a291bf3b5bfa7c883235164d9be Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 20 Jan 2020 18:52:29 +0000 Subject: arm64: csum: Optimise IPv6 header checksum Throwing our __uint128_t idioms at csum_ipv6_magic() makes it about 1.3x-2x faster across a range of microarchitecture/compiler combinations. Not much in absolute terms, but every little helps. Signed-off-by: Robin Murphy Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/checksum.h | 7 ++++++- arch/arm64/lib/csum.c | 27 +++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index 8d2a7de39744..b6f7bc6da5fb 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -5,7 +5,12 @@ #ifndef __ASM_CHECKSUM_H #define __ASM_CHECKSUM_H -#include +#include + +#define _HAVE_ARCH_IPV6_CSUM +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum sum); static inline __sum16 csum_fold(__wsum csum) { diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c index 1f82c66b32ea..60eccae2abad 100644 --- a/arch/arm64/lib/csum.c +++ b/arch/arm64/lib/csum.c @@ -124,3 +124,30 @@ unsigned int do_csum(const unsigned char *buff, int len) return sum >> 16; } + +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum csum) +{ + __uint128_t src, dst; + u64 sum = (__force u64)csum; + + src = *(const __uint128_t *)saddr->s6_addr; + dst = *(const __uint128_t *)daddr->s6_addr; + + sum += (__force u32)htonl(len); +#ifdef __LITTLE_ENDIAN + sum += (u32)proto << 24; +#else + sum += proto; +#endif + src += (src >> 64) | (src << 64); + dst += (dst >> 64) | (dst << 64); + + sum = accumulate(sum, src >> 64); + sum = accumulate(sum, dst >> 64); + + sum += ((sum >> 32) | (sum << 32)); + return csum_fold((__force __wsum)(sum >> 32)); +} +EXPORT_SYMBOL(csum_ipv6_magic); -- cgit From b8f58ac7c38af1e22db125622a3a3e9bb9fb9fa2 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 5 Mar 2020 14:20:52 +0900 Subject: arm64: efi: add efi-entry.o to 
targets instead of extra-$(CONFIG_EFI) efi-entry.o is built on demand for efi-entry.stub.o, so you do not have to repeat $(CONFIG_EFI) here. Adding it to 'targets' is enough. Signed-off-by: Masahiro Yamada Signed-off-by: Catalin Marinas Acked-by: Ard Biesheuvel Reviewed-by: Vincenzo Frascino --- arch/arm64/kernel/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index fc6488660f64..4e5b8ee31442 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -21,7 +21,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ smp.o smp_spin_table.o topology.o smccc-call.o \ syscall.o -extra-$(CONFIG_EFI) := efi-entry.o +targets += efi-entry.o OBJCOPYFLAGS := --prefix-symbols=__efistub_ $(obj)/%.stub.o: $(obj)/%.o FORCE -- cgit From f0c0d4b74d59809568f560001c8f88e8211334a4 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 28 Feb 2020 14:59:42 +0000 Subject: arm64: entry: unmask IRQ in el0_sp() Currently, the EL0 SP alignment handler masks IRQs unnecessarily. It does so due to historic code sharing of the EL0 SP and PC alignment handlers, and branch predictor hardening applicable to the EL0 SP handler. We began masking IRQs in the EL0 SP alignment handler in commit: 5dfc6ed27710c42c ("arm64: entry: Apply BP hardening for high-priority synchronous exception") ... as this shared code with the EL0 PC alignment handler, and branch predictor hardening made it necessary to disable IRQs for early parts of the EL0 PC alignment handler. It was not necessary to mask IRQs during EL0 SP alignment exceptions, but it was not considered harmful to do so. This masking was carried forward into C code in commit: 582f95835a8fc812 ("arm64: entry: convert el0_sync to C") ... where the SP/PC cases were split into separate handlers, and the masking duplicated. Subsequently the EL0 PC alignment handler was refactored to perform branch predictor hardening before unmasking IRQs, in commit: bfe298745afc9548 ("arm64: entry-common: don't touch daif before bp-hardening") ... but the redundant masking of IRQs was not removed from the EL0 SP alignment handler. Let's do so now, and make it interruptible as with most other synchronous exception handlers. Signed-off-by: Mark Rutland Cc: Will Deacon Signed-off-by: Catalin Marinas Reviewed-by: James Morse --- arch/arm64/kernel/entry-common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index fde59981445c..c839b5bf1904 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -175,7 +175,7 @@ NOKPROBE_SYMBOL(el0_pc); static void notrace el0_sp(struct pt_regs *regs, unsigned long esr) { user_exit_irqoff(); - local_daif_restore(DAIF_PROCCTX_NOIRQ); + local_daif_restore(DAIF_PROCCTX); do_sp_pc_abort(regs->sp, esr, regs); } NOKPROBE_SYMBOL(el0_sp); -- cgit From 1db5cdeccd813330aaab19b3fccab15e1d07fe12 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 21 Feb 2020 14:50:21 +0000 Subject: arm64: cpufeature: add cpus_have_final_cap() When cpus_have_const_cap() was originally introduced it was intended to be safe in hyp context, where it is not safe to access the cpu_hwcaps array as cpus_have_cap() did. For more details see commit: a4023f682739439b ("arm64: Add hypervisor safe helper for checking constant capabilities") We then made use of cpus_have_const_cap() throughout the kernel. 
Subsequently, we had to defer updating the static_key associated with each capability in order to avoid lockdep complaints. To avoid breaking kernel-wide usage of cpus_have_const_cap(), this was updated to fall back to the cpu_hwcaps array if called before the static_keys were updated. As the kvm hyp code was only called later than this, the fallback is redundant but not functionally harmful. For more details, see commit: 63a1e1c95e60e798 ("arm64/cpufeature: don't use mutex in bringup path") Today we have more users of cpus_have_const_cap() which are only called once the relevant static keys are initialized, and it would be beneficial to avoid the redundant code. To that end, this patch adds a new cpus_have_final_cap(), helper which is intend to be used in code which is only run once capabilities have been finalized, and will never check the cpus_hwcap array. This helps the compiler to generate better code as it no longer needs to generate code to address and test the cpus_hwcap array. To help catch misuse, cpus_have_final_cap() will BUG() if called before capabilities are finalized. In hyp context, BUG() will result in a hyp panic, but the specific BUG() instance will not be identified in the usual way. Comments are added to the various cpus_have_*_cap() helpers to describe the constraints on when they can be used. For clarity cpus_have_cap() is moved above the other helpers. Similarly the helpers are updated to use system_capabilities_finalized() consistently, and this is made __always_inline as required by its new callers. Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Reviewed-by: Suzuki K Poulose Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 58 ++++++++++++++++++++++++++++++------- 1 file changed, 47 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 92ef9539874a..940b2b67b428 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -390,14 +390,16 @@ unsigned long cpu_get_elf_hwcap2(void); #define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name)) #define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name)) -/* System capability check for constant caps */ -static __always_inline bool __cpus_have_const_cap(int num) +static __always_inline bool system_capabilities_finalized(void) { - if (num >= ARM64_NCAPS) - return false; - return static_branch_unlikely(&cpu_hwcap_keys[num]); + return static_branch_likely(&arm64_const_caps_ready); } +/* + * Test for a capability with a runtime check. + * + * Before the capability is detected, this returns false. + */ static inline bool cpus_have_cap(unsigned int num) { if (num >= ARM64_NCAPS) @@ -405,14 +407,53 @@ static inline bool cpus_have_cap(unsigned int num) return test_bit(num, cpu_hwcaps); } +/* + * Test for a capability without a runtime check. + * + * Before capabilities are finalized, this returns false. + * After capabilities are finalized, this is patched to avoid a runtime check. + * + * @num must be a compile-time constant. + */ +static __always_inline bool __cpus_have_const_cap(int num) +{ + if (num >= ARM64_NCAPS) + return false; + return static_branch_unlikely(&cpu_hwcap_keys[num]); +} + +/* + * Test for a capability, possibly with a runtime check. + * + * Before capabilities are finalized, this behaves as cpus_have_cap(). + * After capabilities are finalized, this is patched to avoid a runtime check. + * + * @num must be a compile-time constant. 
+ */ static __always_inline bool cpus_have_const_cap(int num) { - if (static_branch_likely(&arm64_const_caps_ready)) + if (system_capabilities_finalized()) return __cpus_have_const_cap(num); else return cpus_have_cap(num); } +/* + * Test for a capability without a runtime check. + * + * Before capabilities are finalized, this will BUG(). + * After capabilities are finalized, this is patched to avoid a runtime check. + * + * @num must be a compile-time constant. + */ +static __always_inline bool cpus_have_final_cap(int num) +{ + if (system_capabilities_finalized()) + return __cpus_have_const_cap(num); + else + BUG(); +} + static inline void cpus_set_cap(unsigned int num) { if (num >= ARM64_NCAPS) { @@ -613,11 +654,6 @@ static inline bool system_has_prio_mask_debugging(void) system_uses_irq_prio_masking(); } -static inline bool system_capabilities_finalized(void) -{ - return static_branch_likely(&arm64_const_caps_ready); -} - #define ARM64_BP_HARDEN_UNKNOWN -1 #define ARM64_BP_HARDEN_WA_NEEDED 0 #define ARM64_BP_HARDEN_NOT_REQUIRED 1 -- cgit From b5475d8caedb71476f999a858ea3f8c24c5f9e50 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 21 Feb 2020 14:50:22 +0000 Subject: arm64: kvm: hyp: use cpus_have_final_cap() The KVM hyp code is only run after system capabilities have been finalized, and thus all const cap checks have been patched. This is noted in in __cpu_init_hyp_mode(), where we BUG() if called too early: | /* | * Call initialization code, and switch to the full blown HYP code. | * If the cpucaps haven't been finalized yet, something has gone very | * wrong, and hyp will crash and burn when it uses any | * cpus_have_const_cap() wrapper. | */ Given this, the hyp code can use cpus_have_final_cap() and avoid generating code to check the cpu_hwcaps array, which would be unsafe to run in hyp context. This patch migrate the KVM hyp code to cpus_have_final_cap(), avoiding this redundant code generation, and making it possible to detect if we accidentally invoke this code too early. In the latter case, the BUG() in cpus_have_final_cap() will cause a hyp panic. 
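As a hedged sketch of the intended call pattern (hypothetical function, not part of the patch; ARM64_HAS_RAS_EXTN is simply one of the capabilities already checked in the hyp code): code that is guaranteed to run only after capability finalization can use the new helper directly, and gets a BUG()/hyp panic rather than a silent array fallback if that guarantee is ever broken.

/* Sketch: valid only once system capabilities have been finalized. */
static bool example_has_ras(void)
{
	return cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
}
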
Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Cc: James Morse Cc: Julien Thierry Cc: Suzuki Poulouse Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kvm/hyp/switch.c | 14 +++++++------- arch/arm64/kvm/hyp/sysreg-sr.c | 8 ++++---- arch/arm64/kvm/hyp/tlb.c | 8 ++++---- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index dfe8dd172512..27fcdff08dd6 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -127,7 +127,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) write_sysreg(val, cptr_el2); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; isb(); @@ -146,12 +146,12 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) { u64 hcr = vcpu->arch.hcr_el2; - if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM)) + if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM)) hcr |= HCR_TVM; write_sysreg(hcr, hcr_el2); - if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) + if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); if (has_vhe()) @@ -181,7 +181,7 @@ static void __hyp_text __deactivate_traps_nvhe(void) { u64 mdcr_el2 = read_sysreg(mdcr_el2); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { u64 val; /* @@ -328,7 +328,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) * resolve the IPA using the AT instruction. */ if (!(esr & ESR_ELx_S1PTW) && - (cpus_have_const_cap(ARM64_WORKAROUND_834220) || + (cpus_have_final_cap(ARM64_WORKAROUND_834220) || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) { if (!__translate_far_to_hpfar(far, &hpfar)) return false; @@ -498,7 +498,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) if (*exit_code != ARM_EXCEPTION_TRAP) goto exit; - if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) && + if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) && kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 && handle_tx2_tvm(vcpu)) return true; @@ -555,7 +555,7 @@ exit: static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu) { - if (!cpus_have_const_cap(ARM64_SSBD)) + if (!cpus_have_final_cap(ARM64_SSBD)) return false; return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG); diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 7672a978926c..75b1925763f1 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -71,7 +71,7 @@ static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ct ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR); ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR); - if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) + if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2); } @@ -118,7 +118,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); - if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { + if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR); write_sysreg_el1(ctxt->sys_regs[TCR_EL1], 
SYS_TCR); } else if (!ctxt->__hyp_running_vcpu) { @@ -149,7 +149,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) && + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) && ctxt->__hyp_running_vcpu) { /* * Must only be done for host registers, hence the context @@ -194,7 +194,7 @@ __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt) write_sysreg_el2(ctxt->gp_regs.regs.pc, SYS_ELR); write_sysreg_el2(pstate, SYS_SPSR); - if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) + if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2); } diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 92f560e3e1aa..ceaddbe4279f 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -23,7 +23,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, local_irq_save(cxt->flags); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) { /* * For CPUs that are affected by ARM errata 1165522 or 1530923, * we cannot trust stage-1 to be in a correct state at that @@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm, struct tlb_inv_context *cxt) { - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { u64 val; /* @@ -103,7 +103,7 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm, write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); isb(); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) { /* Restore the registers to what they were */ write_sysreg_el1(cxt->tcr, SYS_TCR); write_sysreg_el1(cxt->sctlr, SYS_SCTLR); @@ -117,7 +117,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm, { write_sysreg(0, vttbr_el2); - if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { /* Ensure write of the host VMID */ isb(); /* Restore the host's TCR_EL1 */ -- cgit From 0c837c4f73d1a31f47e80ebc50bcf23668d895c4 Mon Sep 17 00:00:00 2001 From: 韩科才 Date: Sun, 15 Mar 2020 12:01:19 +0800 Subject: arm64: fix spelling mistake "ca not" -> "cannot" There is a spelling mistake in the comment, Fix it. Signed-off-by: hankecai Signed-off-by: Catalin Marinas --- arch/arm64/lib/strcmp.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S index 4767540d1b94..4e79566726c8 100644 --- a/arch/arm64/lib/strcmp.S +++ b/arch/arm64/lib/strcmp.S @@ -186,7 +186,7 @@ CPU_LE( rev data2, data2 ) * as carry-propagation can corrupt the upper bits if the trailing * bytes in the string contain 0x01. * However, if there is no NUL byte in the dword, we can generate - * the result directly. We ca not just subtract the bytes as the + * the result directly. We cannot just subtract the bytes as the * MSB might be significant. 
*/ CPU_BE( cbnz has_nul, 1f ) -- cgit From 62b9562a1c46913349e7897c4a39c544f176b3da Mon Sep 17 00:00:00 2001 From: Zheng Wei Date: Fri, 13 Mar 2020 22:54:02 +0800 Subject: arm64: add blank after 'if' add blank after 'if' for armv8_deprecated_init() to make it comply with kernel coding style. Signed-off-by: Zheng Wei Signed-off-by: Catalin Marinas --- arch/arm64/kernel/armv8_deprecated.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index 7832b3216370..4cc581af2d96 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -630,7 +630,7 @@ static int __init armv8_deprecated_init(void) register_insn_emulation(&cp15_barrier_ops); if (IS_ENABLED(CONFIG_SETEND_EMULATION)) { - if(system_supports_mixed_endian_el0()) + if (system_supports_mixed_endian_el0()) register_insn_emulation(&setend_ops); else pr_info("setend instruction emulation is not supported on this system\n"); -- cgit From c2f4afdc3f99403d6d2ef37509c04caa98374620 Mon Sep 17 00:00:00 2001 From: Li Tao Date: Wed, 11 Mar 2020 15:31:55 +0800 Subject: arm64: kexec_file: Fixed code style. Remove unnecessary blank. Signed-off-by: Li Tao Signed-off-by: Catalin Marinas --- arch/arm64/kernel/machine_kexec_file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index dd3ae8081b38..b40c3b0def92 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -121,7 +121,7 @@ static int setup_dtb(struct kimage *image, /* add kaslr-seed */ ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED); - if (ret == -FDT_ERR_NOTFOUND) + if (ret == -FDT_ERR_NOTFOUND) ret = 0; else if (ret) goto out; -- cgit From 24b2cce91f47d19fcc5a2c4c60dbabbd0e30adf1 Mon Sep 17 00:00:00 2001 From: 韩科才 Date: Wed, 11 Mar 2020 14:52:49 +0800 Subject: arm64: remove redundant blank for '=' operator remove redundant blank for '=' operator, it may be more elegant. Signed-off-by: hankecai Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0b6715625cf6..ce60d1012bfa 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -551,7 +551,7 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new) BUG_ON(!reg); - for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { + for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { u64 ftr_mask = arm64_ftr_mask(ftrp); s64 ftr_new = arm64_ftr_value(ftrp, new); -- cgit From d22b115cbfbb7e4a938f9eb6ea77da9ecac3df5a Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Mon, 2 Mar 2020 13:03:40 +1100 Subject: arm64/kernel: Simplify __cpu_up() by bailing out early The function __cpu_up() is invoked to bring up the target CPU through the backend, PSCI for example. The nested if statements won't be needed if we bail out early on the following two conditions where the status won't be checked. The code looks simplified in that case. * Error returned from the backend (e.g. 
PSCI) * The target CPU has been marked as onlined Signed-off-by: Gavin Shan Signed-off-by: Catalin Marinas Reviewed-by: Mark Rutland --- arch/arm64/kernel/smp.c | 79 +++++++++++++++++++++++-------------------------- 1 file changed, 37 insertions(+), 42 deletions(-) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index d4ed9a19d8fe..2a9d8f39dc58 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -115,60 +115,55 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) update_cpu_boot_status(CPU_MMU_OFF); __flush_dcache_area(&secondary_data, sizeof(secondary_data)); - /* - * Now bring the CPU into our world. - */ + /* Now bring the CPU into our world */ ret = boot_secondary(cpu, idle); - if (ret == 0) { - /* - * CPU was successfully started, wait for it to come online or - * time out. - */ - wait_for_completion_timeout(&cpu_running, - msecs_to_jiffies(5000)); - - if (!cpu_online(cpu)) { - pr_crit("CPU%u: failed to come online\n", cpu); - ret = -EIO; - } - } else { + if (ret) { pr_err("CPU%u: failed to boot: %d\n", cpu, ret); return ret; } + /* + * CPU was successfully started, wait for it to come online or + * time out. + */ + wait_for_completion_timeout(&cpu_running, + msecs_to_jiffies(5000)); + if (cpu_online(cpu)) + return 0; + + pr_crit("CPU%u: failed to come online\n", cpu); secondary_data.task = NULL; secondary_data.stack = NULL; __flush_dcache_area(&secondary_data, sizeof(secondary_data)); status = READ_ONCE(secondary_data.status); - if (ret && status) { - - if (status == CPU_MMU_OFF) - status = READ_ONCE(__early_cpu_boot_status); + if (status == CPU_MMU_OFF) + status = READ_ONCE(__early_cpu_boot_status); - switch (status & CPU_BOOT_STATUS_MASK) { - default: - pr_err("CPU%u: failed in unknown state : 0x%lx\n", - cpu, status); - cpus_stuck_in_kernel++; - break; - case CPU_KILL_ME: - if (!op_cpu_kill(cpu)) { - pr_crit("CPU%u: died during early boot\n", cpu); - break; - } - pr_crit("CPU%u: may not have shut down cleanly\n", cpu); - /* Fall through */ - case CPU_STUCK_IN_KERNEL: - pr_crit("CPU%u: is stuck in kernel\n", cpu); - if (status & CPU_STUCK_REASON_52_BIT_VA) - pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); - if (status & CPU_STUCK_REASON_NO_GRAN) - pr_crit("CPU%u: does not support %luK granule \n", cpu, PAGE_SIZE / SZ_1K); - cpus_stuck_in_kernel++; + switch (status & CPU_BOOT_STATUS_MASK) { + default: + pr_err("CPU%u: failed in unknown state : 0x%lx\n", + cpu, status); + cpus_stuck_in_kernel++; + break; + case CPU_KILL_ME: + if (!op_cpu_kill(cpu)) { + pr_crit("CPU%u: died during early boot\n", cpu); break; - case CPU_PANIC_KERNEL: - panic("CPU%u detected unsupported configuration\n", cpu); } + pr_crit("CPU%u: may not have shut down cleanly\n", cpu); + /* Fall through */ + case CPU_STUCK_IN_KERNEL: + pr_crit("CPU%u: is stuck in kernel\n", cpu); + if (status & CPU_STUCK_REASON_52_BIT_VA) + pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); + if (status & CPU_STUCK_REASON_NO_GRAN) { + pr_crit("CPU%u: does not support %luK granule\n", + cpu, PAGE_SIZE / SZ_1K); + } + cpus_stuck_in_kernel++; + break; + case CPU_PANIC_KERNEL: + panic("CPU%u detected unsupported configuration\n", cpu); } return ret; -- cgit From c17a290f7e7e59d24b4507736b7b40b0eb5f8f1f Mon Sep 17 00:00:00 2001 From: "glider@google.com" Date: Thu, 12 Mar 2020 16:59:20 +0100 Subject: arm64: define __alloc_zeroed_user_highpage When running the kernel with init_on_alloc=1, calling the default implementation of __alloc_zeroed_user_highpage() from include/linux/highmem.h leads to 
double-initialization of the allocated page (first by the page allocator, then by clear_user_page(). Calling alloc_page_vma() with __GFP_ZERO, similarly to e.g. x86, seems to be enough to ensure the user page is zeroed only once. Signed-off-by: Alexander Potapenko Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/page.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index d39ddb258a04..75d6cd23a679 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -21,6 +21,10 @@ extern void __cpu_copy_user_page(void *to, const void *from, extern void copy_page(void *to, const void *from); extern void clear_page(void *to); +#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ + alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE + #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) #define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr) -- cgit From 06236821aeac480a0835dd8dd9fb20e3b5a5d80d Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 15 Mar 2020 10:37:15 +0100 Subject: perf: arm-ccn: Use scnprintf() for robustness snprintf() is a hard-to-use function, it's especially difficult to use it for concatenating substrings in a buffer with a limited size. Since snprintf() returns the would-be-output size, not the actual size, the subsequent use of snprintf() may point to the incorrect position easily. Although the current code doesn't actually overflow the buffer, it's an incorrect usage. This patch replaces such snprintf() calls with a safer version, scnprintf(). Acked-by: Mark Rutland Signed-off-by: Takashi Iwai Signed-off-by: Will Deacon --- drivers/perf/arm-ccn.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index fea354d6fb29..d50edef91f59 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -328,15 +328,15 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev, struct arm_ccn_pmu_event, attr); ssize_t res; - res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type); + res = scnprintf(buf, PAGE_SIZE, "type=0x%x", event->type); if (event->event) - res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x", + res += scnprintf(buf + res, PAGE_SIZE - res, ",event=0x%x", event->event); if (event->def) - res += snprintf(buf + res, PAGE_SIZE - res, ",%s", + res += scnprintf(buf + res, PAGE_SIZE - res, ",%s", event->def); if (event->mask) - res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x", + res += scnprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x", event->mask); /* Arguments required by an event */ @@ -344,25 +344,25 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev, case CCN_TYPE_CYCLES: break; case CCN_TYPE_XP: - res += snprintf(buf + res, PAGE_SIZE - res, + res += scnprintf(buf + res, PAGE_SIZE - res, ",xp=?,vc=?"); if (event->event == CCN_EVENT_WATCHPOINT) - res += snprintf(buf + res, PAGE_SIZE - res, + res += scnprintf(buf + res, PAGE_SIZE - res, ",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?"); else - res += snprintf(buf + res, PAGE_SIZE - res, + res += scnprintf(buf + res, PAGE_SIZE - res, ",bus=?"); break; case CCN_TYPE_MN: - res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id); + res += scnprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id); break; default: - res += snprintf(buf + res, PAGE_SIZE - res, ",node=?"); + res += 
scnprintf(buf + res, PAGE_SIZE - res, ",node=?"); break; } - res += snprintf(buf + res, PAGE_SIZE - res, "\n"); + res += scnprintf(buf + res, PAGE_SIZE - res, "\n"); return res; } -- cgit From 29227d6ea1572b160e5bea45b3c93a0346444dfa Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 17 Mar 2020 18:22:54 +0000 Subject: arm64: perf: Clean up enable/disable calls Reading this code bordered on painful, what with all the repetition and pointless return values. More fundamentally, dribbling the hardware enables and disables in one bit at a time incurs needless system register overhead for chained events and on reset. We already use bitmask values for the KVM hooks, so consolidate all the register accesses to match, and make a reasonable saving in both source and object code. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/kernel/perf_event.c | 87 +++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 52 deletions(-) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 726cd8bda025..8062d79f4cbb 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -450,86 +450,74 @@ static inline void armv8pmu_write_event_type(struct perf_event *event) } } -static inline int armv8pmu_enable_counter(int idx) +static u32 armv8pmu_event_cnten_mask(struct perf_event *event) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - write_sysreg(BIT(counter), pmcntenset_el0); - return idx; + int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); + u32 mask = BIT(counter); + + if (armv8pmu_event_is_chained(event)) + mask |= BIT(counter - 1); + return mask; +} + +static inline void armv8pmu_enable_counter(u32 mask) +{ + write_sysreg(mask, pmcntenset_el0); } static inline void armv8pmu_enable_event_counter(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; - int idx = event->hw.idx; - u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx)); - - if (armv8pmu_event_is_chained(event)) - counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1)); + u32 mask = armv8pmu_event_cnten_mask(event); - kvm_set_pmu_events(counter_bits, attr); + kvm_set_pmu_events(mask, attr); /* We rely on the hypervisor switch code to enable guest counters */ - if (!kvm_pmu_counter_deferred(attr)) { - armv8pmu_enable_counter(idx); - if (armv8pmu_event_is_chained(event)) - armv8pmu_enable_counter(idx - 1); - } + if (!kvm_pmu_counter_deferred(attr)) + armv8pmu_enable_counter(mask); } -static inline int armv8pmu_disable_counter(int idx) +static inline void armv8pmu_disable_counter(u32 mask) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - write_sysreg(BIT(counter), pmcntenclr_el0); - return idx; + write_sysreg(mask, pmcntenclr_el0); } static inline void armv8pmu_disable_event_counter(struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; struct perf_event_attr *attr = &event->attr; - int idx = hwc->idx; - u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx)); + u32 mask = armv8pmu_event_cnten_mask(event); - if (armv8pmu_event_is_chained(event)) - counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1)); - - kvm_clr_pmu_events(counter_bits); + kvm_clr_pmu_events(mask); /* We rely on the hypervisor switch code to disable guest counters */ - if (!kvm_pmu_counter_deferred(attr)) { - if (armv8pmu_event_is_chained(event)) - armv8pmu_disable_counter(idx - 1); - armv8pmu_disable_counter(idx); - } + if (!kvm_pmu_counter_deferred(attr)) + armv8pmu_disable_counter(mask); } -static inline int armv8pmu_enable_intens(int idx) +static inline void 
armv8pmu_enable_intens(u32 mask) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - write_sysreg(BIT(counter), pmintenset_el1); - return idx; + write_sysreg(mask, pmintenset_el1); } -static inline int armv8pmu_enable_event_irq(struct perf_event *event) +static inline void armv8pmu_enable_event_irq(struct perf_event *event) { - return armv8pmu_enable_intens(event->hw.idx); + u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); + armv8pmu_enable_intens(BIT(counter)); } -static inline int armv8pmu_disable_intens(int idx) +static inline void armv8pmu_disable_intens(u32 mask) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - write_sysreg(BIT(counter), pmintenclr_el1); + write_sysreg(mask, pmintenclr_el1); isb(); /* Clear the overflow flag in case an interrupt is pending. */ - write_sysreg(BIT(counter), pmovsclr_el0); + write_sysreg(mask, pmovsclr_el0); isb(); - - return idx; } -static inline int armv8pmu_disable_event_irq(struct perf_event *event) +static inline void armv8pmu_disable_event_irq(struct perf_event *event) { - return armv8pmu_disable_intens(event->hw.idx); + u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); + armv8pmu_disable_intens(BIT(counter)); } static inline u32 armv8pmu_getreset_flags(void) @@ -814,14 +802,9 @@ static int armv8pmu_filter_match(struct perf_event *event) static void armv8pmu_reset(void *info) { - struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; - u32 idx, nb_cnt = cpu_pmu->num_events; - /* The counter and interrupt enable registers are unknown at reset. */ - for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { - armv8pmu_disable_counter(idx); - armv8pmu_disable_intens(idx); - } + armv8pmu_disable_counter(U32_MAX); + armv8pmu_disable_intens(U32_MAX); /* Clear the counters we flip at guest entry/exit */ kvm_clr_pmu_events(U32_MAX); -- cgit From 8e35aa642ee4dab01b16cc4b2df59d1936f3b3c2 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Mon, 2 Mar 2020 18:17:50 +0000 Subject: arm64: cpufeature: Extract capped perfmon fields When emulating ID registers there is often a need to cap the version bits of a feature such that the guest will not use features that the host is not aware of. For example, when KVM mediates access to the PMU by emulating register accesses. Let's add a helper that extracts a performance monitors ID field and caps the version to a given value. Fields that identify the version of the Performance Monitors Extension do not follow the standard ID scheme, and instead follow the scheme described in ARM DDI 0487E.a page D13-2825 "Alternative ID scheme used for the Performance Monitors Extension version". The value 0xF means an IMPLEMENTATION DEFINED PMU is present, and values 0x0-OxE can be treated the same as an unsigned field with 0x0 meaning no PMU is present. Signed-off-by: Andrew Murray Reviewed-by: Suzuki K Poulose [Mark: rework to handle perfmon fields] Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpufeature.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 92ef9539874a..186f4e19207e 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -447,6 +447,29 @@ cpuid_feature_extract_unsigned_field(u64 features, int field) return cpuid_feature_extract_unsigned_field_width(features, field, 4); } +/* + * Fields that identify the version of the Performance Monitors Extension do + * not follow the standard ID scheme. 
See ARM DDI 0487E.a page D13-2825, + * "Alternative ID scheme used for the Performance Monitors Extension version". + */ +static inline u64 __attribute_const__ +cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap) +{ + u64 val = cpuid_feature_extract_unsigned_field(features, field); + u64 mask = GENMASK_ULL(field + 3, field); + + /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */ + if (val == 0xf) + val = 0; + + if (val > cap) { + features &= ~mask; + features |= (cap << field) & mask; + } + + return features; +} + static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp) { return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); -- cgit From c854188ea01062f5a5fd7f05658feb1863774eaa Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Mon, 2 Mar 2020 18:17:51 +0000 Subject: KVM: arm64: limit PMU version to PMUv3 for ARMv8.1 We currently expose the PMU version of the host to the guest via emulation of the DFR0_EL1 and AA64DFR0_EL1 debug feature registers. However many of the features offered beyond PMUv3 for 8.1 are not supported in KVM. Examples of this include support for the PMMIR registers (added in PMUv3 for ARMv8.4) and 64-bit event counters added in (PMUv3 for ARMv8.5). Let's trap the Debug Feature Registers in order to limit PMUVer/PerfMon in the Debug Feature Registers to PMUv3 for ARMv8.1 to avoid unexpected behaviour. Both ID_AA64DFR0.PMUVer and ID_DFR0.PerfMon follow the "Alternative ID scheme used for the Performance Monitors Extension version" where 0xF means an IMPLEMENTATION DEFINED PMU is implemented, and values 0x0-0xE are treated as with an unsigned field (with 0x0 meaning no PMU is present). As we don't expect to expose an IMPLEMENTATION DEFINED PMU, and our cap is below 0xF, we can treat these fields as unsigned when applying the cap. 
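A small worked example of the capping arithmetic, using made-up numbers rather than anything from the patch: a 4-bit field holding 0x6 with a cap of 0x4 is rewritten to the cap, while values at or below the cap (and the 0xF IMPLEMENTATION DEFINED encoding, which the helper treats as 0) are left alone.

/* Illustration only: same logic as the helper added above, sample inputs. */
u64 features = 0x60;	/* field value 0x6 at shift 4 */
int field = 4;
u64 cap = 0x4;
u64 val = (features >> field) & 0xf;
u64 mask = GENMASK_ULL(field + 3, field);

if (val == 0xf)		/* IMPLEMENTATION DEFINED, treat as unimplemented */
	val = 0;
if (val > cap)
	features = (features & ~mask) | ((cap << field) & mask);
/* features now reads 0x40: the field is capped to 0x4. */
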
Signed-off-by: Andrew Murray Reviewed-by: Suzuki K Poulose [Mark: make field names consistent, use perfmon cap] Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 6 ++++++ arch/arm64/kvm/sys_regs.c | 10 ++++++++++ 2 files changed, 16 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b91570ff9db1..d8f1eed070f0 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -702,6 +702,12 @@ #define ID_AA64DFR0_TRACEVER_SHIFT 4 #define ID_AA64DFR0_DEBUGVER_SHIFT 0 +#define ID_AA64DFR0_PMUVER_8_1 0x4 + +#define ID_DFR0_PERFMON_SHIFT 24 + +#define ID_DFR0_PERFMON_8_1 0x4 + #define ID_ISAR5_RDM_SHIFT 24 #define ID_ISAR5_CRC32_SHIFT 16 #define ID_ISAR5_SHA2_SHIFT 12 diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3e909b117f0c..b0a3e8976b90 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1085,6 +1085,16 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, (0xfUL << ID_AA64ISAR1_API_SHIFT) | (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | (0xfUL << ID_AA64ISAR1_GPI_SHIFT)); + } else if (id == SYS_ID_AA64DFR0_EL1) { + /* Limit guests to PMUv3 for ARMv8.1 */ + val = cpuid_feature_cap_perfmon_field(val, + ID_AA64DFR0_PMUVER_SHIFT, + ID_AA64DFR0_PMUVER_8_1); + } else if (id == SYS_ID_DFR0_EL1) { + /* Limit guests to PMUv3 for ARMv8.1 */ + val = cpuid_feature_cap_perfmon_field(val, + ID_DFR0_PERFMON_SHIFT, + ID_DFR0_PERFMON_8_1); } return val; -- cgit From 8673e02e58410e6c4cefa499efa846286e45a991 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Mon, 2 Mar 2020 18:17:52 +0000 Subject: arm64: perf: Add support for ARMv8.5-PMU 64-bit counters At present ARMv8 event counters are limited to 32-bits, though by using the CHAIN event it's possible to combine adjacent counters to achieve 64-bits. The perf config1:0 bit can be set to use such a configuration. With the introduction of ARMv8.5-PMU support, all event counters can now be used as 64-bit counters. Let's enable 64-bit event counters where support exists. Unless the user sets config1:0 we will adjust the counter value such that it overflows upon 32-bit overflow. This follows the same behaviour as the cycle counter which has always been (and remains) 64-bits. 
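A rough illustration of the bias (made-up value, mirroring the GENMASK trick in the diff below): when a logically 32-bit event lives in a 64-bit hardware counter, the upper half is pre-filled so the next 32-bit wrap still raises the overflow interrupt.

/* Sketch: writing 0x10 as a "32-bit" value into a 64-bit counter. */
u64 value = 0x10;
value |= GENMASK(63, 32);	/* becomes 0xffffffff00000010 */
/* The counter now overflows, and interrupts, after 2^32 - 0x10 increments. */
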
Signed-off-by: Andrew Murray Reviewed-by: Suzuki K Poulose [Mark: fix ID field names, compare with 8.5 value] Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/perf_event.h | 3 +- arch/arm64/include/asm/sysreg.h | 4 ++ arch/arm64/kernel/perf_event.c | 87 ++++++++++++++++++++++++++++++------- include/linux/perf/arm_pmu.h | 1 + 4 files changed, 78 insertions(+), 17 deletions(-) diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index 2bdbc79bbd01..e7765b62c712 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -176,9 +176,10 @@ #define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ #define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ +#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */ #define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ #define ARMV8_PMU_PMCR_N_MASK 0x1f -#define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */ +#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */ /* * PMOVSR: counters overflow flag status reg diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index d8f1eed070f0..9b66c5b5b36f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -702,7 +702,11 @@ #define ID_AA64DFR0_TRACEVER_SHIFT 4 #define ID_AA64DFR0_DEBUGVER_SHIFT 0 +#define ID_AA64DFR0_PMUVER_8_0 0x1 #define ID_AA64DFR0_PMUVER_8_1 0x4 +#define ID_AA64DFR0_PMUVER_8_4 0x5 +#define ID_AA64DFR0_PMUVER_8_5 0x6 +#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf #define ID_DFR0_PERFMON_SHIFT 24 diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 8062d79f4cbb..4d7879484cec 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -285,6 +285,17 @@ static struct attribute_group armv8_pmuv3_format_attr_group = { #define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \ (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) + +/* + * We unconditionally enable ARMv8.5-PMU long event counter support + * (64-bit events) where supported. Indicate if this arm_pmu has long + * event counter support. + */ +static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) +{ + return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5); +} + /* * We must chain two programmable counters for 64 bit events, * except when we have allocated the 64bit cycle counter (for CPU @@ -294,9 +305,11 @@ static struct attribute_group armv8_pmuv3_format_attr_group = { static inline bool armv8pmu_event_is_chained(struct perf_event *event) { int idx = event->hw.idx; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); return !WARN_ON(idx < 0) && armv8pmu_event_is_64bit(event) && + !armv8pmu_has_long_event(cpu_pmu) && (idx != ARMV8_IDX_CYCLE_COUNTER); } @@ -345,7 +358,7 @@ static inline void armv8pmu_select_counter(int idx) isb(); } -static inline u32 armv8pmu_read_evcntr(int idx) +static inline u64 armv8pmu_read_evcntr(int idx) { armv8pmu_select_counter(idx); return read_sysreg(pmxevcntr_el0); @@ -362,6 +375,44 @@ static inline u64 armv8pmu_read_hw_counter(struct perf_event *event) return val; } +/* + * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP + * is set the event counters also become 64-bit counters. Unless the + * user has requested a long counter (attr.config1) then we want to + * interrupt upon 32-bit overflow - we achieve this by applying a bias. 
+ */ +static bool armv8pmu_event_needs_bias(struct perf_event *event) +{ + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (armv8pmu_event_is_64bit(event)) + return false; + + if (armv8pmu_has_long_event(cpu_pmu) || + idx == ARMV8_IDX_CYCLE_COUNTER) + return true; + + return false; +} + +static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value) +{ + if (armv8pmu_event_needs_bias(event)) + value |= GENMASK(63, 32); + + return value; +} + +static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value) +{ + if (armv8pmu_event_needs_bias(event)) + value &= ~GENMASK(63, 32); + + return value; +} + static u64 armv8pmu_read_counter(struct perf_event *event) { struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); @@ -377,10 +428,10 @@ static u64 armv8pmu_read_counter(struct perf_event *event) else value = armv8pmu_read_hw_counter(event); - return value; + return armv8pmu_unbias_long_counter(event, value); } -static inline void armv8pmu_write_evcntr(int idx, u32 value) +static inline void armv8pmu_write_evcntr(int idx, u64 value) { armv8pmu_select_counter(idx); write_sysreg(value, pmxevcntr_el0); @@ -405,20 +456,14 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value) struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; + value = armv8pmu_bias_long_counter(event, value); + if (!armv8pmu_counter_valid(cpu_pmu, idx)) pr_err("CPU%u writing wrong counter %d\n", smp_processor_id(), idx); - else if (idx == ARMV8_IDX_CYCLE_COUNTER) { - /* - * The cycles counter is really a 64-bit counter. - * When treating it as a 32-bit counter, we only count - * the lower 32 bits, and set the upper 32-bits so that - * we get an interrupt upon 32-bit overflow. - */ - if (!armv8pmu_event_is_64bit(event)) - value |= 0xffffffff00000000ULL; + else if (idx == ARMV8_IDX_CYCLE_COUNTER) write_sysreg(value, pmccntr_el0); - } else + else armv8pmu_write_hw_counter(event, value); } @@ -731,7 +776,8 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, /* * Otherwise use events counters */ - if (armv8pmu_event_is_64bit(event)) + if (armv8pmu_event_is_64bit(event) && + !armv8pmu_has_long_event(cpu_pmu)) return armv8pmu_get_chain_idx(cpuc, cpu_pmu); else return armv8pmu_get_single_idx(cpuc, cpu_pmu); @@ -802,6 +848,9 @@ static int armv8pmu_filter_match(struct perf_event *event) static void armv8pmu_reset(void *info) { + struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; + u32 pmcr; + /* The counter and interrupt enable registers are unknown at reset. */ armv8pmu_disable_counter(U32_MAX); armv8pmu_disable_intens(U32_MAX); @@ -813,8 +862,13 @@ static void armv8pmu_reset(void *info) * Initialize & Reset PMNC. Request overflow interrupt for * 64 bit cycle counter but cheat in armv8pmu_write_counter(). 
*/ - armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | - ARMV8_PMU_PMCR_LC); + pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC; + + /* Enable long event counter support where available */ + if (armv8pmu_has_long_event(cpu_pmu)) + pmcr |= ARMV8_PMU_PMCR_LP; + + armv8pmu_pmcr_write(pmcr); } static int __armv8_pmuv3_map_event(struct perf_event *event, @@ -897,6 +951,7 @@ static void __armv8pmu_probe_pmu(void *info) if (pmuver == 0xf || pmuver == 0) return; + cpu_pmu->pmuver = pmuver; probe->present = true; /* Read the nb of CNTx counters supported from PMNC */ diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 71f525a35ac2..5b616dde9a4c 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -80,6 +80,7 @@ struct arm_pmu { struct pmu pmu; cpumask_t supported_cpus; char *name; + int pmuver; irqreturn_t (*handle_irq)(struct arm_pmu *pmu); void (*enable)(struct perf_event *event); void (*disable)(struct perf_event *event); -- cgit From 7fec52bf8095a95d7f698e9a165d4cace514a204 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Thu, 19 Mar 2020 10:01:42 +1100 Subject: arm64: Declare ACPI parking protocol CPU operation if needed There is no need to declare the corresponding CPU operation when CONFIG_ARM64_ACPI_PARKING_PROTOCOL is disabled, even though doing so doesn't cause any compiler warnings. Signed-off-by: Gavin Shan Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpu_ops.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index 7e07072757af..2082cfb1be86 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -15,7 +15,9 @@ #include extern const struct cpu_operations smp_spin_table_ops; +#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL extern const struct cpu_operations acpi_parking_protocol_ops; +#endif extern const struct cpu_operations cpu_psci_ops; const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; -- cgit From 6885fb129be30c627eb2f5b1498dba498ff6c037 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Thu, 19 Mar 2020 10:01:43 +1100 Subject: arm64: Rename cpu_read_ops() to init_cpu_ops() This renames cpu_read_ops() to init_cpu_ops(), as the function is only called during the initialization phase. Also, a subsequent patch will introduce get_cpu_ops() to retrieve the CPU operations for a given CPU index, and the name cpu_read_ops() would be hard to distinguish from get_cpu_ops().
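To illustrate the intended division of labour, a rough sketch only: the caller below is hypothetical, and get_cpu_ops() only appears in the next patch.

	/*
	 * Hypothetical caller, sketching the intended split: init_cpu_ops()
	 * is the one-off __init parse-and-record step, while get_cpu_ops()
	 * (added by the next patch) is the runtime lookup of what was
	 * recorded.
	 */
	static int __init example_bringup_cpu(int cpu)
	{
		const struct cpu_operations *ops;

		if (init_cpu_ops(cpu))		/* parse the enable method once */
			return -ENODEV;

		ops = get_cpu_ops(cpu);		/* look up the recorded ops */
		if (!ops)
			return -ENODEV;

		return ops->cpu_init(cpu);
	}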
Signed-off-by: Gavin Shan Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpu_ops.h | 6 +++--- arch/arm64/kernel/cpu_ops.c | 2 +- arch/arm64/kernel/setup.c | 2 +- arch/arm64/kernel/smp.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index 86aabf1e0199..baa13b5db2ca 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -56,11 +56,11 @@ struct cpu_operations { }; extern const struct cpu_operations *cpu_ops[NR_CPUS]; -int __init cpu_read_ops(int cpu); +int __init init_cpu_ops(int cpu); -static inline void __init cpu_read_bootcpu_ops(void) +static inline void __init init_bootcpu_ops(void) { - cpu_read_ops(0); + init_cpu_ops(0); } #endif /* ifndef __ASM_CPU_OPS_H */ diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index 2082cfb1be86..a6c3c816b618 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -96,7 +96,7 @@ static const char *__init cpu_read_enable_method(int cpu) /* * Read a cpu's enable method and record it in cpu_ops. */ -int __init cpu_read_ops(int cpu) +int __init init_cpu_ops(int cpu) { const char *enable_method = cpu_read_enable_method(cpu); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index a34890bf309f..f66bd260cce8 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -344,7 +344,7 @@ void __init setup_arch(char **cmdline_p) else psci_acpi_init(); - cpu_read_bootcpu_ops(); + init_bootcpu_ops(); smp_init_cpus(); smp_build_mpidr_hash(); diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index d4ed9a19d8fe..6f8477d7f3be 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -488,7 +488,7 @@ static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid) */ static int __init smp_cpu_setup(int cpu) { - if (cpu_read_ops(cpu)) + if (init_cpu_ops(cpu)) return -ENODEV; if (cpu_ops[cpu]->cpu_init(cpu)) -- cgit From de58ed5e16e62f36c7ed05552f18b7f9c647dcaf Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Thu, 19 Mar 2020 10:01:44 +1100 Subject: arm64: Introduce get_cpu_ops() helper function This introduces get_cpu_ops() to return the CPU operations for a given CPU index. For now, it simply returns @cpu_ops[cpu] as before. Also, a helper function, __cpu_try_die(), is introduced so that it can be shared by cpu_die() and ipi_cpu_crash_stop(). This patch shouldn't introduce any functional changes.
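The call-site conversion in the diff that follows is mechanical and repeats one pattern; it is condensed here from boot_secondary() for illustration, with example_boot_secondary() as a stand-in name that is not part of the patch.

	static int example_boot_secondary(unsigned int cpu)
	{
		/*
		 * Before this patch, callers indexed the cpu_ops[] array
		 * directly, e.g. cpu_ops[cpu]->cpu_boot(cpu). The array now
		 * becomes static in cpu_ops.c and every caller goes through
		 * the get_cpu_ops() accessor instead.
		 */
		const struct cpu_operations *ops = get_cpu_ops(cpu);

		if (ops->cpu_boot)
			return ops->cpu_boot(cpu);

		return -EOPNOTSUPP;
	}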
Signed-off-by: Gavin Shan Signed-off-by: Catalin Marinas Acked-by: Mark Rutland --- arch/arm64/include/asm/cpu_ops.h | 2 +- arch/arm64/kernel/cpu_ops.c | 7 +++- arch/arm64/kernel/cpuidle.c | 9 +++--- arch/arm64/kernel/setup.c | 6 ++-- arch/arm64/kernel/smp.c | 70 ++++++++++++++++++++++++++-------------- 5 files changed, 62 insertions(+), 32 deletions(-) diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index baa13b5db2ca..d28e8f37d3b4 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -55,8 +55,8 @@ struct cpu_operations { #endif }; -extern const struct cpu_operations *cpu_ops[NR_CPUS]; int __init init_cpu_ops(int cpu); +extern const struct cpu_operations *get_cpu_ops(int cpu); static inline void __init init_bootcpu_ops(void) { diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index a6c3c816b618..e133011f64b5 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -20,7 +20,7 @@ extern const struct cpu_operations acpi_parking_protocol_ops; #endif extern const struct cpu_operations cpu_psci_ops; -const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; +static const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = { &smp_spin_table_ops, @@ -111,3 +111,8 @@ int __init init_cpu_ops(int cpu) return 0; } + +const struct cpu_operations *get_cpu_ops(int cpu) +{ + return cpu_ops[cpu]; +} diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index e4d6af2fdec7..b512b5503f6e 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -18,11 +18,11 @@ int arm_cpuidle_init(unsigned int cpu) { + const struct cpu_operations *ops = get_cpu_ops(cpu); int ret = -EOPNOTSUPP; - if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_suspend && - cpu_ops[cpu]->cpu_init_idle) - ret = cpu_ops[cpu]->cpu_init_idle(cpu); + if (ops && ops->cpu_suspend && ops->cpu_init_idle) + ret = ops->cpu_init_idle(cpu); return ret; } @@ -37,8 +37,9 @@ int arm_cpuidle_init(unsigned int cpu) int arm_cpuidle_suspend(int index) { int cpu = smp_processor_id(); + const struct cpu_operations *ops = get_cpu_ops(cpu); - return cpu_ops[cpu]->cpu_suspend(index); + return ops->cpu_suspend(index); } #ifdef CONFIG_ACPI diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f66bd260cce8..3fd2c11c09fc 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -371,8 +371,10 @@ void __init setup_arch(char **cmdline_p) static inline bool cpu_can_disable(unsigned int cpu) { #ifdef CONFIG_HOTPLUG_CPU - if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable) - return cpu_ops[cpu]->cpu_can_disable(cpu); + const struct cpu_operations *ops = get_cpu_ops(cpu); + + if (ops && ops->cpu_can_disable) + return ops->cpu_can_disable(cpu); #endif return false; } diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 6f8477d7f3be..e5c9862c271b 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -93,8 +93,10 @@ static inline int op_cpu_kill(unsigned int cpu) */ static int boot_secondary(unsigned int cpu, struct task_struct *idle) { - if (cpu_ops[cpu]->cpu_boot) - return cpu_ops[cpu]->cpu_boot(cpu); + const struct cpu_operations *ops = get_cpu_ops(cpu); + + if (ops->cpu_boot) + return ops->cpu_boot(cpu); return -EOPNOTSUPP; } @@ -196,6 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void) { u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; struct mm_struct *mm = &init_mm; + 
const struct cpu_operations *ops; unsigned int cpu; cpu = task_cpu(current); @@ -227,8 +230,9 @@ asmlinkage notrace void secondary_start_kernel(void) */ check_local_cpu_capabilities(); - if (cpu_ops[cpu]->cpu_postboot) - cpu_ops[cpu]->cpu_postboot(); + ops = get_cpu_ops(cpu); + if (ops->cpu_postboot) + ops->cpu_postboot(); /* * Log the CPU info before it is marked online and might get read. @@ -266,19 +270,21 @@ asmlinkage notrace void secondary_start_kernel(void) #ifdef CONFIG_HOTPLUG_CPU static int op_cpu_disable(unsigned int cpu) { + const struct cpu_operations *ops = get_cpu_ops(cpu); + /* * If we don't have a cpu_die method, abort before we reach the point * of no return. CPU0 may not have an cpu_ops, so test for it. */ - if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die) + if (!ops || !ops->cpu_die) return -EOPNOTSUPP; /* * We may need to abort a hot unplug for some other mechanism-specific * reason. */ - if (cpu_ops[cpu]->cpu_disable) - return cpu_ops[cpu]->cpu_disable(cpu); + if (ops->cpu_disable) + return ops->cpu_disable(cpu); return 0; } @@ -314,15 +320,17 @@ int __cpu_disable(void) static int op_cpu_kill(unsigned int cpu) { + const struct cpu_operations *ops = get_cpu_ops(cpu); + /* * If we have no means of synchronising with the dying CPU, then assume * that it is really dead. We can only wait for an arbitrary length of * time and hope that it's dead, so let's skip the wait and just hope. */ - if (!cpu_ops[cpu]->cpu_kill) + if (!ops->cpu_kill) return 0; - return cpu_ops[cpu]->cpu_kill(cpu); + return ops->cpu_kill(cpu); } /* @@ -357,6 +365,7 @@ void __cpu_die(unsigned int cpu) void cpu_die(void) { unsigned int cpu = smp_processor_id(); + const struct cpu_operations *ops = get_cpu_ops(cpu); idle_task_exit(); @@ -370,12 +379,22 @@ void cpu_die(void) * mechanism must perform all required cache maintenance to ensure that * no dirty lines are lost in the process of shutting down the CPU. */ - cpu_ops[cpu]->cpu_die(cpu); + ops->cpu_die(cpu); BUG(); } #endif +static void __cpu_try_die(int cpu) +{ +#ifdef CONFIG_HOTPLUG_CPU + const struct cpu_operations *ops = get_cpu_ops(cpu); + + if (ops && ops->cpu_die) + ops->cpu_die(cpu); +#endif +} + /* * Kill the calling secondary CPU, early in bringup before it is turned * online. 
@@ -389,12 +408,11 @@ void cpu_die_early(void) /* Mark this CPU absent */ set_cpu_present(cpu, 0); -#ifdef CONFIG_HOTPLUG_CPU - update_cpu_boot_status(CPU_KILL_ME); - /* Check if we can park ourselves */ - if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die) - cpu_ops[cpu]->cpu_die(cpu); -#endif + if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) { + update_cpu_boot_status(CPU_KILL_ME); + __cpu_try_die(cpu); + } + update_cpu_boot_status(CPU_STUCK_IN_KERNEL); cpu_park_loop(); @@ -488,10 +506,13 @@ static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid) */ static int __init smp_cpu_setup(int cpu) { + const struct cpu_operations *ops; + if (init_cpu_ops(cpu)) return -ENODEV; - if (cpu_ops[cpu]->cpu_init(cpu)) + ops = get_cpu_ops(cpu); + if (ops->cpu_init(cpu)) return -ENODEV; set_cpu_possible(cpu, true); @@ -714,6 +735,7 @@ void __init smp_init_cpus(void) void __init smp_prepare_cpus(unsigned int max_cpus) { + const struct cpu_operations *ops; int err; unsigned int cpu; unsigned int this_cpu; @@ -744,10 +766,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus) if (cpu == smp_processor_id()) continue; - if (!cpu_ops[cpu]) + ops = get_cpu_ops(cpu); + if (!ops) continue; - err = cpu_ops[cpu]->cpu_prepare(cpu); + err = ops->cpu_prepare(cpu); if (err) continue; @@ -863,10 +886,8 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) local_irq_disable(); sdei_mask_local_cpu(); -#ifdef CONFIG_HOTPLUG_CPU - if (cpu_ops[cpu]->cpu_die) - cpu_ops[cpu]->cpu_die(cpu); -#endif + if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) + __cpu_try_die(cpu); /* just in case */ cpu_park_loop(); @@ -1044,8 +1065,9 @@ static bool have_cpu_die(void) { #ifdef CONFIG_HOTPLUG_CPU int any_cpu = raw_smp_processor_id(); + const struct cpu_operations *ops = get_cpu_ops(any_cpu); - if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die) + if (ops && ops->cpu_die) return true; #endif return false; -- cgit
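One detail worth noting from the diff above: get_cpu_ops() can still return NULL for a CPU whose enable method was never resolved, which is why callers outside the boot path such as op_cpu_disable(), cpu_can_disable() and have_cpu_die() keep their NULL checks. A minimal sketch of that pattern follows; the function name is a hypothetical stand-in, not from the patch.

	static bool example_cpu_has_die_method(unsigned int cpu)
	{
		const struct cpu_operations *ops = get_cpu_ops(cpu);

		/* ops may be NULL, e.g. CPU0 may not have an enable method */
		return ops && ops->cpu_die;
	}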