From eef94a3d09aab437c8c254de942d8b1aa76455e2 Mon Sep 17 00:00:00 2001 From: Yury Norov Date: Thu, 31 Aug 2017 11:30:50 +0300 Subject: arm64: move TASK_* definitions to ILP32 series [1] introduces the dependency on for TASK_SIZE macro. Which in turn requires , and include , giving a circular dependency, because TASK_SIZE is currently located in . In other architectures, TASK_SIZE is defined in , and moving TASK_SIZE there fixes the problem. Discussion: https://patchwork.kernel.org/patch/9929107/ [1] https://github.com/norov/linux/tree/ilp32-next CC: Will Deacon CC: Laura Abbott Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: James Morse Suggested-by: Mark Rutland Signed-off-by: Yury Norov Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 15 --------------- arch/arm64/include/asm/processor.h | 21 +++++++++++++++++++++ arch/arm64/kernel/entry.S | 2 +- 3 files changed, 22 insertions(+), 16 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 3585a5e26151..87296a6ed1b3 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -61,8 +61,6 @@ * KIMAGE_VADDR - the virtual address of the start of the kernel image * VA_BITS - the maximum number of bits for virtual addresses. * VA_START - the first kernel virtual address. - * TASK_SIZE - the maximum size of a user space task. - * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. */ #define VA_BITS (CONFIG_ARM64_VA_BITS) #define VA_START (UL(0xffffffffffffffff) - \ @@ -77,19 +75,6 @@ #define PCI_IO_END (VMEMMAP_START - SZ_2M) #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) #define FIXADDR_TOP (PCI_IO_START - SZ_2M) -#define TASK_SIZE_64 (UL(1) << VA_BITS) - -#ifdef CONFIG_COMPAT -#define TASK_SIZE_32 UL(0x100000000) -#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ - TASK_SIZE_32 : TASK_SIZE_64) -#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ - TASK_SIZE_32 : TASK_SIZE_64) -#else -#define TASK_SIZE TASK_SIZE_64 -#endif /* CONFIG_COMPAT */ - -#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) #define KERNEL_START _text #define KERNEL_END _end diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 29adab8138c3..7dddca21fc64 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -19,6 +19,10 @@ #ifndef __ASM_PROCESSOR_H #define __ASM_PROCESSOR_H +#define TASK_SIZE_64 (UL(1) << VA_BITS) + +#ifndef __ASSEMBLY__ + /* * Default implementation of macro that returns current * instruction pointer ("program counter"). @@ -37,6 +41,22 @@ #include #include +/* + * TASK_SIZE - the maximum size of a user space task. + * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. + */ +#ifdef CONFIG_COMPAT +#define TASK_SIZE_32 UL(0x100000000) +#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? 
\ + TASK_SIZE_32 : TASK_SIZE_64) +#else +#define TASK_SIZE TASK_SIZE_64 +#endif /* CONFIG_COMPAT */ + +#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) + #define STACK_TOP_MAX TASK_SIZE_64 #ifdef CONFIG_COMPAT #define AARCH32_VECTORS_BASE 0xffff0000 @@ -194,4 +214,5 @@ static inline void spin_lock_prefetch(const void *ptr) int cpu_enable_pan(void *__unused); int cpu_enable_cache_maint_trap(void *__unused); +#endif /* __ASSEMBLY__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e1c59d4008a8..f5e851eeda4b 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include -- cgit From 9339fd348dd98ed64879a48dc7290cc3fa4e2911 Mon Sep 17 00:00:00 2001 From: Yury Norov Date: Wed, 13 Sep 2017 21:08:30 +0300 Subject: arm64: fix documentation on kernel pages mappings to HYP VA The Documentation/arm64/memory.txt says: When using KVM, the hypervisor maps kernel pages in EL2, at a fixed offset from the kernel VA (top 24bits of the kernel VA set to zero): In fact, kernel addresses are transleted to HYP with kern_hyp_va macro, which has more options, and none of them assumes clearing of top 24bits of the kernel VA. Acked-by: Marc Zyngier Signed-off-by: Yury Norov [will: removed gory details] Signed-off-by: Will Deacon --- Documentation/arm64/memory.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt index d7273a5f6456..671bc0639262 100644 --- a/Documentation/arm64/memory.txt +++ b/Documentation/arm64/memory.txt @@ -86,9 +86,9 @@ Translation table lookup with 64KB pages: +-------------------------------------------------> [63] TTBR0/1 -When using KVM, the hypervisor maps kernel pages in EL2, at a fixed -offset from the kernel VA (top 24bits of the kernel VA set to zero): +When using KVM without the Virtualization Host Extensions, the hypervisor +maps kernel pages in EL2 at a fixed offset from the kernel VA. See the +kern_hyp_va macro for more details. -Start End Size Use ------------------------------------------------------------------------ -0000004000000000 0000007fffffffff 256GB kernel objects mapped in HYP +When using KVM with the Virtualization Host Extensions, no additional +mappings are created, since the host kernel runs directly in EL2. -- cgit From f67d5c4fbe8051af33e9109aa3d3024602d29474 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 22 Sep 2017 11:01:26 +0100 Subject: arm64: mm: Remove useless and wrong comments from fault.c Fault.c seems to be a magnet for useless and wrong comments, largely due to its ancestry in other architectures where the code has since moved on, but the comments have remained intact. This patch removes both useless and incorrect comments, leaving only those that say something correct and relevant. 
Reported-by: Wenjia Zhou Signed-off-by: Will Deacon --- arch/arm64/mm/fault.c | 52 +-------------------------------------------------- 1 file changed, 1 insertion(+), 51 deletions(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 2069e9bc0fca..d224c9d384a1 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -105,9 +105,6 @@ static void data_abort_decode(unsigned int esr) (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT); } -/* - * Decode mem abort information - */ static void mem_abort_decode(unsigned int esr) { pr_alert("Mem abort info:\n"); @@ -249,9 +246,6 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs, return false; } -/* - * The kernel tried to access some page that wasn't present. - */ static void __do_kernel_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -264,9 +258,6 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) return; - /* - * No handler, we'll have to terminate things with extreme prejudice. - */ bust_spinlocks(1); if (is_permission_fault(esr, regs, addr)) { @@ -291,10 +282,6 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, do_exit(SIGKILL); } -/* - * Something tried to access memory that isn't in our memory map. User mode - * accesses just cause a SIGSEGV - */ static void __do_user_fault(struct task_struct *tsk, unsigned long addr, unsigned int esr, unsigned int sig, int code, struct pt_regs *regs, int fault) @@ -559,23 +546,6 @@ no_context: return 0; } -/* - * First Level Translation Fault Handler - * - * We enter here because the first level page table doesn't contain a valid - * entry for the address. - * - * If the address is in kernel space (>= TASK_SIZE), then we are probably - * faulting in the vmalloc() area. - * - * If the init_task's first level page tables contains the relevant entry, we - * copy the it to this task. If not, we send the process a signal, fixup the - * exception, or oops the kernel. - * - * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt - * or a critical region, and should only copy the information from the master - * page table, nothing more. - */ static int __kprobes do_translation_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs) @@ -594,18 +564,11 @@ static int do_alignment_fault(unsigned long addr, unsigned int esr, return 0; } -/* - * This abort handler always returns "fault". - */ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - return 1; + return 1; /* "fault" */ } -/* - * This abort handler deals with Synchronous External Abort. - * It calls notifiers, and then returns "fault". - */ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) { struct siginfo info; @@ -710,13 +673,6 @@ static const struct fault_info fault_info[] = { { do_bad, SIGBUS, 0, "unknown 63" }, }; -/* - * Handle Synchronous External Aborts that occur in a guest kernel. - * - * The return value will be zero if the SEA was successfully handled - * and non-zero if there was an error processing the error or there was - * no error to process. - */ int handle_guest_sea(phys_addr_t addr, unsigned int esr) { int ret = -ENOENT; @@ -727,9 +683,6 @@ int handle_guest_sea(phys_addr_t addr, unsigned int esr) return ret; } -/* - * Dispatch a data abort to the relevant handler. 
- */ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -751,9 +704,6 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, arm64_notify_die("", regs, &info, esr); } -/* - * Handle stack alignment exceptions. - */ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) -- cgit From c2f0b54f10b12620c57f6e31233589b704a00ed5 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 14 Sep 2017 20:40:04 +0900 Subject: arm64: remove unneeded copy to init_utsname()->machine As you see in init/version.c, init_uts_ns.name.machine is initially set to UTS_MACHINE. There is no point to copy the same string. I dug the git history to figure out why this line is here. My best guess is like this: - This line has been around here since the initial support of arm64 by commit 9703d9d7f77c ("arm64: Kernel booting and initialisation"). If ARCH (=arm64) and UTS_MACHINE (=aarch64) do not match, arch/$(ARCH)/Makefile is supposed to override UTS_MACHINE, but the initial version of arch/arm64/Makefile missed to do that. Instead, the boot code copied "aarch64" to init_utsname()->machine. - Commit 94ed1f2cb5d4 ("arm64: setup: report ELF_PLATFORM as the machine for utsname") replaced "aarch64" with ELF_PLATFORM to make "uname" to reflect the endianness. - ELF_PLATFORM does not help to provide the UTS machine name to rpm target, so commit cfa88c79462d ("arm64: Set UTS_MACHINE in the Makefile") fixed it. The commit simply replaced ELF_PLATFORM with UTS_MACHINE, but missed the fact the string copy itself is no longer needed. Signed-off-by: Masahiro Yamada Signed-off-by: Will Deacon --- arch/arm64/kernel/Makefile | 2 -- arch/arm64/kernel/setup.c | 2 -- 2 files changed, 4 deletions(-) diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index f2b4e816b6de..29764d797187 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -10,8 +10,6 @@ CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_insn.o = -pg CFLAGS_REMOVE_return_address.o = -pg -CFLAGS_setup.o = -DUTS_MACHINE='"$(UTS_MACHINE)"' - # Object file lists. arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index d4b740538ad5..b2fdb59b69f6 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -246,7 +245,6 @@ void __init setup_arch(char **cmdline_p) { pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id()); - sprintf(init_utsname()->machine, UTS_MACHINE); init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; -- cgit From b4f4a2755665bf847231050b569f587ce0945482 Mon Sep 17 00:00:00 2001 From: Thomas Meyer Date: Thu, 21 Sep 2017 00:29:36 +0200 Subject: arm64: dma-mapping: Cocci spatch "vma_pages" Use vma_pages function on vma object instead of explicit computation. 
Found by coccinelle spatch "api/vma_pages.cocci" Reviewed-by: Robin Murphy Signed-off-by: Thomas Meyer Signed-off-by: Will Deacon --- arch/arm64/mm/dma-mapping.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 614af886b7ef..5f62090bda24 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -303,8 +303,7 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma, unsigned long pfn, size_t size) { int ret = -ENXIO; - unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> - PAGE_SHIFT; + unsigned long nr_vma_pages = vma_pages(vma); unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long off = vma->vm_pgoff; -- cgit From 4adcec1164de863ef8bcf2cdffa0ea09de2a08d3 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 20 Sep 2017 13:49:59 -0700 Subject: arm64: Always use REFCOUNT_FULL As discussed at the Linux Security Summit, arm64 prefers to use REFCOUNT_FULL by default. This enables it for the architecture. Cc: hw.likun@huawei.com Cc: Catalin Marinas Acked-by: Ard Biesheuvel Signed-off-by: Kees Cook Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0df64a6a56d4..9fe7a7f4c94c 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -119,6 +119,7 @@ config ARM64 select PCI_ECAM if ACPI select POWER_RESET select POWER_SUPPLY + select REFCOUNT_FULL select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK -- cgit From b65423ed466fb0494ef81a2c4177fa151732a33a Mon Sep 17 00:00:00 2001 From: Neil Leeder Date: Thu, 14 Sep 2017 15:30:13 -0400 Subject: perf: qcom_l2_pmu: add event names Add event names so that common events can be specified symbolically, for example: l2cache_0/total-reads/,l2cache_0/cycles/ Event names are displayed in 'perf list'. Signed-off-by: Neil Leeder Signed-off-by: Will Deacon --- drivers/perf/qcom_l2_pmu.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index b242cce10468..4fdc8486a8e4 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -92,6 +92,21 @@ #define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE) +/* + * Events + */ +#define L2_EVENT_CYCLES 0xfe +#define L2_EVENT_DCACHE_OPS 0x400 +#define L2_EVENT_ICACHE_OPS 0x401 +#define L2_EVENT_TLBI 0x402 +#define L2_EVENT_BARRIERS 0x403 +#define L2_EVENT_TOTAL_READS 0x405 +#define L2_EVENT_TOTAL_WRITES 0x406 +#define L2_EVENT_TOTAL_REQUESTS 0x407 +#define L2_EVENT_LDREX 0x420 +#define L2_EVENT_STREX 0x421 +#define L2_EVENT_CLREX 0x422 + static DEFINE_RAW_SPINLOCK(l2_access_lock); /** @@ -700,9 +715,12 @@ static struct attribute_group l2_cache_pmu_cpumask_group = { /* CCG format for perf RAW codes. 
*/ PMU_FORMAT_ATTR(l2_code, "config:4-11"); PMU_FORMAT_ATTR(l2_group, "config:0-3"); +PMU_FORMAT_ATTR(event, "config:0-11"); + static struct attribute *l2_cache_pmu_formats[] = { &format_attr_l2_code.attr, &format_attr_l2_group.attr, + &format_attr_event.attr, NULL, }; @@ -711,9 +729,45 @@ static struct attribute_group l2_cache_pmu_format_group = { .attrs = l2_cache_pmu_formats, }; +static ssize_t l2cache_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + +#define L2CACHE_EVENT_ATTR(_name, _id) \ + (&((struct perf_pmu_events_attr[]) { \ + { .attr = __ATTR(_name, 0444, l2cache_pmu_event_show, NULL), \ + .id = _id, } \ + })[0].attr.attr) + +static struct attribute *l2_cache_pmu_events[] = { + L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES), + L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS), + L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS), + L2CACHE_EVENT_ATTR(tlbi, L2_EVENT_TLBI), + L2CACHE_EVENT_ATTR(barriers, L2_EVENT_BARRIERS), + L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS), + L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES), + L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS), + L2CACHE_EVENT_ATTR(ldrex, L2_EVENT_LDREX), + L2CACHE_EVENT_ATTR(strex, L2_EVENT_STREX), + L2CACHE_EVENT_ATTR(clrex, L2_EVENT_CLREX), + NULL +}; + +static struct attribute_group l2_cache_pmu_events_group = { + .name = "events", + .attrs = l2_cache_pmu_events, +}; + static const struct attribute_group *l2_cache_pmu_attr_grps[] = { &l2_cache_pmu_format_group, &l2_cache_pmu_cpumask_group, + &l2_cache_pmu_events_group, NULL, }; -- cgit From ccaac16287f9b46c58777f5538c4ba3a9d4c3aeb Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 27 Sep 2017 14:50:38 +0100 Subject: arm64: consistently log boot/secondary CPU IDs Currently we inconsistently log identifying information for the boot CPU and secondary CPUs. For the boot CPU, we log the MIDR and MPIDR across separate messages, whereas for the secondary CPUs we only log the MIDR. In some cases, it would be useful to know the MPIDR of secondary CPUs, and it would be nice for these messages to be consistent. This patch ensures that in the primary and secondary boot paths, we log both the MPIDR and MIDR in a single message, with a consistent format. the MPIDR is consistently padded to 10 hex characters to cover Aff3 in bits 39:32, so that IDs can be compared easily. The newly redundant message in setup_arch() is removed. Signed-off-by: Mark Rutland Cc: Al Stone Cc: Catalin Marinas Cc: Will Deacon [will: added '0x' prefixes consistently] Signed-off-by: Will Deacon --- arch/arm64/kernel/setup.c | 5 ++--- arch/arm64/kernel/smp.c | 6 ++++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index b2fdb59b69f6..4bab73e80e33 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -102,7 +102,8 @@ void __init smp_setup_processor_id(void) * access percpu variable inside lock_release */ set_my_cpu_offset(0); - pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr); + pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n", + (unsigned long)mpidr, read_cpuid_id()); } bool arch_match_cpu_phys_id(int cpu, u64 phys_id) @@ -243,8 +244,6 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... 
NR_CPUS-1] = INVALID_HWID }; void __init setup_arch(char **cmdline_p) { - pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id()); - init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 9f7195a5773e..824561ef6b8a 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -216,6 +216,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) */ asmlinkage void secondary_start_kernel(void) { + u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; struct mm_struct *mm = &init_mm; unsigned int cpu; @@ -265,8 +266,9 @@ asmlinkage void secondary_start_kernel(void) * the CPU migration code to notice that the CPU is online * before we continue. */ - pr_info("CPU%u: Booted secondary processor [%08x]\n", - cpu, read_cpuid_id()); + pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n", + cpu, (unsigned long)mpidr, + read_cpuid_id()); update_cpu_boot_status(CPU_BOOT_SUCCESS); set_cpu_online(cpu, true); complete(&cpu_running); -- cgit From d1809d0e64325def212825852b02c2c63b40f5cd Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Wed, 27 Sep 2017 14:30:24 +0800 Subject: drivers/perf: arm_pmu_acpi: drop redundant acpi_disabled check acpi_disabled has been checked in armv8_pmu_driver_init and it shall be ZERO in arm_pmu_acpi_probe, clean up this unnecessary check. Signed-off-by: Shaokun Zhang Signed-off-by: Will Deacon --- drivers/perf/arm_pmu_acpi.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 3303dd8d8eb5..705f1a390e31 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c @@ -193,9 +193,6 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn) int pmu_idx = 0; int cpu, ret; - if (acpi_disabled) - return 0; - /* * Initialise and register the set of PMUs which we know about right * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we -- cgit From 396a5d4a5c32598aea5bff5cb2a995026a6f05dc Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 27 Sep 2017 08:51:30 -0700 Subject: arm64: Unconditionally support {ARCH_}HAVE_NMI{_SAFE_CMPXCHG} From what I can see there isn't anything about ACPI_APEI_SEA that means the arm64 architecture can or cannot support NMI safe cmpxchg or NMIs, so the 'if' condition here is not important. Let's remove it. Doing that allows us to support ftrace histograms via CONFIG_HIST_TRIGGERS that depends on the arch having the ARCH_HAVE_NMI_SAFE_CMPXCHG config selected. 
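To illustrate the kind of user this unblocks: the histogram trigger code relies on lock-free cmpxchg()-based updates that may be performed in NMI context, which is only safe when the architecture's cmpxchg() never falls back to disabling interrupts. A minimal sketch of that pattern (illustrative only, not part of this patch; the names are invented):

	#include <linux/atomic.h>

	static atomic64_t hist_hits = ATOMIC64_INIT(0);

	/* May be entered from task, IRQ or NMI context. */
	static void record_hit(void)
	{
		s64 old;

		do {
			old = atomic64_read(&hist_hits);
		} while (atomic64_cmpxchg(&hist_hits, old, old + 1) != old);
	}
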
Cc: Tyler Baicar Cc: Jonathan (Zhixiong) Zhang Cc: Dongjiu Geng Acked-by: James Morse Signed-off-by: Stephen Boyd Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9fe7a7f4c94c..05aa5924420e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -21,7 +21,7 @@ config ARM64 select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select ARCH_HAVE_NMI_SAFE_CMPXCHG if ACPI_APEI_SEA + select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_USE_CMPXCHG_LOCKREF select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_ATOMIC_RMW @@ -98,7 +98,7 @@ config ARM64 select HAVE_IRQ_TIME_ACCOUNTING select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP if NUMA - select HAVE_NMI if ACPI_APEI_SEA + select HAVE_NMI select HAVE_PATA_PLATFORM select HAVE_PERF_EVENTS select HAVE_PERF_REGS -- cgit From 359be67862c5e013fcf7c04e352d8824105a76ed Mon Sep 17 00:00:00 2001 From: Matthieu CASTET Date: Mon, 2 Oct 2017 15:01:55 +0200 Subject: dma mapping : export caller to vmallocinfo For example on arm64 board, this add info to "user" entries in vmallocinfo Before : [...] 0xffffff8008997000 0xffffff80089d8000 266240 user [...] Afer : [...] 0xffffff8008997000 0xffffff80089d8000 266240 atomic_pool_init+0x0/0x1d8 user [...] This help to debug mapping issues, and is consistent with others entries (ioremap, vmalloc, ...) that already provide caller. Acked-by: Catalin Marinas Signed-off-by: Matthieu CASTET Signed-off-by: Will Deacon --- arch/arm64/mm/dma-mapping.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 5f62090bda24..b45c5bcaeccb 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -166,7 +166,7 @@ static void *__dma_alloc(struct device *dev, size_t size, /* create a coherent mapping */ page = virt_to_page(ptr); coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, - prot, NULL); + prot, __builtin_return_address(0)); if (!coherent_ptr) goto no_map; -- cgit From 0c3039ffc07cd7c328eca3f9559d0d0102ac9bec Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 6 Oct 2017 03:00:01 +0100 Subject: arm64: elf.h: Correct comment about READ_IMPLIES_EXEC propagation Process personality always propagates across a fork(), but can change at an execve(). Signed-off-by: Ben Hutchings Signed-off-by: Will Deacon --- arch/arm64/include/asm/elf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 33be513ef24c..fac1c4de7898 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -188,8 +188,8 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; #define compat_start_thread compat_start_thread /* - * Unlike the native SET_PERSONALITY macro, the compat version inherits - * READ_IMPLIES_EXEC across a fork() since this is the behaviour on + * Unlike the native SET_PERSONALITY macro, the compat version maintains + * READ_IMPLIES_EXEC across an execve() since this is the behaviour on * arch/arm/. */ #define COMPAT_SET_PERSONALITY(ex) \ -- cgit From f5e035f8694c3bdddc66ea46ecda965ee6853718 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 11 Oct 2017 14:01:02 +0100 Subject: arm64: Expose support for optional ARMv8-A features ARMv8-A adds a few optional features for ARMv8.2 and ARMv8.3. 
Expose them to the userspace via HWCAPs and mrs emulation. SHA2-512 - Instruction support for SHA512 Hash algorithm (e.g SHA512H, SHA512H2, SHA512U0, SHA512SU1) SHA3 - SHA3 crypto instructions (EOR3, RAX1, XAR, BCAX). SM3 - Instruction support for Chinese cryptography algorithm SM3 SM4 - Instruction support for Chinese cryptography algorithm SM4 DP - Dot Product instructions (UDOT, SDOT). Cc: Will Deacon Cc: Mark Rutland Cc: Dave Martin Cc: Marc Zyngier Reviewed-by: Catalin Marinas Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- Documentation/arm64/cpu-feature-registers.txt | 12 +++++++++++- arch/arm64/include/asm/sysreg.h | 4 ++++ arch/arm64/include/uapi/asm/hwcap.h | 5 +++++ arch/arm64/kernel/cpufeature.c | 9 +++++++++ arch/arm64/kernel/cpuinfo.c | 5 +++++ 5 files changed, 34 insertions(+), 1 deletion(-) diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt index dad411d635d8..011ddfc1e570 100644 --- a/Documentation/arm64/cpu-feature-registers.txt +++ b/Documentation/arm64/cpu-feature-registers.txt @@ -110,10 +110,20 @@ infrastructure: x--------------------------------------------------x | Name | bits | visible | |--------------------------------------------------| - | RES0 | [63-32] | n | + | RES0 | [63-48] | n | + |--------------------------------------------------| + | DP | [47-44] | y | + |--------------------------------------------------| + | SM4 | [43-40] | y | + |--------------------------------------------------| + | SM3 | [39-36] | y | + |--------------------------------------------------| + | SHA3 | [35-32] | y | |--------------------------------------------------| | RDM | [31-28] | y | |--------------------------------------------------| + | RES0 | [27-24] | n | + |--------------------------------------------------| | ATOMICS | [23-20] | y | |--------------------------------------------------| | CRC32 | [19-16] | y | diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f707fed5886f..32f489ba952f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -318,6 +318,10 @@ #define SCTLR_EL1_CP15BEN (1 << 5) /* id_aa64isar0 */ +#define ID_AA64ISAR0_DP_SHIFT 44 +#define ID_AA64ISAR0_SM4_SHIFT 40 +#define ID_AA64ISAR0_SM3_SHIFT 36 +#define ID_AA64ISAR0_SHA3_SHIFT 32 #define ID_AA64ISAR0_RDM_SHIFT 28 #define ID_AA64ISAR0_ATOMICS_SHIFT 20 #define ID_AA64ISAR0_CRC32_SHIFT 16 diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 4b9344cba83a..a4bad90a3fa3 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -36,5 +36,10 @@ #define HWCAP_FCMA (1 << 14) #define HWCAP_LRCPC (1 << 15) #define HWCAP_DCPOP (1 << 16) +#define HWCAP_SHA3 (1 << 17) +#define HWCAP_SM3 (1 << 18) +#define HWCAP_SM4 (1 << 19) +#define HWCAP_ASIMDDP (1 << 20) +#define HWCAP_SHA512 (1 << 21) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index cd52d365d1f0..3214ee44de56 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -107,6 +107,10 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) * sync with the documentation of the CPU feature register ABI. 
*/ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_DP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM4_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SHA3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0), @@ -921,9 +925,14 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD), diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 311885962830..1ff1c5a67081 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -69,6 +69,11 @@ static const char *const hwcap_str[] = { "fcma", "lrcpc", "dcpop", + "sha3", + "sm3", + "sm4", + "asimddp", + "sha512", NULL }; -- cgit From 611a7bc74ed2dcbab6693c20bb534cfcf15f9d1d Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 11 Oct 2017 14:01:03 +0100 Subject: arm64: docs: describe ELF hwcaps We don't document our ELF hwcaps, leaving developers to interpret them according to hearsay, guesswork, or (in exceptional cases) inspection of the current kernel code. This is less than optimal, and it would be far better if we had some definitive description of each of the ELF hwcaps that developers could refer to. This patch adds a document describing the (native) arm64 ELF hwcaps. 
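As a concrete example of the usage the document recommends, a userspace program can probe the bits added earlier in this series purely through the auxiliary vector (illustrative, self-contained sketch; the HWCAP_* values simply mirror the uapi header added above):

	#include <stdio.h>
	#include <sys/auxv.h>

	/* Mirrors arch/arm64/include/uapi/asm/hwcap.h from this series. */
	#define HWCAP_SHA3	(1 << 17)
	#define HWCAP_ASIMDDP	(1 << 20)

	int main(void)
	{
		unsigned long hwcaps = getauxval(AT_HWCAP);

		printf("SHA3 instructions:        %s\n",
		       (hwcaps & HWCAP_SHA3) ? "present" : "absent");
		printf("Dot Product instructions: %s\n",
		       (hwcaps & HWCAP_ASIMDDP) ? "present" : "absent");
		return 0;
	}
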
Cc: Catalin Marinas Cc: Dave Martin Cc: Will Deacon Signed-off-by: Mark Rutland [ Updated new hwcap entries in the document ] Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- Documentation/arm64/elf_hwcaps.txt | 156 +++++++++++++++++++++++++++++++++++++ 1 file changed, 156 insertions(+) create mode 100644 Documentation/arm64/elf_hwcaps.txt diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt new file mode 100644 index 000000000000..0ba180522e3c --- /dev/null +++ b/Documentation/arm64/elf_hwcaps.txt @@ -0,0 +1,156 @@ +ARM64 ELF hwcaps +================ + +This document describes the usage and semantics of the arm64 ELF hwcaps. + + +1. Introduction +--------------- + +Some hardware or software features are only available on some CPU +implementations, and/or with certain kernel configurations, but have no +architected discovery mechanism available to userspace code at EL0. The +kernel exposes the presence of these features to userspace through a set +of flags called hwcaps, exposed in the auxilliary vector. + +Userspace software can test for features by acquiring the AT_HWCAP entry +of the auxilliary vector, and testing whether the relevant flags are +set, e.g. + +bool floating_point_is_present(void) +{ + unsigned long hwcaps = getauxval(AT_HWCAP); + if (hwcaps & HWCAP_FP) + return true; + + return false; +} + +Where software relies on a feature described by a hwcap, it should check +the relevant hwcap flag to verify that the feature is present before +attempting to make use of the feature. + +Features cannot be probed reliably through other means. When a feature +is not available, attempting to use it may result in unpredictable +behaviour, and is not guaranteed to result in any reliable indication +that the feature is unavailable, such as a SIGILL. + + +2. Interpretation of hwcaps +--------------------------- + +The majority of hwcaps are intended to indicate the presence of features +which are described by architected ID registers inaccessible to +userspace code at EL0. These hwcaps are defined in terms of ID register +fields, and should be interpreted with reference to the definition of +these fields in the ARM Architecture Reference Manual (ARM ARM). + +Such hwcaps are described below in the form: + + Functionality implied by idreg.field == val. + +Such hwcaps indicate the availability of functionality that the ARM ARM +defines as being present when idreg.field has value val, but do not +indicate that idreg.field is precisely equal to val, nor do they +indicate the absence of functionality implied by other values of +idreg.field. + +Other hwcaps may indicate the presence of features which cannot be +described by ID registers alone. These may be described without +reference to ID registers, and may refer to other documentation. + + +3. The hwcaps exposed in AT_HWCAP +--------------------------------- + +HWCAP_FP + + Functionality implied by ID_AA64PFR0_EL1.FP == 0b0000. + +HWCAP_ASIMD + + Functionality implied by ID_AA64PFR0_EL1.AdvSIMD == 0b0000. + +HWCAP_EVTSTRM + + The generic timer is configured to generate events at a frequency of + approximately 100KHz. + +HWCAP_AES + + Functionality implied by ID_AA64ISAR1_EL1.AES == 0b0001. + +HWCAP_PMULL + + Functionality implied by ID_AA64ISAR1_EL1.AES == 0b0010. + +HWCAP_SHA1 + + Functionality implied by ID_AA64ISAR0_EL1.SHA1 == 0b0001. + +HWCAP_SHA2 + + Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0001. + +HWCAP_CRC32 + + Functionality implied by ID_AA64ISAR0_EL1.CRC32 == 0b0001. 
+ +HWCAP_ATOMICS + + Functionality implied by ID_AA64ISAR0_EL1.Atomic == 0b0010. + +HWCAP_FPHP + + Functionality implied by ID_AA64PFR0_EL1.FP == 0b0001. + +HWCAP_ASIMDHP + + Functionality implied by ID_AA64PFR0_EL1.AdvSIMD == 0b0001. + +HWCAP_CPUID + + EL0 access to certain ID registers is available, to the extent + described by Documentation/arm64/cpu-feature-registers.txt. + + These ID registers may imply the availability of features. + +HWCAP_ASIMDRDM + + Functionality implied by ID_AA64ISAR0_EL1.RDM == 0b0001. + +HWCAP_JSCVT + + Functionality implied by ID_AA64ISAR1_EL1.JSCVT == 0b0001. + +HWCAP_FCMA + + Functionality implied by ID_AA64ISAR1_EL1.FCMA == 0b0001. + +HWCAP_LRCPC + + Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0001. + +HWCAP_DCPOP + + Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0001. + +HWCAP_SHA3 + + Functionality implied by ID_AA64ISAR0_EL1.SHA3 == 0b0001. + +HWCAP_SM3 + + Functionality implied by ID_AA64ISAR0_EL1.SM3 == 0b0001. + +HWCAP_SM4 + + Functionality implied by ID_AA64ISAR0_EL1.SM4 == 0b0001. + +HWCAP_ASIMDDP + + Functionality implied by ID_AA64ISAR0_EL1.DP == 0b0001. + +HWCAP_SHA512 + + Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0002. -- cgit From ec5c8e429d07737ee94ee1fd2ae5029547484194 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Fri, 13 Oct 2017 14:32:55 +0100 Subject: arm_arch_timer: Expose event stream status The arch timer configuration for a CPU might get reset after suspending said CPU. In order to reliably use the event stream in the kernel (e.g. for delays), we keep track of the state where we can safely consider the event stream as properly configured. After writing to cntkctl, we issue an ISB to ensure that subsequent delay loops can rely on the event stream being enabled. Signed-off-by: Julien Thierry Acked-by: Mark Rutland Cc: Marc Zyngier Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm/include/asm/arch_timer.h | 1 + arch/arm64/include/asm/arch_timer.h | 1 + drivers/clocksource/arm_arch_timer.c | 25 ++++++++++++++++++++++--- include/clocksource/arm_arch_timer.h | 6 ++++++ 4 files changed, 30 insertions(+), 3 deletions(-) diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index d4ebf5679f1f..0b6e1040f9e7 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -106,6 +106,7 @@ static inline u32 arch_timer_get_cntkctl(void) static inline void arch_timer_set_cntkctl(u32 cntkctl) { asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl)); + isb(); } #endif diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index a652ce0a5cb2..bdedd8f748d1 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -144,6 +144,7 @@ static inline u32 arch_timer_get_cntkctl(void) static inline void arch_timer_set_cntkctl(u32 cntkctl) { write_sysreg(cntkctl, cntkctl_el1); + isb(); } static inline u64 arch_counter_get_cntpct(void) diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index fd4b7f684bd0..13e6baa13a89 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -77,6 +77,7 @@ static bool arch_timer_mem_use_virtual; static bool arch_counter_suspend_stop; static bool vdso_default = true; +static cpumask_t evtstrm_available = CPU_MASK_NONE; static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); static int __init early_evtstrm_cfg(char *buf) @@ -740,6 +741,7 @@ 
static void arch_timer_evtstrm_enable(int divider) #ifdef CONFIG_COMPAT compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM; #endif + cpumask_set_cpu(smp_processor_id(), &evtstrm_available); } static void arch_timer_configure_evtstream(void) @@ -864,6 +866,16 @@ u32 arch_timer_get_rate(void) return arch_timer_rate; } +bool arch_timer_evtstrm_available(void) +{ + /* + * We might get called from a preemptible context. This is fine + * because availability of the event stream should be always the same + * for a preemptible context and context where we might resume a task. + */ + return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available); +} + static u64 arch_counter_get_cntvct_mem(void) { u32 vct_lo, vct_hi, tmp_hi; @@ -929,6 +941,8 @@ static int arch_timer_dying_cpu(unsigned int cpu) { struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); + cpumask_clear_cpu(smp_processor_id(), &evtstrm_available); + arch_timer_stop(clk); return 0; } @@ -938,10 +952,16 @@ static DEFINE_PER_CPU(unsigned long, saved_cntkctl); static int arch_timer_cpu_pm_notify(struct notifier_block *self, unsigned long action, void *hcpu) { - if (action == CPU_PM_ENTER) + if (action == CPU_PM_ENTER) { __this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl()); - else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) + + cpumask_clear_cpu(smp_processor_id(), &evtstrm_available); + } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) { arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl)); + + if (elf_hwcap & HWCAP_EVTSTRM) + cpumask_set_cpu(smp_processor_id(), &evtstrm_available); + } return NOTIFY_OK; } @@ -1017,7 +1037,6 @@ static int __init arch_timer_register(void) if (err) goto out_unreg_notify; - /* Register and immediately configure the timer on the boot CPU */ err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING, "clockevents/arm/arch_timer:starting", diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index cc805b72994a..4e28283e2ec6 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -93,6 +93,7 @@ struct arch_timer_mem { extern u32 arch_timer_get_rate(void); extern u64 (*arch_timer_read_counter)(void); extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void); +extern bool arch_timer_evtstrm_available(void); #else @@ -106,6 +107,11 @@ static inline u64 arch_timer_read_counter(void) return 0; } +static inline bool arch_timer_evtstrm_available(void) +{ + return false; +} + #endif #endif -- cgit From 7b77452ec524fa845efdec28ae16e2202cf797dc Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Fri, 13 Oct 2017 14:32:56 +0100 Subject: arm64: use WFE for long delays The current delay implementation uses the yield instruction, which is a hint that it is beneficial to schedule another thread. As this is a hint, it may be implemented as a NOP, causing all delays to be busy loops. This is the case for many existing CPUs. Taking advantage of the generic timer sending periodic events to all cores, we can use WFE during delays to reduce power consumption. This is beneficial only for delays longer than the period of the timer event stream. If timer event stream is not enabled, delays will behave as yield/busy loops. 
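For a rough sense of the numbers involved (derived from the constants in the patch below, illustrative only):

	/*
	 * 0x10C7 = 4295 ~= 2^32 / 10^6, so for udelay():
	 *
	 *   cycles = ((usecs * 0x10C7) * loops_per_jiffy * HZ) >> 32
	 *          ~= usecs * (loops_per_jiffy * HZ) / 10^6
	 *
	 * i.e. microseconds scaled to timer cycles. With the 100us event
	 * stream period, udelay(50) still busy-waits on cpu_relax(),
	 * while udelay(500) can spend most of its wait in WFE and only
	 * polls for the final fraction of a period.
	 */
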
Signed-off-by: Julien Thierry Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/lib/delay.c | 23 +++++++++++++++++++---- include/clocksource/arm_arch_timer.h | 4 +++- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/arch/arm64/lib/delay.c b/arch/arm64/lib/delay.c index dad4ec9bbfd1..e48ac402e7be 100644 --- a/arch/arm64/lib/delay.c +++ b/arch/arm64/lib/delay.c @@ -24,10 +24,28 @@ #include #include +#include + +#define USECS_TO_CYCLES(time_usecs) \ + xloops_to_cycles((time_usecs) * 0x10C7UL) + +static inline unsigned long xloops_to_cycles(unsigned long xloops) +{ + return (xloops * loops_per_jiffy * HZ) >> 32; +} + void __delay(unsigned long cycles) { cycles_t start = get_cycles(); + if (arch_timer_evtstrm_available()) { + const cycles_t timer_evt_period = + USECS_TO_CYCLES(ARCH_TIMER_EVT_STREAM_PERIOD_US); + + while ((get_cycles() - start + timer_evt_period) < cycles) + wfe(); + } + while ((get_cycles() - start) < cycles) cpu_relax(); } @@ -35,10 +53,7 @@ EXPORT_SYMBOL(__delay); inline void __const_udelay(unsigned long xloops) { - unsigned long loops; - - loops = xloops * loops_per_jiffy * HZ; - __delay(loops >> 32); + __delay(xloops_to_cycles(xloops)); } EXPORT_SYMBOL(__const_udelay); diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index 4e28283e2ec6..349e5957c949 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -67,7 +67,9 @@ enum arch_timer_spi_nr { #define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */ #define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */ -#define ARCH_TIMER_EVT_STREAM_FREQ 10000 /* 100us */ +#define ARCH_TIMER_EVT_STREAM_PERIOD_US 100 +#define ARCH_TIMER_EVT_STREAM_FREQ \ + (USEC_PER_SEC / ARCH_TIMER_EVT_STREAM_PERIOD_US) struct arch_timer_kvm_info { struct timecounter timecounter; -- cgit From ee10b9c92758e9d7951b28cb6cd4221d51c1654f Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 25 Sep 2017 12:54:43 +0530 Subject: acpi/arm64: pr_err() strings should end with newlines pr_err() messages should terminated with a new-line to avoid other messages being concatenated onto the end. Signed-off-by: Arvind Yadav Signed-off-by: Lorenzo Pieralisi Acked-by: Hanjun Guo --- drivers/acpi/arm64/gtdt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c index 597a737d538f..92f9edf9d11e 100644 --- a/drivers/acpi/arm64/gtdt.c +++ b/drivers/acpi/arm64/gtdt.c @@ -199,7 +199,7 @@ static int __init gtdt_parse_timer_block(struct acpi_gtdt_timer_block *block, struct acpi_gtdt_timer_entry *gtdt_frame; if (!block->timer_count) { - pr_err(FW_BUG "GT block present, but frame count is zero."); + pr_err(FW_BUG "GT block present, but frame count is zero.\n"); return -ENODEV; } -- cgit From 758081312e316703ed6928c8bcdb9f0a84e2ee92 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 28 Sep 2017 13:57:10 +0100 Subject: ACPI/IORT: Remove leftover ACPI_IORT_SMMU_V3_PXM_VALID guard The conditional ACPI_IORT_SMMU_V3_PXM_VALID guard around arm_smmu_v3_set_proximity() was added to manage a cross tree ACPICA merge dependency; with ACPICA changes merged in: commit c944230064eb ("ACPICA: iasl: Update to IORT SMMUv3 disassembling") the guard has become useless. Remove it. 
Signed-off-by: Lorenzo Pieralisi Acked-by: Hanjun Guo Cc: Hanjun Guo Cc: Sudeep Holla Cc: Ganapatrao Kulkarni --- drivers/acpi/arm64/iort.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 9565d572f8dd..89c42aa71858 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -968,7 +968,7 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node) return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE; } -#if defined(CONFIG_ACPI_NUMA) && defined(ACPI_IORT_SMMU_V3_PXM_VALID) +#if defined(CONFIG_ACPI_NUMA) /* * set numa proximity domain for smmuv3 device */ -- cgit From e3d4939267925ab66f39123744ffb4bc74a13149 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 28 Sep 2017 14:03:33 +0100 Subject: ACPI/IORT: Improve functions return type/storage class specifier indentation Some functions definition indentations are using a style that is frowned upon with return value type/storage class specifier in a separate line. Reindent the function definitions to fix them. Signed-off-by: Lorenzo Pieralisi Acked-by: Hanjun Guo Cc: Hanjun Guo Cc: Sudeep Holla --- drivers/acpi/arm64/iort.c | 36 +++++++++++++++++------------------- include/linux/acpi_iort.h | 4 ++-- 2 files changed, 19 insertions(+), 21 deletions(-) diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 89c42aa71858..5b5630e52281 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -88,8 +88,8 @@ static inline int iort_set_fwnode(struct acpi_iort_node *iort_node, * * Returns: fwnode_handle pointer on success, NULL on failure */ -static inline -struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node) +static inline struct fwnode_handle *iort_get_fwnode( + struct acpi_iort_node *node) { struct iort_fwnode *curr; struct fwnode_handle *fwnode = NULL; @@ -306,9 +306,8 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, return 0; } -static -struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, - u32 *id_out, int index) +static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, + u32 *id_out, int index) { struct acpi_iort_node *parent; struct acpi_iort_id_mapping *map; @@ -392,10 +391,9 @@ fail_map: return NULL; } -static -struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node, - u32 *id_out, u8 type_mask, - int index) +static struct acpi_iort_node *iort_node_map_platform_id( + struct acpi_iort_node *node, u32 *id_out, u8 type_mask, + int index) { struct acpi_iort_node *parent; u32 id; @@ -623,14 +621,14 @@ static inline bool iort_iommu_driver_enabled(u8 type) } #ifdef CONFIG_IOMMU_API -static inline -const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) +static inline const struct iommu_ops *iort_fwspec_iommu_ops( + struct iommu_fwspec *fwspec) { return (fwspec && fwspec->ops) ? 
fwspec->ops : NULL; } -static inline -int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) +static inline int iort_add_device_replay(const struct iommu_ops *ops, + struct device *dev) { int err = 0; @@ -640,11 +638,11 @@ int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) return err; } #else -static inline -const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) +static inline const struct iommu_ops *iort_fwspec_iommu_ops( + struct iommu_fwspec *fwspec) { return NULL; } -static inline -int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) +static inline int iort_add_device_replay(const struct iommu_ops *ops, + struct device *dev) { return 0; } #endif @@ -1077,8 +1075,8 @@ static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = { .iommu_init_resources = arm_smmu_init_resources }; -static __init -const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node) +static __init const struct iort_iommu_config *iort_get_iommu_cfg( + struct acpi_iort_node *node) { switch (node->type) { case ACPI_IORT_NODE_SMMU_V3: diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 8d3f0bf80379..2f7a29242b87 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -49,8 +49,8 @@ static inline void acpi_configure_pmsi_domain(struct device *dev) { } /* IOMMU interface */ static inline void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size) { } -static inline -const struct iommu_ops *iort_iommu_configure(struct device *dev) +static inline const struct iommu_ops *iort_iommu_configure( + struct device *dev) { return NULL; } #endif -- cgit From 896dd2c3248422e1e7711cc582615844e2f9de86 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Wed, 20 Sep 2017 17:03:58 +0100 Subject: ACPI/IORT: Make platform devices initialization code SMMU agnostic The way current IORT code initializes platform devices for SMMU nodes is somewhat tied (mostly for naming convention) to the SMMU nodes themselves but it need not be in that it is completely generic and can easily be made so by structures renaming and code reshuffling. Rework IORT platform devices initialization code to make the functions and data structures SMMU agnostic. No functional changes intended. 
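The practical benefit is that a future, non-SMMU IORT node type only needs its own ops table plus one extra case in iort_get_dev_cfg(); roughly as in the following hypothetical sketch (the "foo" names and ACPI_IORT_NODE_FOO are invented for illustration):

	static const struct iort_dev_config iort_foo_cfg __initconst = {
		.name			= "foo",
		.dev_count_resources	= foo_count_resources,
		.dev_init_resources	= foo_init_resources,
	};

	/* ... and in iort_get_dev_cfg(): */
	case ACPI_IORT_NODE_FOO:
		return &iort_foo_cfg;
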
Signed-off-by: Lorenzo Pieralisi Acked-by: Hanjun Guo Cc: Hanjun Guo Cc: Sudeep Holla --- drivers/acpi/arm64/iort.c | 63 ++++++++++++++++++++++------------------------- 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 5b5630e52281..c395d6bf44d4 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1049,33 +1049,33 @@ static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node) return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK; } -struct iort_iommu_config { +struct iort_dev_config { const char *name; - int (*iommu_init)(struct acpi_iort_node *node); - bool (*iommu_is_coherent)(struct acpi_iort_node *node); - int (*iommu_count_resources)(struct acpi_iort_node *node); - void (*iommu_init_resources)(struct resource *res, + int (*dev_init)(struct acpi_iort_node *node); + bool (*dev_is_coherent)(struct acpi_iort_node *node); + int (*dev_count_resources)(struct acpi_iort_node *node); + void (*dev_init_resources)(struct resource *res, struct acpi_iort_node *node); - void (*iommu_set_proximity)(struct device *dev, + void (*dev_set_proximity)(struct device *dev, struct acpi_iort_node *node); }; -static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = { +static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = { .name = "arm-smmu-v3", - .iommu_is_coherent = arm_smmu_v3_is_coherent, - .iommu_count_resources = arm_smmu_v3_count_resources, - .iommu_init_resources = arm_smmu_v3_init_resources, - .iommu_set_proximity = arm_smmu_v3_set_proximity, + .dev_is_coherent = arm_smmu_v3_is_coherent, + .dev_count_resources = arm_smmu_v3_count_resources, + .dev_init_resources = arm_smmu_v3_init_resources, + .dev_set_proximity = arm_smmu_v3_set_proximity, }; -static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = { +static const struct iort_dev_config iort_arm_smmu_cfg __initconst = { .name = "arm-smmu", - .iommu_is_coherent = arm_smmu_is_coherent, - .iommu_count_resources = arm_smmu_count_resources, - .iommu_init_resources = arm_smmu_init_resources + .dev_is_coherent = arm_smmu_is_coherent, + .dev_count_resources = arm_smmu_count_resources, + .dev_init_resources = arm_smmu_init_resources }; -static __init const struct iort_iommu_config *iort_get_iommu_cfg( +static __init const struct iort_dev_config *iort_get_dev_cfg( struct acpi_iort_node *node) { switch (node->type) { @@ -1089,31 +1089,28 @@ static __init const struct iort_iommu_config *iort_get_iommu_cfg( } /** - * iort_add_smmu_platform_device() - Allocate a platform device for SMMU - * @node: Pointer to SMMU ACPI IORT node + * iort_add_platform_device() - Allocate a platform device for IORT node + * @node: Pointer to device ACPI IORT node * * Returns: 0 on success, <0 failure */ -static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node) +static int __init iort_add_platform_device(struct acpi_iort_node *node, + const struct iort_dev_config *ops) { struct fwnode_handle *fwnode; struct platform_device *pdev; struct resource *r; enum dev_dma_attr attr; int ret, count; - const struct iort_iommu_config *ops = iort_get_iommu_cfg(node); - - if (!ops) - return -ENODEV; pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); if (!pdev) return -ENOMEM; - if (ops->iommu_set_proximity) - ops->iommu_set_proximity(&pdev->dev, node); + if (ops->dev_set_proximity) + ops->dev_set_proximity(&pdev->dev, node); - count = ops->iommu_count_resources(node); + count = ops->dev_count_resources(node); r = 
kcalloc(count, sizeof(*r), GFP_KERNEL); if (!r) { @@ -1121,7 +1118,7 @@ static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node) goto dev_put; } - ops->iommu_init_resources(r, node); + ops->dev_init_resources(r, node); ret = platform_device_add_resources(pdev, r, count); /* @@ -1156,8 +1153,8 @@ static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node) pdev->dev.fwnode = fwnode; - attr = ops->iommu_is_coherent(node) ? - DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; + attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ? + DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; /* Configure DMA for the page table walker */ acpi_dma_configure(&pdev->dev, attr); @@ -1182,6 +1179,7 @@ static void __init iort_init_platform_devices(void) struct acpi_table_iort *iort; struct fwnode_handle *fwnode; int i, ret; + const struct iort_dev_config *ops; /* * iort_table and iort both point to the start of IORT table, but @@ -1201,16 +1199,15 @@ static void __init iort_init_platform_devices(void) return; } - if ((iort_node->type == ACPI_IORT_NODE_SMMU) || - (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) { - + ops = iort_get_dev_cfg(iort_node); + if (ops) { fwnode = acpi_alloc_fwnode_static(); if (!fwnode) return; iort_set_fwnode(iort_node, fwnode); - ret = iort_add_smmu_platform_device(iort_node); + ret = iort_add_platform_device(iort_node, ops); if (ret) { iort_delete_fwnode(iort_node); acpi_free_fwnode_static(fwnode); -- cgit From 0a71d8b95f8adc343d986bf3093fc1baf59df4b1 Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Fri, 13 Oct 2017 15:09:47 +0800 Subject: ACPI/IORT: Look up IORT node through struct fwnode_handle pointer Current IORT code provides a function (ie iort_get_fwnode()) which looks up a struct fwnode_handle pointer through a struct acpi_iort_node pointer for SMMU components but it lacks a function that implements the reverse look-up, namely struct fwnode_handle* -> struct acpi_iort_node*. Devices that are not IORT named components cannot be retrieved through their associated IORT named component scan interface because they just are not represented in the ACPI namespace; the reverse look-up is therefore required for all platform devices that represent IORT nodes (eg SMMUs) so that the struct acpi_iort_node* can be retrieved from the struct device->fwnode pointer. 
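In short, both directions of the association become available (condensed illustration of the calls involved, not a literal code path):

	/* Forward map, recorded when the IORT platform device is created: */
	iort_set_fwnode(iort_node, fwnode);

	/* Reverse map, used when that device is later passed to the IORT API: */
	node = iort_get_iort_node(dev->fwnode);
	if (node)
		return node;	/* dev is an IORT platform device (e.g. an SMMU) */
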
Signed-off-by: Hanjun Guo [lorenzo.pieralisi@arm.com: re-indented/rewrote the commit log] Signed-off-by: Lorenzo Pieralisi --- drivers/acpi/arm64/iort.c | 43 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index c395d6bf44d4..4ff57f2a0fd7 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -126,6 +126,31 @@ static inline void iort_delete_fwnode(struct acpi_iort_node *node) spin_unlock(&iort_fwnode_lock); } +/** + * iort_get_iort_node() - Retrieve iort_node associated with an fwnode + * + * @fwnode: fwnode associated with device to be looked-up + * + * Returns: iort_node pointer on success, NULL on failure + */ +static inline struct acpi_iort_node *iort_get_iort_node( + struct fwnode_handle *fwnode) +{ + struct iort_fwnode *curr; + struct acpi_iort_node *iort_node = NULL; + + spin_lock(&iort_fwnode_lock); + list_for_each_entry(curr, &iort_fwnode_list, list) { + if (curr->fwnode == fwnode) { + iort_node = curr->iort_node; + break; + } + } + spin_unlock(&iort_fwnode_lock); + + return iort_node; +} + typedef acpi_status (*iort_find_node_callback) (struct acpi_iort_node *node, void *context); @@ -422,9 +447,25 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev) { struct pci_bus *pbus; - if (!dev_is_pci(dev)) + if (!dev_is_pci(dev)) { + struct acpi_iort_node *node; + /* + * scan iort_fwnode_list to see if it's an iort platform + * device (such as SMMU, PMCG),its iort node already cached + * and associated with fwnode when iort platform devices + * were initialized. + */ + node = iort_get_iort_node(dev->fwnode); + if (node) + return node; + + /* + * if not, then it should be a platform device defined in + * DSDT/SSDT (with Named Component node in IORT) + */ return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, iort_match_node_callback, dev); + } /* Find a PCI root bus */ pbus = to_pci_dev(dev)->bus; -- cgit From 8c8df8dcd69ea65cedb4d0b19c34a38c9fb68466 Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Fri, 13 Oct 2017 15:09:48 +0800 Subject: ACPI/IORT: Enable special index ITS group mappings for IORT nodes IORT revision C introduced SMMUv3 and PMCG MSI support by adding specific mapping entries in the SMMUv3/PMCG subtables to retrieve the device ID and the ITS group it maps to for a given SMMUv3/PMCG IORT node. Introduce a mapping function (ie iort_get_id_mapping_index()), that for a given IORT node looks up if an ITS specific ID mapping entry exists and if so retrieve the corresponding mapping index in the IORT node mapping array. Since an ITS specific index mapping can be present for an IORT node that is not a leaf node (eg SMMUv3 - to describe its own ITS device ID) special handling is required for two steps mapping cases such as PCI/NamedComponent--->SMMUv3--->ITS because the SMMUv3 ITS specific index mapping entry should be skipped to prevent the IORT API from considering the mapping entry as a regular mapping one. 
If we take the following IORT topology example: |----------------------| | Root Complex Node | |----------------------| | map entry[x] | |----------------------| | id value | | output_reference | |---|------------------| | | |----------------------| |-->| SMMUv3 | |----------------------| | SMMUv3 dev ID | | mapping index 0 | |----------------------| | map entry[0] | |----------------------| | id value | | output_reference-----------> ITS 1 (SMMU MSI domain) |----------------------| | map entry[1] | |----------------------| | id value | | output_reference-----------> ITS 2 (PCI MSI domain) |----------------------| where the SMMUv3 ITS specific mapping entry is index 0 and it represents the SMMUv3 ITS specific index mapping entry (describing its own ITS device ID), we need to skip that mapping entry while carrying out the Root Complex Node regular mappings to prevent erroneous translations. Reuse the iort_get_id_mapping_index() function to detect the ITS specific mapping index for a specific IORT node and skip it in the IORT mapping API (ie iort_node_map_id()) loop to prevent considering it a normal PCI/Named Component ID mapping entry. Signed-off-by: Hanjun Guo [lorenzo.pieralisi@arm.com: split patch/rewrote commit log] Signed-off-by: Lorenzo Pieralisi --- drivers/acpi/arm64/iort.c | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 4ff57f2a0fd7..55fbf645a777 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -365,6 +365,11 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, return NULL; } +static inline int iort_get_id_mapping_index(struct acpi_iort_node *node) +{ + return -EINVAL; +} + static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, u32 id_in, u32 *id_out, u8 type_mask) @@ -374,7 +379,7 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, /* Parse the ID mapping tree to find specified node type */ while (node) { struct acpi_iort_id_mapping *map; - int i; + int i, index; if (IORT_TYPE_MASK(node->type) & type_mask) { if (id_out) @@ -395,8 +400,19 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, goto fail_map; } + /* + * Get the special ID mapping index (if any) and skip its + * associated ID map to prevent erroneous multi-stage + * IORT ID translations. 
+ */ + index = iort_get_id_mapping_index(node); + /* Do the ID translation */ for (i = 0; i < node->mapping_count; i++, map++) { + /* if it is special mapping index, skip it */ + if (i == index) + continue; + if (!iort_id_map(map, node->type, id, &id)) break; } @@ -505,16 +521,24 @@ u32 iort_msi_map_rid(struct device *dev, u32 req_id) */ int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id) { - int i; + int i, index; struct acpi_iort_node *node; node = iort_find_dev_node(dev); if (!node) return -ENODEV; - for (i = 0; i < node->mapping_count; i++) { - if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i)) + index = iort_get_id_mapping_index(node); + /* if there is a valid index, go get the dev_id directly */ + if (index >= 0) { + if (iort_node_get_id(node, dev_id, index)) return 0; + } else { + for (i = 0; i < node->mapping_count; i++) { + if (iort_node_map_platform_id(node, dev_id, + IORT_MSI_TYPE, i)) + return 0; + } } return -ENODEV; -- cgit From 86456a3f19c505049341eeb51cf9bb874d3b4752 Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Fri, 13 Oct 2017 15:09:49 +0800 Subject: ACPI/IORT: Add SMMUv3 specific special index mapping handling IORT revision C introduced a mapping entry binding to describe ITS device ID mapping for SMMUv3 MSI interrupts. Enable the single mapping flag (ie that is used by SMMUv3 component for its special index mappings) for the SMMUv3 node in the IORT mapping API and add IORT code to handle special index mapping entry for the SMMUv3 IORT nodes to enable their MSI interrupts. In case the ACPICA for SMMUv3 device ID mapping is not ready, use the ACPICA version as a guard for function iort_get_id_mapping_index(). Signed-off-by: Hanjun Guo [lorenzo.pieralisi@arm.com: patch split, typos fixing, rewrote the log] Signed-off-by: Lorenzo Pieralisi --- drivers/acpi/arm64/iort.c | 39 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 55fbf645a777..3d7e3cd2eae8 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -356,7 +356,8 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || - node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { + node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || + node->type == ACPI_IORT_NODE_SMMU_V3) { *id_out = map->output_base; return parent; } @@ -365,10 +366,46 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, return NULL; } +#if (ACPI_CA_VERSION > 0x20170929) +static int iort_get_id_mapping_index(struct acpi_iort_node *node) +{ + struct acpi_iort_smmu_v3 *smmu; + + switch (node->type) { + case ACPI_IORT_NODE_SMMU_V3: + /* + * SMMUv3 dev ID mapping index was introduced in revision 1 + * table, not available in revision 0 + */ + if (node->revision < 1) + return -EINVAL; + + smmu = (struct acpi_iort_smmu_v3 *)node->node_data; + /* + * ID mapping index is only ignored if all interrupts are + * GSIV based + */ + if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv + && smmu->sync_gsiv) + return -EINVAL; + + if (smmu->id_mapping_index >= node->mapping_count) { + pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n", + node, node->type); + return -EINVAL; + } + + return smmu->id_mapping_index; + default: + return -EINVAL; + } +} +#else static inline int iort_get_id_mapping_index(struct acpi_iort_node *node) { return -EINVAL; } +#endif 
static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, u32 id_in, u32 *id_out, -- cgit From 65637901a3409f8a7952750e975536bde70fa1f8 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 13 Oct 2017 15:09:50 +0800 Subject: ACPI/IORT: Enable SMMUv3/PMCG IORT MSI domain set-up ITS specific mappings for SMMUv3/PMCG components can be retrieved through special index mapping entries introduced in IORT revision C. Introduce a new API iort_set_device_domain() to set the MSI domain for SMMUv3/PMCG nodes (extendable to any future IORT node requiring special index ITS mapping entries) that represent MSI through special index mappings in order to enable MSI support for the devices their nodes represent. Signed-off-by: Lorenzo Pieralisi Signed-off-by: Hanjun Guo --- drivers/acpi/arm64/iort.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 3d7e3cd2eae8..7dc964f4d8f1 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -638,6 +638,49 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI); } +static void iort_set_device_domain(struct device *dev, + struct acpi_iort_node *node) +{ + struct acpi_iort_its_group *its; + struct acpi_iort_node *msi_parent; + struct acpi_iort_id_mapping *map; + struct fwnode_handle *iort_fwnode; + struct irq_domain *domain; + int index; + + index = iort_get_id_mapping_index(node); + if (index < 0) + return; + + map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, + node->mapping_offset + index * sizeof(*map)); + + /* Firmware bug! */ + if (!map->output_reference || + !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) { + pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n", + node, node->type); + return; + } + + msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, + map->output_reference); + + if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP) + return; + + /* Move to ITS specific data */ + its = (struct acpi_iort_its_group *)msi_parent->node_data; + + iort_fwnode = iort_find_domain_token(its->identifiers[0]); + if (!iort_fwnode) + return; + + domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI); + if (domain) + dev_set_msi_domain(dev, domain); +} + /** * iort_get_platform_device_domain() - Find MSI domain related to a * platform device @@ -1261,6 +1304,8 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, /* Configure DMA for the page table walker */ acpi_dma_configure(&pdev->dev, attr); + iort_set_device_domain(&pdev->dev, node); + ret = platform_device_add(pdev); if (ret) goto dma_deconfigure; -- cgit From 5ffeb0501c6b36d080de78372fdb70b404b91e9d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 25 Jul 2016 16:07:10 +0100 Subject: genirq: export irq_get_percpu_devid_partition to modules Any modular driver using cluster-affine PPIs needs to be able to call irq_get_percpu_devid_partition so that it can enable the IRQ on the correct subset of CPUs. This patch exports the symbol so that it can be called from within a module. 
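As an example (hypothetical code, not part of this patch), a modular
driver using a partitioned PPI could discover the CPUs it covers like
this, mirroring what the SPE driver added later in this series does in
its IRQ probe path:

  #include <linux/cpumask.h>
  #include <linux/errno.h>
  #include <linux/irq.h>
  #include <linux/irqdesc.h>

  /* struct my_pmu and my_pmu_probe_irq() are made-up names */
  struct my_pmu {
          int irq;                        /* PPI shared by a CPU subset */
          cpumask_t supported_cpus;       /* CPUs covered by that PPI */
  };

  static int my_pmu_probe_irq(struct my_pmu *pmu, int irq)
  {
          if (!irq_is_percpu(irq))
                  return -EINVAL;

          /* Fill in the affinity mask describing this PPI partition */
          if (irq_get_percpu_devid_partition(irq, &pmu->supported_cpus))
                  return -EINVAL;

          pmu->irq = irq;
          return 0;
  }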
Acked-by: Marc Zyngier Acked-by: Thomas Gleixner Signed-off-by: Will Deacon --- kernel/irq/irqdesc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 82afb7ed369f..694c1a9d6485 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -863,6 +863,7 @@ int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity) return 0; } +EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition); void kstat_incr_irq_this_cpu(unsigned int irq) { -- cgit From bc1d202023eb66f088f736ba423bee1cf135c720 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 16 Aug 2016 16:53:15 +0100 Subject: perf/core: Export AUX buffer helpers to modules Perf PMU drivers using AUX buffers cannot be built as modules unless the AUX helpers are exported. This patch exports perf_aux_output_{begin,end,skip} and perf_get_aux to modules. Cc: Peter Zijlstra Signed-off-by: Will Deacon --- kernel/events/ring_buffer.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index f684d8e5fa2b..6d9bffe4d6cc 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -411,6 +411,7 @@ err: return NULL; } +EXPORT_SYMBOL_GPL(perf_aux_output_begin); static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb) { @@ -480,6 +481,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) rb_free_aux(rb); ring_buffer_put(rb); } +EXPORT_SYMBOL_GPL(perf_aux_output_end); /* * Skip over a given number of bytes in the AUX buffer, due to, for example, @@ -505,6 +507,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size) return 0; } +EXPORT_SYMBOL_GPL(perf_aux_output_skip); void *perf_get_aux(struct perf_output_handle *handle) { @@ -514,6 +517,7 @@ void *perf_get_aux(struct perf_output_handle *handle) return handle->rb->aux_priv; } +EXPORT_SYMBOL_GPL(perf_get_aux); #define PERF_AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY) -- cgit From 085b30625e39df67d7320f22269796276c6b0c11 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 23 Sep 2016 14:05:12 +0100 Subject: perf/core: Add PERF_AUX_FLAG_COLLISION to report colliding samples The ARM SPE architecture permits an implementation to ignore a sample if the sample is due to be taken whilst another sample is already being produced. In this case, it is desirable to report the collision to userspace, as they may want to lower the sample period. This patch adds a PERF_AUX_FLAG_COLLISION flag, so that such events can be relayed to userspace. 
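As an example (hypothetical code, not part of this patch), an AUX-based
PMU driver's interrupt path would relay a detected collision to userspace
through the existing perf_aux_output_flag() helper:

  #include <linux/perf_event.h>
  #include <linux/types.h>

  /* Sketch only: the caller decides how "collision" is detected */
  static void example_report_collision(struct perf_output_handle *handle,
                                       bool collision)
  {
          if (collision)
                  perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
  }

The SPE driver added later in this series does exactly this when the
hardware reports PMBSR_EL1.COLL.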
Acked-by: Peter Zijlstra Signed-off-by: Will Deacon --- include/uapi/linux/perf_event.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 140ae638cfd6..7ca1b22ea417 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -941,6 +941,7 @@ enum perf_callchain_context { #define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ #define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ #define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */ +#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */ #define PERF_FLAG_FD_NO_GROUP (1UL << 0) #define PERF_FLAG_FD_OUTPUT (1UL << 1) -- cgit From a173c390d9c130d1dd254a9246fc7ffe4a2869f7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 20 Sep 2017 16:48:33 +0100 Subject: arm64: sysreg: Move SPE registers and PSB into common header files SPE is part of the v8.2 architecture, so move its system register and field definitions into sysreg.h and the new PSB barrier into barrier.h Finally, move KVM over to using the generic definitions so that it doesn't have to open-code its own versions. Acked-by: Marc Zyngier Acked-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/barrier.h | 2 + arch/arm64/include/asm/sysreg.h | 93 ++++++++++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/debug-sr.c | 24 +++-------- 3 files changed, 102 insertions(+), 17 deletions(-) diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 0fe7e43b7fbc..77651c49ef44 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -31,6 +31,8 @@ #define dmb(opt) asm volatile("dmb " #opt : : : "memory") #define dsb(opt) asm volatile("dsb " #opt : : : "memory") +#define psb_csync() asm volatile("hint #17" : : : "memory") + #define mb() dsb(sy) #define rmb() dsb(ld) #define wmb() dsb(st) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f707fed5886f..94b7a0d42aad 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -172,6 +172,99 @@ #define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) +/*** Statistical Profiling Extension ***/ +/* ID registers */ +#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) +#define SYS_PMSIDR_EL1_FE_SHIFT 0 +#define SYS_PMSIDR_EL1_FT_SHIFT 1 +#define SYS_PMSIDR_EL1_FL_SHIFT 2 +#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3 +#define SYS_PMSIDR_EL1_LDS_SHIFT 4 +#define SYS_PMSIDR_EL1_ERND_SHIFT 5 +#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8 +#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL +#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12 +#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL +#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16 +#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL + +#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7) +#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0 +#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU +#define SYS_PMBIDR_EL1_P_SHIFT 4 +#define SYS_PMBIDR_EL1_F_SHIFT 5 + +/* Sampling controls */ +#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0) +#define SYS_PMSCR_EL1_E0SPE_SHIFT 0 +#define SYS_PMSCR_EL1_E1SPE_SHIFT 1 +#define SYS_PMSCR_EL1_CX_SHIFT 3 +#define SYS_PMSCR_EL1_PA_SHIFT 4 +#define SYS_PMSCR_EL1_TS_SHIFT 5 +#define SYS_PMSCR_EL1_PCT_SHIFT 6 + +#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0) +#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0 +#define SYS_PMSCR_EL2_E2SPE_SHIFT 1 +#define SYS_PMSCR_EL2_CX_SHIFT 3 +#define SYS_PMSCR_EL2_PA_SHIFT 4 +#define SYS_PMSCR_EL2_TS_SHIFT 5 +#define 
SYS_PMSCR_EL2_PCT_SHIFT 6 + +#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2) + +#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3) +#define SYS_PMSIRR_EL1_RND_SHIFT 0 +#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8 +#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL + +/* Filtering controls */ +#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4) +#define SYS_PMSFCR_EL1_FE_SHIFT 0 +#define SYS_PMSFCR_EL1_FT_SHIFT 1 +#define SYS_PMSFCR_EL1_FL_SHIFT 2 +#define SYS_PMSFCR_EL1_B_SHIFT 16 +#define SYS_PMSFCR_EL1_LD_SHIFT 17 +#define SYS_PMSFCR_EL1_ST_SHIFT 18 + +#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) +#define SYS_PMSEVFR_EL1_RES0 0x0000ffff00ff0f55UL + +#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) +#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0 + +/* Buffer controls */ +#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0) +#define SYS_PMBLIMITR_EL1_E_SHIFT 0 +#define SYS_PMBLIMITR_EL1_FM_SHIFT 1 +#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL +#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT) + +#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1) + +/* Buffer error reporting */ +#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3) +#define SYS_PMBSR_EL1_COLL_SHIFT 16 +#define SYS_PMBSR_EL1_S_SHIFT 17 +#define SYS_PMBSR_EL1_EA_SHIFT 18 +#define SYS_PMBSR_EL1_DL_SHIFT 19 +#define SYS_PMBSR_EL1_EC_SHIFT 26 +#define SYS_PMBSR_EL1_EC_MASK 0x3fUL + +#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT) +#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT) +#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT) + +#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0 +#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL + +#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0 +#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL + +#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT) + +/*** End of Statistical Profiling Extension ***/ + #define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1) #define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2) diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index f5154ed3da6c..321c9c05dd9e 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -65,16 +65,6 @@ default: write_debug(ptr[0], reg, 0); \ } -#define PMSCR_EL1 sys_reg(3, 0, 9, 9, 0) - -#define PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0) -#define PMBLIMITR_EL1_E BIT(0) - -#define PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7) -#define PMBIDR_EL1_P BIT(4) - -#define psb_csync() asm volatile("hint #17") - static void __hyp_text __debug_save_spe_vhe(u64 *pmscr_el1) { /* The vcpu can run. but it can't hide. */ @@ -90,18 +80,18 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1) return; /* Yes; is it owned by EL3? */ - reg = read_sysreg_s(PMBIDR_EL1); - if (reg & PMBIDR_EL1_P) + reg = read_sysreg_s(SYS_PMBIDR_EL1); + if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) return; /* No; is the host actually using the thing? 
*/ - reg = read_sysreg_s(PMBLIMITR_EL1); - if (!(reg & PMBLIMITR_EL1_E)) + reg = read_sysreg_s(SYS_PMBLIMITR_EL1); + if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT))) return; /* Yes; save the control register and disable data generation */ - *pmscr_el1 = read_sysreg_s(PMSCR_EL1); - write_sysreg_s(0, PMSCR_EL1); + *pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1); + write_sysreg_s(0, SYS_PMSCR_EL1); isb(); /* Now drain all buffered data to memory */ @@ -122,7 +112,7 @@ static void __hyp_text __debug_restore_spe(u64 pmscr_el1) isb(); /* Re-enable data generation */ - write_sysreg_s(pmscr_el1, PMSCR_EL1); + write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1); } void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu, -- cgit From b0c57e1071b51746ef0c960166f4527a3b3f0b6d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 7 Jul 2017 13:47:02 +0100 Subject: arm64: head: Init PMSCR_EL2.{PA,PCT} when entered at EL2 without VHE When booting at EL2, ensure that we permit the EL1 host to sample physical addresses and physical counter values using SPE. Acked-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 0b243ecaf7ac..cfa90a43fbe7 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -480,14 +480,21 @@ set_hcr: /* Statistical profiling */ ubfx x0, x1, #32, #4 // Check ID_AA64DFR0_EL1 PMSVer - cbz x0, 6f // Skip if SPE not present - cbnz x2, 5f // VHE? + cbz x0, 7f // Skip if SPE not present + cbnz x2, 6f // VHE? + mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2, + and x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT) + cbnz x4, 5f // then permit sampling of physical + mov x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \ + 1 << SYS_PMSCR_EL2_PA_SHIFT) + msr_s SYS_PMSCR_EL2, x4 // addresses and physical counter +5: mov x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT) orr x3, x3, x1 // If we don't have VHE, then - b 6f // use EL1&0 translation. -5: // For VHE, use EL2 translation + b 7f // use EL1&0 translation. +6: // For VHE, use EL2 translation orr x3, x3, #MDCR_EL2_TPMS // and disable access from EL1 -6: +7: msr mdcr_el2, x3 // Configure debug traps /* Stage-2 translation */ -- cgit From 4b8b77a4ec807a5f14e02e84b1bb7a2fd43768c0 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 22 Sep 2016 11:48:19 +0100 Subject: dt-bindings: Document devicetree binding for ARM SPE This patch documents the devicetree binding in use for ARM SPE. Cc: Rob Herring Acked-by: Mark Rutland Signed-off-by: Will Deacon --- Documentation/devicetree/bindings/arm/spe-pmu.txt | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 Documentation/devicetree/bindings/arm/spe-pmu.txt diff --git a/Documentation/devicetree/bindings/arm/spe-pmu.txt b/Documentation/devicetree/bindings/arm/spe-pmu.txt new file mode 100644 index 000000000000..93372f2a7df9 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/spe-pmu.txt @@ -0,0 +1,20 @@ +* ARMv8.2 Statistical Profiling Extension (SPE) Performance Monitor Units (PMU) + +ARMv8.2 introduces the optional Statistical Profiling Extension for collecting +performance sample data using an in-memory trace buffer. + +** SPE Required properties: + +- compatible : should be one of: + "arm,statistical-profiling-extension-v1" + +- interrupts : Exactly 1 PPI must be listed. 
For heterogeneous systems where + SPE is only supported on a subset of the CPUs, please consult + the arm,gic-v3 binding for details on describing a PPI partition. + +** Example: + +spe-pmu { + compatible = "arm,statistical-profiling-extension-v1"; + interrupts = ; +}; -- cgit From d5d9696b03808bc6be723cc85288c912c3a05606 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 22 Sep 2016 11:36:32 +0100 Subject: drivers/perf: Add support for ARMv8.2 Statistical Profiling Extension The ARMv8.2 architecture introduces the optional Statistical Profiling Extension (SPE). SPE can be used to profile a population of operations in the CPU pipeline after instruction decode. These are either architected instructions (i.e. a dynamic instruction trace) or CPU-specific uops and the choice is fixed statically in the hardware and advertised to userspace via caps/. Sampling is controlled using a sampling interval, similar to a regular PMU counter, but also with an optional random perturbation to avoid falling into patterns where you continuously profile the same instruction in a hot loop. After each operation is decoded, the interval counter is decremented. When it hits zero, an operation is chosen for profiling and tracked within the pipeline until it retires. Along the way, information such as TLB lookups, cache misses, time spent to issue etc is captured in the form of a sample. The sample is then filtered according to certain criteria (e.g. load latency) that can be specified in the event config (described under format/) and, if the sample satisfies the filter, it is written out to memory as a record, otherwise it is discarded. Only one operation can be sampled at a time. The in-memory buffer is linear and virtually addressed, raising an interrupt when it fills up. The PMU driver handles these interrupts to give the appearance of a ring buffer, as expected by the AUX code. The in-memory trace-like format is self-describing (though not parseable in reverse) and written as a series of records, with each record corresponding to a sample and consisting of a sequence of packets. These packets are defined by the architecture, although some have CPU-specific fields for recording information specific to the microarchitecture. As a simple example, a record generated for a branch instruction may consist of the following packets: 0 (Address) : Virtual PC of the branch instruction 1 (Type) : Conditional direct branch 2 (Counter) : Number of cycles taken from Dispatch to Issue 3 (Address) : Virtual branch target + condition flags 4 (Counter) : Number of cycles taken from Dispatch to Complete 5 (Events) : Mispredicted as not-taken 6 (END) : End of record It is also possible to toggle properties such as timestamp packets in each record. This patch adds support for SPE in the form of a new perf driver. Cc: Alexander Shishkin Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- drivers/perf/Kconfig | 8 + drivers/perf/Makefile | 1 + drivers/perf/arm_spe_pmu.c | 1248 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1257 insertions(+) create mode 100644 drivers/perf/arm_spe_pmu.c diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index e5197ffb7422..43525a9dc947 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -43,4 +43,12 @@ config XGENE_PMU help Say y if you want to use APM X-Gene SoC performance monitors. 
+config ARM_SPE_PMU + tristate "Enable support for the ARMv8.2 Statistical Profiling Extension" + depends on PERF_EVENTS && ARM64 + help + Enable perf support for the ARMv8.2 Statistical Profiling + Extension, which provides periodic sampling of operations in + the CPU pipeline and reports this via the perf AUX interface. + endmenu diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 6420bd4394d5..eaee60cf4b1b 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o +obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c new file mode 100644 index 000000000000..50511b13fd35 --- /dev/null +++ b/drivers/perf/arm_spe_pmu.c @@ -0,0 +1,1248 @@ +/* + * Perf support for the Statistical Profiling Extension, introduced as + * part of ARMv8.2. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * Copyright (C) 2016 ARM Limited + * + * Author: Will Deacon + */ + +#define PMUNAME "arm_spe" +#define DRVNAME PMUNAME "_pmu" +#define pr_fmt(fmt) DRVNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define ARM_SPE_BUF_PAD_BYTE 0 + +struct arm_spe_pmu_buf { + int nr_pages; + bool snapshot; + void *base; +}; + +struct arm_spe_pmu { + struct pmu pmu; + struct platform_device *pdev; + cpumask_t supported_cpus; + struct hlist_node hotplug_node; + + int irq; /* PPI */ + + u16 min_period; + u16 counter_sz; + +#define SPE_PMU_FEAT_FILT_EVT (1UL << 0) +#define SPE_PMU_FEAT_FILT_TYP (1UL << 1) +#define SPE_PMU_FEAT_FILT_LAT (1UL << 2) +#define SPE_PMU_FEAT_ARCH_INST (1UL << 3) +#define SPE_PMU_FEAT_LDS (1UL << 4) +#define SPE_PMU_FEAT_ERND (1UL << 5) +#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63) + u64 features; + + u16 max_record_sz; + u16 align; + struct perf_output_handle __percpu *handle; +}; + +#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu)) + +/* Convert a free-running index from perf into an SPE buffer offset */ +#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT)) + +/* Keep track of our dynamic hotplug state */ +static enum cpuhp_state arm_spe_pmu_online; + +enum arm_spe_pmu_buf_fault_action { + SPE_PMU_BUF_FAULT_ACT_SPURIOUS, + SPE_PMU_BUF_FAULT_ACT_FATAL, + SPE_PMU_BUF_FAULT_ACT_OK, +}; + +/* This sysfs gunk was really good fun to write. 
*/ +enum arm_spe_pmu_capabilities { + SPE_PMU_CAP_ARCH_INST = 0, + SPE_PMU_CAP_ERND, + SPE_PMU_CAP_FEAT_MAX, + SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX, + SPE_PMU_CAP_MIN_IVAL, +}; + +static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = { + [SPE_PMU_CAP_ARCH_INST] = SPE_PMU_FEAT_ARCH_INST, + [SPE_PMU_CAP_ERND] = SPE_PMU_FEAT_ERND, +}; + +static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap) +{ + if (cap < SPE_PMU_CAP_FEAT_MAX) + return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]); + + switch (cap) { + case SPE_PMU_CAP_CNT_SZ: + return spe_pmu->counter_sz; + case SPE_PMU_CAP_MIN_IVAL: + return spe_pmu->min_period; + default: + WARN(1, "unknown cap %d\n", cap); + } + + return 0; +} + +static ssize_t arm_spe_pmu_cap_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev); + struct dev_ext_attribute *ea = + container_of(attr, struct dev_ext_attribute, attr); + int cap = (long)ea->var; + + return snprintf(buf, PAGE_SIZE, "%u\n", + arm_spe_pmu_cap_get(spe_pmu, cap)); +} + +#define SPE_EXT_ATTR_ENTRY(_name, _func, _var) \ + &((struct dev_ext_attribute[]) { \ + { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var } \ + })[0].attr.attr + +#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var) \ + SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var) + +static struct attribute *arm_spe_pmu_cap_attr[] = { + SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST), + SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND), + SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ), + SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL), + NULL, +}; + +static struct attribute_group arm_spe_pmu_cap_group = { + .name = "caps", + .attrs = arm_spe_pmu_cap_attr, +}; + +/* User ABI */ +#define ATTR_CFG_FLD_ts_enable_CFG config /* PMSCR_EL1.TS */ +#define ATTR_CFG_FLD_ts_enable_LO 0 +#define ATTR_CFG_FLD_ts_enable_HI 0 +#define ATTR_CFG_FLD_pa_enable_CFG config /* PMSCR_EL1.PA */ +#define ATTR_CFG_FLD_pa_enable_LO 1 +#define ATTR_CFG_FLD_pa_enable_HI 1 +#define ATTR_CFG_FLD_pct_enable_CFG config /* PMSCR_EL1.PCT */ +#define ATTR_CFG_FLD_pct_enable_LO 2 +#define ATTR_CFG_FLD_pct_enable_HI 2 +#define ATTR_CFG_FLD_jitter_CFG config /* PMSIRR_EL1.RND */ +#define ATTR_CFG_FLD_jitter_LO 16 +#define ATTR_CFG_FLD_jitter_HI 16 +#define ATTR_CFG_FLD_branch_filter_CFG config /* PMSFCR_EL1.B */ +#define ATTR_CFG_FLD_branch_filter_LO 32 +#define ATTR_CFG_FLD_branch_filter_HI 32 +#define ATTR_CFG_FLD_load_filter_CFG config /* PMSFCR_EL1.LD */ +#define ATTR_CFG_FLD_load_filter_LO 33 +#define ATTR_CFG_FLD_load_filter_HI 33 +#define ATTR_CFG_FLD_store_filter_CFG config /* PMSFCR_EL1.ST */ +#define ATTR_CFG_FLD_store_filter_LO 34 +#define ATTR_CFG_FLD_store_filter_HI 34 + +#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */ +#define ATTR_CFG_FLD_event_filter_LO 0 +#define ATTR_CFG_FLD_event_filter_HI 63 + +#define ATTR_CFG_FLD_min_latency_CFG config2 /* PMSLATFR_EL1.MINLAT */ +#define ATTR_CFG_FLD_min_latency_LO 0 +#define ATTR_CFG_FLD_min_latency_HI 11 + +/* Why does everything I do descend into this? */ +#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ + (lo) == (hi) ? 
#cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi + +#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ + __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) + +#define GEN_PMU_FORMAT_ATTR(name) \ + PMU_FORMAT_ATTR(name, \ + _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \ + ATTR_CFG_FLD_##name##_LO, \ + ATTR_CFG_FLD_##name##_HI)) + +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \ + ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0)) + +#define ATTR_CFG_GET_FLD(attr, name) \ + _ATTR_CFG_GET_FLD(attr, \ + ATTR_CFG_FLD_##name##_CFG, \ + ATTR_CFG_FLD_##name##_LO, \ + ATTR_CFG_FLD_##name##_HI) + +GEN_PMU_FORMAT_ATTR(ts_enable); +GEN_PMU_FORMAT_ATTR(pa_enable); +GEN_PMU_FORMAT_ATTR(pct_enable); +GEN_PMU_FORMAT_ATTR(jitter); +GEN_PMU_FORMAT_ATTR(branch_filter); +GEN_PMU_FORMAT_ATTR(load_filter); +GEN_PMU_FORMAT_ATTR(store_filter); +GEN_PMU_FORMAT_ATTR(event_filter); +GEN_PMU_FORMAT_ATTR(min_latency); + +static struct attribute *arm_spe_pmu_formats_attr[] = { + &format_attr_ts_enable.attr, + &format_attr_pa_enable.attr, + &format_attr_pct_enable.attr, + &format_attr_jitter.attr, + &format_attr_branch_filter.attr, + &format_attr_load_filter.attr, + &format_attr_store_filter.attr, + &format_attr_event_filter.attr, + &format_attr_min_latency.attr, + NULL, +}; + +static struct attribute_group arm_spe_pmu_format_group = { + .name = "format", + .attrs = arm_spe_pmu_formats_attr, +}; + +static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev); + + return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus); +} +static DEVICE_ATTR(cpumask, S_IRUGO, arm_spe_pmu_get_attr_cpumask, NULL); + +static struct attribute *arm_spe_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group arm_spe_pmu_group = { + .attrs = arm_spe_pmu_attrs, +}; + +static const struct attribute_group *arm_spe_pmu_attr_groups[] = { + &arm_spe_pmu_group, + &arm_spe_pmu_cap_group, + &arm_spe_pmu_format_group, + NULL, +}; + +/* Convert between user ABI and register values */ +static u64 arm_spe_event_to_pmscr(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + u64 reg = 0; + + reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT; + + if (!attr->exclude_user) + reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT); + + if (!attr->exclude_kernel) + reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT); + + if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && capable(CAP_SYS_ADMIN)) + reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT); + + return reg; +} + +static void arm_spe_event_sanitise_period(struct perf_event *event) +{ + struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); + u64 period = event->hw.sample_period; + u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK + << SYS_PMSIRR_EL1_INTERVAL_SHIFT; + + if (period < spe_pmu->min_period) + period = spe_pmu->min_period; + else if (period > max_period) + period = max_period; + else + period &= max_period; + + event->hw.sample_period = period; +} + +static u64 arm_spe_event_to_pmsirr(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + u64 reg = 0; + + arm_spe_event_sanitise_period(event); + + reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT; + reg |= event->hw.sample_period; + + return reg; +} + +static u64 arm_spe_event_to_pmsfcr(struct perf_event *event) +{ 
+ struct perf_event_attr *attr = &event->attr; + u64 reg = 0; + + reg |= ATTR_CFG_GET_FLD(attr, load_filter) << SYS_PMSFCR_EL1_LD_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT; + + if (reg) + reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT); + + if (ATTR_CFG_GET_FLD(attr, event_filter)) + reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT); + + if (ATTR_CFG_GET_FLD(attr, min_latency)) + reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT); + + return reg; +} + +static u64 arm_spe_event_to_pmsevfr(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + return ATTR_CFG_GET_FLD(attr, event_filter); +} + +static u64 arm_spe_event_to_pmslatfr(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + return ATTR_CFG_GET_FLD(attr, min_latency) + << SYS_PMSLATFR_EL1_MINLAT_SHIFT; +} + +static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len) +{ + struct arm_spe_pmu_buf *buf = perf_get_aux(handle); + u64 head = PERF_IDX2OFF(handle->head, buf); + + memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len); + if (!buf->snapshot) + perf_aux_output_skip(handle, len); +} + +static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle) +{ + struct arm_spe_pmu_buf *buf = perf_get_aux(handle); + struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); + u64 head = PERF_IDX2OFF(handle->head, buf); + u64 limit = buf->nr_pages * PAGE_SIZE; + + /* + * The trace format isn't parseable in reverse, so clamp + * the limit to half of the buffer size in snapshot mode + * so that the worst case is half a buffer of records, as + * opposed to a single record. + */ + if (head < limit >> 1) + limit >>= 1; + + /* + * If we're within max_record_sz of the limit, we must + * pad, move the head index and recompute the limit. + */ + if (limit - head < spe_pmu->max_record_sz) { + arm_spe_pmu_pad_buf(handle, limit - head); + handle->head = PERF_IDX2OFF(limit, buf); + limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head; + } + + return limit; +} + +static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle) +{ + struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); + struct arm_spe_pmu_buf *buf = perf_get_aux(handle); + const u64 bufsize = buf->nr_pages * PAGE_SIZE; + u64 limit = bufsize; + u64 head, tail, wakeup; + + /* + * The head can be misaligned for two reasons: + * + * 1. The hardware left PMBPTR pointing to the first byte after + * a record when generating a buffer management event. + * + * 2. We used perf_aux_output_skip to consume handle->size bytes + * and CIRC_SPACE was used to compute the size, which always + * leaves one entry free. + * + * Deal with this by padding to the next alignment boundary and + * moving the head index. If we run out of buffer space, we'll + * reduce handle->size to zero and end up reporting truncation. + */ + head = PERF_IDX2OFF(handle->head, buf); + if (!IS_ALIGNED(head, spe_pmu->align)) { + unsigned long delta = roundup(head, spe_pmu->align) - head; + + delta = min(delta, handle->size); + arm_spe_pmu_pad_buf(handle, delta); + head = PERF_IDX2OFF(handle->head, buf); + } + + /* If we've run out of free space, then nothing more to do */ + if (!handle->size) + goto no_space; + + /* Compute the tail and wakeup indices now that we've aligned head */ + tail = PERF_IDX2OFF(handle->head + handle->size, buf); + wakeup = PERF_IDX2OFF(handle->wakeup, buf); + + /* + * Avoid clobbering unconsumed data. 
We know we have space, so + * if we see head == tail we know that the buffer is empty. If + * head > tail, then there's nothing to clobber prior to + * wrapping. + */ + if (head < tail) + limit = round_down(tail, PAGE_SIZE); + + /* + * Wakeup may be arbitrarily far into the future. If it's not in + * the current generation, either we'll wrap before hitting it, + * or it's in the past and has been handled already. + * + * If there's a wakeup before we wrap, arrange to be woken up by + * the page boundary following it. Keep the tail boundary if + * that's lower. + */ + if (handle->wakeup < (handle->head + handle->size) && head <= wakeup) + limit = min(limit, round_up(wakeup, PAGE_SIZE)); + + if (limit > head) + return limit; + + arm_spe_pmu_pad_buf(handle, handle->size); +no_space: + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); + perf_aux_output_end(handle, 0); + return 0; +} + +static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle) +{ + struct arm_spe_pmu_buf *buf = perf_get_aux(handle); + struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); + u64 limit = __arm_spe_pmu_next_off(handle); + u64 head = PERF_IDX2OFF(handle->head, buf); + + /* + * If the head has come too close to the end of the buffer, + * then pad to the end and recompute the limit. + */ + if (limit && (limit - head < spe_pmu->max_record_sz)) { + arm_spe_pmu_pad_buf(handle, limit - head); + limit = __arm_spe_pmu_next_off(handle); + } + + return limit; +} + +static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle, + struct perf_event *event) +{ + u64 base, limit; + struct arm_spe_pmu_buf *buf; + + /* Start a new aux session */ + buf = perf_aux_output_begin(handle, event); + if (!buf) { + event->hw.state |= PERF_HES_STOPPED; + /* + * We still need to clear the limit pointer, since the + * profiler might only be disabled by virtue of a fault. + */ + limit = 0; + goto out_write_limit; + } + + limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle) + : arm_spe_pmu_next_off(handle); + if (limit) + limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT); + + limit += (u64)buf->base; + base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf); + write_sysreg_s(base, SYS_PMBPTR_EL1); + +out_write_limit: + write_sysreg_s(limit, SYS_PMBLIMITR_EL1); +} + +static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle) +{ + struct arm_spe_pmu_buf *buf = perf_get_aux(handle); + u64 offset, size; + + offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base; + size = offset - PERF_IDX2OFF(handle->head, buf); + + if (buf->snapshot) + handle->head = offset; + + perf_aux_output_end(handle, size); +} + +static void arm_spe_pmu_disable_and_drain_local(void) +{ + /* Disable profiling at EL0 and EL1 */ + write_sysreg_s(0, SYS_PMSCR_EL1); + isb(); + + /* Drain any buffered data */ + psb_csync(); + dsb(nsh); + + /* Disable the profiling buffer */ + write_sysreg_s(0, SYS_PMBLIMITR_EL1); + isb(); +} + +/* IRQ handling */ +static enum arm_spe_pmu_buf_fault_action +arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle) +{ + const char *err_str; + u64 pmbsr; + enum arm_spe_pmu_buf_fault_action ret; + + /* + * Ensure new profiling data is visible to the CPU and any external + * aborts have been resolved. + */ + psb_csync(); + dsb(nsh); + + /* Ensure hardware updates to PMBPTR_EL1 are visible */ + isb(); + + /* Service required? 
*/ + pmbsr = read_sysreg_s(SYS_PMBSR_EL1); + if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT))) + return SPE_PMU_BUF_FAULT_ACT_SPURIOUS; + + /* + * If we've lost data, disable profiling and also set the PARTIAL + * flag to indicate that the last record is corrupted. + */ + if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT)) + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED | + PERF_AUX_FLAG_PARTIAL); + + /* Report collisions to userspace so that it can up the period */ + if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT)) + perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION); + + /* We only expect buffer management events */ + switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) { + case SYS_PMBSR_EL1_EC_BUF: + /* Handled below */ + break; + case SYS_PMBSR_EL1_EC_FAULT_S1: + case SYS_PMBSR_EL1_EC_FAULT_S2: + err_str = "Unexpected buffer fault"; + goto out_err; + default: + err_str = "Unknown error code"; + goto out_err; + } + + /* Buffer management event */ + switch (pmbsr & + (SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) { + case SYS_PMBSR_EL1_BUF_BSC_FULL: + ret = SPE_PMU_BUF_FAULT_ACT_OK; + goto out_stop; + default: + err_str = "Unknown buffer status code"; + } + +out_err: + pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n", + err_str, smp_processor_id(), pmbsr, + read_sysreg_s(SYS_PMBPTR_EL1), + read_sysreg_s(SYS_PMBLIMITR_EL1)); + ret = SPE_PMU_BUF_FAULT_ACT_FATAL; + +out_stop: + arm_spe_perf_aux_output_end(handle); + return ret; +} + +static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev) +{ + struct perf_output_handle *handle = dev; + struct perf_event *event = handle->event; + enum arm_spe_pmu_buf_fault_action act; + + if (!perf_get_aux(handle)) + return IRQ_NONE; + + act = arm_spe_pmu_buf_get_fault_act(handle); + if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS) + return IRQ_NONE; + + /* + * Ensure perf callbacks have completed, which may disable the + * profiling buffer in response to a TRUNCATION flag. + */ + irq_work_run(); + + switch (act) { + case SPE_PMU_BUF_FAULT_ACT_FATAL: + /* + * If a fatal exception occurred then leaving the profiling + * buffer enabled is a recipe waiting to happen. Since + * fatal faults don't always imply truncation, make sure + * that the profiling buffer is disabled explicitly before + * clearing the syndrome register. + */ + arm_spe_pmu_disable_and_drain_local(); + break; + case SPE_PMU_BUF_FAULT_ACT_OK: + /* + * We handled the fault (the buffer was full), so resume + * profiling as long as we didn't detect truncation. + * PMBPTR might be misaligned, but we'll burn that bridge + * when we get to it. + */ + if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) { + arm_spe_perf_aux_output_begin(handle, event); + isb(); + } + break; + case SPE_PMU_BUF_FAULT_ACT_SPURIOUS: + /* We've seen you before, but GCC has the memory of a sieve. */ + break; + } + + /* The buffer pointers are now sane, so resume profiling. 
*/ + write_sysreg_s(0, SYS_PMBSR_EL1); + return IRQ_HANDLED; +} + +/* Perf callbacks */ +static int arm_spe_pmu_event_init(struct perf_event *event) +{ + u64 reg; + struct perf_event_attr *attr = &event->attr; + struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); + + /* This is, of course, deeply driver-specific */ + if (attr->type != event->pmu->type) + return -ENOENT; + + if (event->cpu >= 0 && + !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus)) + return -ENOENT; + + if (arm_spe_event_to_pmsevfr(event) & SYS_PMSEVFR_EL1_RES0) + return -EOPNOTSUPP; + + if (attr->exclude_idle) + return -EOPNOTSUPP; + + /* + * Feedback-directed frequency throttling doesn't work when we + * have a buffer of samples. We'd need to manually count the + * samples in the buffer when it fills up and adjust the event + * count to reflect that. Instead, just force the user to specify + * a sample period. + */ + if (attr->freq) + return -EINVAL; + + reg = arm_spe_event_to_pmsfcr(event); + if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) && + !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT)) + return -EOPNOTSUPP; + + if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) && + !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP)) + return -EOPNOTSUPP; + + if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) && + !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT)) + return -EOPNOTSUPP; + + reg = arm_spe_event_to_pmscr(event); + if (!capable(CAP_SYS_ADMIN) && + (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) | + BIT(SYS_PMSCR_EL1_CX_SHIFT) | + BIT(SYS_PMSCR_EL1_PCT_SHIFT)))) + return -EACCES; + + return 0; +} + +static void arm_spe_pmu_start(struct perf_event *event, int flags) +{ + u64 reg; + struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle); + + hwc->state = 0; + arm_spe_perf_aux_output_begin(handle, event); + if (hwc->state) + return; + + reg = arm_spe_event_to_pmsfcr(event); + write_sysreg_s(reg, SYS_PMSFCR_EL1); + + reg = arm_spe_event_to_pmsevfr(event); + write_sysreg_s(reg, SYS_PMSEVFR_EL1); + + reg = arm_spe_event_to_pmslatfr(event); + write_sysreg_s(reg, SYS_PMSLATFR_EL1); + + if (flags & PERF_EF_RELOAD) { + reg = arm_spe_event_to_pmsirr(event); + write_sysreg_s(reg, SYS_PMSIRR_EL1); + isb(); + reg = local64_read(&hwc->period_left); + write_sysreg_s(reg, SYS_PMSICR_EL1); + } + + reg = arm_spe_event_to_pmscr(event); + isb(); + write_sysreg_s(reg, SYS_PMSCR_EL1); +} + +static void arm_spe_pmu_stop(struct perf_event *event, int flags) +{ + struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle); + + /* If we're already stopped, then nothing to do */ + if (hwc->state & PERF_HES_STOPPED) + return; + + /* Stop all trace generation */ + arm_spe_pmu_disable_and_drain_local(); + + if (flags & PERF_EF_UPDATE) { + /* + * If there's a fault pending then ensure we contain it + * to this buffer, since we might be on the context-switch + * path. + */ + if (perf_get_aux(handle)) { + enum arm_spe_pmu_buf_fault_action act; + + act = arm_spe_pmu_buf_get_fault_act(handle); + if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS) + arm_spe_perf_aux_output_end(handle); + else + write_sysreg_s(0, SYS_PMBSR_EL1); + } + + /* + * This may also contain ECOUNT, but nobody else should + * be looking at period_left, since we forbid frequency + * based sampling. 
+ */ + local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1)); + hwc->state |= PERF_HES_UPTODATE; + } + + hwc->state |= PERF_HES_STOPPED; +} + +static int arm_spe_pmu_add(struct perf_event *event, int flags) +{ + int ret = 0; + struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu; + + if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) + return -ENOENT; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) { + arm_spe_pmu_start(event, PERF_EF_RELOAD); + if (hwc->state & PERF_HES_STOPPED) + ret = -EINVAL; + } + + return ret; +} + +static void arm_spe_pmu_del(struct perf_event *event, int flags) +{ + arm_spe_pmu_stop(event, PERF_EF_UPDATE); +} + +static void arm_spe_pmu_read(struct perf_event *event) +{ +} + +static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages, + bool snapshot) +{ + int i; + struct page **pglist; + struct arm_spe_pmu_buf *buf; + + /* We need at least two pages for this to work. */ + if (nr_pages < 2) + return NULL; + + /* + * We require an even number of pages for snapshot mode, so that + * we can effectively treat the buffer as consisting of two equal + * parts and give userspace a fighting chance of getting some + * useful data out of it. + */ + if (!nr_pages || (snapshot && (nr_pages & 1))) + return NULL; + + if (cpu == -1) + cpu = raw_smp_processor_id(); + + buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu)); + if (!buf) + return NULL; + + pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL); + if (!pglist) + goto out_free_buf; + + for (i = 0; i < nr_pages; ++i) { + struct page *page = virt_to_page(pages[i]); + + if (PagePrivate(page)) { + pr_warn("unexpected high-order page for auxbuf!"); + goto out_free_pglist; + } + + pglist[i] = virt_to_page(pages[i]); + } + + buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL); + if (!buf->base) + goto out_free_pglist; + + buf->nr_pages = nr_pages; + buf->snapshot = snapshot; + + kfree(pglist); + return buf; + +out_free_pglist: + kfree(pglist); +out_free_buf: + kfree(buf); + return NULL; +} + +static void arm_spe_pmu_free_aux(void *aux) +{ + struct arm_spe_pmu_buf *buf = aux; + + vunmap(buf->base); + kfree(buf); +} + +/* Initialisation and teardown functions */ +static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu) +{ + static atomic_t pmu_idx = ATOMIC_INIT(-1); + + int idx; + char *name; + struct device *dev = &spe_pmu->pdev->dev; + + spe_pmu->pmu = (struct pmu) { + .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE, + .attr_groups = arm_spe_pmu_attr_groups, + /* + * We hitch a ride on the software context here, so that + * we can support per-task profiling (which is not possible + * with the invalid context as it doesn't get sched callbacks). + * This requires that userspace either uses a dummy event for + * perf_event_open, since the aux buffer is not setup until + * a subsequent mmap, or creates the profiling event in a + * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it + * once the buffer has been created. 
+ */ + .task_ctx_nr = perf_sw_context, + .event_init = arm_spe_pmu_event_init, + .add = arm_spe_pmu_add, + .del = arm_spe_pmu_del, + .start = arm_spe_pmu_start, + .stop = arm_spe_pmu_stop, + .read = arm_spe_pmu_read, + .setup_aux = arm_spe_pmu_setup_aux, + .free_aux = arm_spe_pmu_free_aux, + }; + + idx = atomic_inc_return(&pmu_idx); + name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx); + return perf_pmu_register(&spe_pmu->pmu, name, -1); +} + +static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu) +{ + perf_pmu_unregister(&spe_pmu->pmu); +} + +static void __arm_spe_pmu_dev_probe(void *info) +{ + int fld; + u64 reg; + struct arm_spe_pmu *spe_pmu = info; + struct device *dev = &spe_pmu->pdev->dev; + + fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1), + ID_AA64DFR0_PMSVER_SHIFT); + if (!fld) { + dev_err(dev, + "unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n", + fld, smp_processor_id()); + return; + } + + /* Read PMBIDR first to determine whether or not we have access */ + reg = read_sysreg_s(SYS_PMBIDR_EL1); + if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) { + dev_err(dev, + "profiling buffer owned by higher exception level\n"); + return; + } + + /* Minimum alignment. If it's out-of-range, then fail the probe */ + fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK; + spe_pmu->align = 1 << fld; + if (spe_pmu->align > SZ_2K) { + dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n", + fld, smp_processor_id()); + return; + } + + /* It's now safe to read PMSIDR and figure out what we've got */ + reg = read_sysreg_s(SYS_PMSIDR_EL1); + if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT; + + if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP; + + if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT; + + if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST; + + if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_LDS; + + if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_ERND; + + /* This field has a spaced out encoding, so just use a look-up */ + fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK; + switch (fld) { + case 0: + spe_pmu->min_period = 256; + break; + case 2: + spe_pmu->min_period = 512; + break; + case 3: + spe_pmu->min_period = 768; + break; + case 4: + spe_pmu->min_period = 1024; + break; + case 5: + spe_pmu->min_period = 1536; + break; + case 6: + spe_pmu->min_period = 2048; + break; + case 7: + spe_pmu->min_period = 3072; + break; + default: + dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n", + fld); + /* Fallthrough */ + case 8: + spe_pmu->min_period = 4096; + } + + /* Maximum record size. 
If it's out-of-range, then fail the probe */ + fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK; + spe_pmu->max_record_sz = 1 << fld; + if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) { + dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n", + fld, smp_processor_id()); + return; + } + + fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK; + switch (fld) { + default: + dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n", + fld); + /* Fallthrough */ + case 2: + spe_pmu->counter_sz = 12; + } + + dev_info(dev, + "probed for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n", + cpumask_pr_args(&spe_pmu->supported_cpus), + spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features); + + spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED; + return; +} + +static void __arm_spe_pmu_reset_local(void) +{ + /* + * This is probably overkill, as we have no idea where we're + * draining any buffered data to... + */ + arm_spe_pmu_disable_and_drain_local(); + + /* Reset the buffer base pointer */ + write_sysreg_s(0, SYS_PMBPTR_EL1); + isb(); + + /* Clear any pending management interrupts */ + write_sysreg_s(0, SYS_PMBSR_EL1); + isb(); +} + +static void __arm_spe_pmu_setup_one(void *info) +{ + struct arm_spe_pmu *spe_pmu = info; + + __arm_spe_pmu_reset_local(); + enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE); +} + +static void __arm_spe_pmu_stop_one(void *info) +{ + struct arm_spe_pmu *spe_pmu = info; + + disable_percpu_irq(spe_pmu->irq); + __arm_spe_pmu_reset_local(); +} + +static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node) +{ + struct arm_spe_pmu *spe_pmu; + + spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node); + if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) + return 0; + + __arm_spe_pmu_setup_one(spe_pmu); + return 0; +} + +static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node) +{ + struct arm_spe_pmu *spe_pmu; + + spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node); + if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) + return 0; + + __arm_spe_pmu_stop_one(spe_pmu); + return 0; +} + +static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu) +{ + int ret; + cpumask_t *mask = &spe_pmu->supported_cpus; + + /* Make sure we probe the hardware on a relevant CPU */ + ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1); + if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED)) + return -ENXIO; + + /* Request our PPIs (note that the IRQ is still disabled) */ + ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME, + spe_pmu->handle); + if (ret) + return ret; + + /* + * Register our hotplug notifier now so we don't miss any events. + * This will enable the IRQ for any supported CPUs that are already + * up. 
+ */ + ret = cpuhp_state_add_instance(arm_spe_pmu_online, + &spe_pmu->hotplug_node); + if (ret) + free_percpu_irq(spe_pmu->irq, spe_pmu->handle); + + return ret; +} + +static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu) +{ + cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node); + free_percpu_irq(spe_pmu->irq, spe_pmu->handle); +} + +/* Driver and device probing */ +static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu) +{ + struct platform_device *pdev = spe_pmu->pdev; + int irq = platform_get_irq(pdev, 0); + + if (irq < 0) { + dev_err(&pdev->dev, "failed to get IRQ (%d)\n", irq); + return -ENXIO; + } + + if (!irq_is_percpu(irq)) { + dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq); + return -EINVAL; + } + + if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) { + dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq); + return -EINVAL; + } + + spe_pmu->irq = irq; + return 0; +} + +static const struct of_device_id arm_spe_pmu_of_match[] = { + { .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 }, + { /* Sentinel */ }, +}; + +static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev) +{ + int ret; + struct arm_spe_pmu *spe_pmu; + struct device *dev = &pdev->dev; + + spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL); + if (!spe_pmu) { + dev_err(dev, "failed to allocate spe_pmu\n"); + return -ENOMEM; + } + + spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle)); + if (!spe_pmu->handle) + return -ENOMEM; + + spe_pmu->pdev = pdev; + platform_set_drvdata(pdev, spe_pmu); + + ret = arm_spe_pmu_irq_probe(spe_pmu); + if (ret) + goto out_free_handle; + + ret = arm_spe_pmu_dev_init(spe_pmu); + if (ret) + goto out_free_handle; + + ret = arm_spe_pmu_perf_init(spe_pmu); + if (ret) + goto out_teardown_dev; + + return 0; + +out_teardown_dev: + arm_spe_pmu_dev_teardown(spe_pmu); +out_free_handle: + free_percpu(spe_pmu->handle); + return ret; +} + +static int arm_spe_pmu_device_remove(struct platform_device *pdev) +{ + struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev); + + arm_spe_pmu_perf_destroy(spe_pmu); + arm_spe_pmu_dev_teardown(spe_pmu); + free_percpu(spe_pmu->handle); + return 0; +} + +static struct platform_driver arm_spe_pmu_driver = { + .driver = { + .name = DRVNAME, + .of_match_table = of_match_ptr(arm_spe_pmu_of_match), + }, + .probe = arm_spe_pmu_device_dt_probe, + .remove = arm_spe_pmu_device_remove, +}; + +static int __init arm_spe_pmu_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME, + arm_spe_pmu_cpu_startup, + arm_spe_pmu_cpu_teardown); + if (ret < 0) + return ret; + arm_spe_pmu_online = ret; + + ret = platform_driver_register(&arm_spe_pmu_driver); + if (ret) + cpuhp_remove_multi_state(arm_spe_pmu_online); + + return ret; +} + +static void __exit arm_spe_pmu_exit(void) +{ + platform_driver_unregister(&arm_spe_pmu_driver); + cpuhp_remove_multi_state(arm_spe_pmu_online); +} + +module_init(arm_spe_pmu_init); +module_exit(arm_spe_pmu_exit); + +MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension"); +MODULE_AUTHOR("Will Deacon "); +MODULE_LICENSE("GPL v2"); -- cgit From 3f7c86b2382ea721c4fe9a7532bf5dc1b414ec1a Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Tue, 17 Oct 2017 14:11:30 +0100 Subject: arm64: Update fault_info table with new exception types Based on: ARM Architecture Reference Manual, ARMv8 (DDI 0487B.b). 
ARMv8.1 introduces the optional feature ARMv8.1-TTHM which can trigger a new type of memory abort. This exception is triggered when a hardware update of the page table flags is not atomic with regard to other memory accesses. Replace the corresponding unknown entry with a more accurate one. Cf. Section D10.2.28 ESR_ELx, Exception Syndrome Register (p D10-2381), section D4.4.11 Restriction on memory types for hardware updates on page tables (p D4-2116 - D4-2117). ARMv8.2 does not add new exception types; however, it is worth mentioning that when the RAS feature (obligatory in ARMv8.2, optional for ARMv8.{0,1}) is implemented, exceptions related to "Synchronous parity or ECC error on memory access, not on translation table walk" become reserved and should not occur. Signed-off-by: Julien Thierry Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/mm/fault.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index d224c9d384a1..84a586dd063a 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -631,14 +631,14 @@ static const struct fault_info fault_info[] = { { do_sea, SIGBUS, 0, "level 1 (translation table walk)" }, { do_sea, SIGBUS, 0, "level 2 (translation table walk)" }, { do_sea, SIGBUS, 0, "level 3 (translation table walk)" }, - { do_sea, SIGBUS, 0, "synchronous parity or ECC error" }, + { do_sea, SIGBUS, 0, "synchronous parity or ECC error" }, // Reserved when RAS is implemented { do_bad, SIGBUS, 0, "unknown 25" }, { do_bad, SIGBUS, 0, "unknown 26" }, { do_bad, SIGBUS, 0, "unknown 27" }, - { do_sea, SIGBUS, 0, "level 0 synchronous parity error (translation table walk)" }, - { do_sea, SIGBUS, 0, "level 1 synchronous parity error (translation table walk)" }, - { do_sea, SIGBUS, 0, "level 2 synchronous parity error (translation table walk)" }, - { do_sea, SIGBUS, 0, "level 3 synchronous parity error (translation table walk)" }, + { do_sea, SIGBUS, 0, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented + { do_sea, SIGBUS, 0, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented + { do_sea, SIGBUS, 0, "level 2 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented + { do_sea, SIGBUS, 0, "level 3 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented { do_bad, SIGBUS, 0, "unknown 32" }, { do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" }, { do_bad, SIGBUS, 0, "unknown 34" }, @@ -656,7 +656,7 @@ static const struct fault_info fault_info[] = { { do_bad, SIGBUS, 0, "unknown 46" }, { do_bad, SIGBUS, 0, "unknown 47" }, { do_bad, SIGBUS, 0, "TLB conflict abort" }, - { do_bad, SIGBUS, 0, "unknown 49" }, + { do_bad, SIGBUS, 0, "Unsupported atomic hardware update fault" }, { do_bad, SIGBUS, 0, "unknown 50" }, { do_bad, SIGBUS, 0, "unknown 51" }, { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, -- cgit From 3125b5b2a3b4400a2925b27ce1cede23f26b98dc Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Thu, 19 Oct 2017 19:05:16 +0800 Subject: Documentation: perf: hisi: Documentation for HiSilicon SoC PMU driver This patch adds documentation for the uncore PMUs on HiSilicon SoC.
Acked-by: Mark Rutland Reviewed-by: Jonathan Cameron Signed-off-by: Shaokun Zhang Signed-off-by: Anurup M Signed-off-by: Will Deacon --- Documentation/perf/hisi-pmu.txt | 53 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 Documentation/perf/hisi-pmu.txt diff --git a/Documentation/perf/hisi-pmu.txt b/Documentation/perf/hisi-pmu.txt new file mode 100644 index 000000000000..267a028b2741 --- /dev/null +++ b/Documentation/perf/hisi-pmu.txt @@ -0,0 +1,53 @@ +HiSilicon SoC uncore Performance Monitoring Unit (PMU) +====================================================== +The HiSilicon SoC chip includes various independent system device PMUs +such as L3 cache (L3C), Hydra Home Agent (HHA) and DDRC. These PMUs are +independent and have hardware logic to gather statistics and performance +information. + +The HiSilicon SoC encapsulates multiple CPU and IO dies. Each CPU cluster +(CCL) is made up of 4 CPU cores sharing one L3 cache; each CPU die is +called Super CPU cluster (SCCL) and is made up of 6 CCLs. Each SCCL has +two HHAs (0 - 1) and four DDRCs (0 - 3). + +HiSilicon SoC uncore PMU driver +--------------------------------------- +Each device PMU has separate registers for event counting, control and +interrupt, and the PMU driver registers a separate perf PMU for each of them +(L3C, HHA, DDRC, etc.). The available events and configuration options are +described in sysfs, see: +/sys/devices/hisi_sccl{X}_<l3c/hha/ddrc>{Y}/, or +/sys/bus/event_source/devices/hisi_sccl{X}_<l3c/hha/ddrc>{Y}. +The "perf list" command lists the available events from sysfs. + +Each L3C, HHA and DDRC is registered as a separate PMU with perf. The PMU +name will appear in the event listing as hisi_sccl<sccl-id>_<module><index-id>, +where "sccl-id" is the identifier of the SCCL and "index-id" is the index of +the module. +e.g. hisi_sccl3_l3c0/rd_hit_cpipe is the READ_HIT_CPIPE event of L3C index #0 in +SCCL ID #3. +e.g. hisi_sccl1_hha0/rx_operations is the RX_OPERATIONS event of HHA index #0 in +SCCL ID #1. + +The driver also provides a "cpumask" sysfs attribute, which shows the CPU core +ID used to count the uncore PMU event. + +Example usage of perf: +$# perf list +hisi_sccl3_l3c0/rd_hit_cpipe/ [kernel PMU event] +------------------------------------------ +hisi_sccl3_l3c0/wr_hit_cpipe/ [kernel PMU event] +------------------------------------------ +hisi_sccl1_l3c0/rd_hit_cpipe/ [kernel PMU event] +------------------------------------------ +hisi_sccl1_l3c0/wr_hit_cpipe/ [kernel PMU event] +------------------------------------------ + +$# perf stat -a -e hisi_sccl3_l3c0/rd_hit_cpipe/ sleep 5 +$# perf stat -a -e hisi_sccl3_l3c0/config=0x02/ sleep 5 + +The current driver does not support sampling, so "perf record" is unsupported. +Attaching to a task is also unsupported, as the events are all uncore. + +Note: Please contact the maintainer for a complete list of events supported by +the PMU devices in the SoC, and for further information if needed. -- cgit From 6ce4ef94195da926245b58311119ed9d52428fdc Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Thu, 19 Oct 2017 19:05:17 +0800 Subject: perf: hisi: Add support for HiSilicon SoC uncore PMU driver This patch adds support for the HiSilicon SoC uncore PMU driver framework and interfaces.
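For orientation, here is a minimal, hypothetical sketch (not part of this patch) of how a device-specific uncore driver is expected to sit on top of this framework; the names hisi_foo_pmu_probe and foo_mmio_ops, and the counter geometry, are placeholders, and the L3C, HHA and DDRC drivers added later in this series are the authoritative users:

#include <linux/perf_event.h>
#include <linux/platform_device.h>

#include "hisi_uncore_pmu.h"

/*
 * Hypothetical device probe: only the MMIO accessors behind 'foo_mmio_ops'
 * (write_evtype, enable/disable_counter, read/write_counter, ...) are
 * device specific; every perf-facing callback comes from hisi_uncore_pmu.c.
 */
static int hisi_foo_pmu_probe(struct platform_device *pdev,
			      const struct hisi_uncore_ops *foo_mmio_ops)
{
	struct hisi_pmu *foo_pmu;

	foo_pmu = devm_kzalloc(&pdev->dev, sizeof(*foo_pmu), GFP_KERNEL);
	if (!foo_pmu)
		return -ENOMEM;

	foo_pmu->ops = foo_mmio_ops;
	foo_pmu->dev = &pdev->dev;
	foo_pmu->on_cpu = -1;		/* picked by hisi_uncore_pmu_online_cpu() */
	foo_pmu->num_counters = 8;	/* placeholder geometry */
	foo_pmu->counter_bits = 48;
	foo_pmu->check_event = 0xff;	/* highest valid event code */

	/* A real driver would also request its IRQ and add a cpuhp instance here */

	foo_pmu->pmu = (struct pmu) {
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= hisi_uncore_pmu_event_init,
		.pmu_enable	= hisi_uncore_pmu_enable,
		.pmu_disable	= hisi_uncore_pmu_disable,
		.add		= hisi_uncore_pmu_add,
		.del		= hisi_uncore_pmu_del,
		.start		= hisi_uncore_pmu_start,
		.stop		= hisi_uncore_pmu_stop,
		.read		= hisi_uncore_pmu_read,
	};

	return perf_pmu_register(&foo_pmu->pmu, "hisi_foo_pmu", -1);
}

Group validation, the 2^(counter_bits - 1) start value used for overflow handling, and the CPU hotplug/migration logic are all shared, so each device driver only has to describe its registers and events.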
Acked-by: Mark Rutland Reviewed-by: Jonathan Cameron Signed-off-by: Shaokun Zhang Signed-off-by: Anurup M [will: Fix leader accounting in uncore group validation] Signed-off-by: Will Deacon --- drivers/perf/Kconfig | 7 + drivers/perf/Makefile | 1 + drivers/perf/hisilicon/Makefile | 1 + drivers/perf/hisilicon/hisi_uncore_pmu.c | 447 +++++++++++++++++++++++++++++++ drivers/perf/hisilicon/hisi_uncore_pmu.h | 102 +++++++ 5 files changed, 558 insertions(+) create mode 100644 drivers/perf/hisilicon/Makefile create mode 100644 drivers/perf/hisilicon/hisi_uncore_pmu.c create mode 100644 drivers/perf/hisilicon/hisi_uncore_pmu.h diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 43525a9dc947..b8f44b068fc6 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -17,6 +17,13 @@ config ARM_PMU_ACPI depends on ARM_PMU && ACPI def_bool y +config HISI_PMU + bool "HiSilicon SoC PMU" + depends on ARM64 && ACPI + help + Support for HiSilicon SoC uncore performance monitoring + unit (PMU), such as: L3C, HHA and DDRC. + config QCOM_L2_PMU bool "Qualcomm Technologies L2-cache PMU" depends on ARCH_QCOM && ARM64 && ACPI diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index eaee60cf4b1b..4f5815d3126c 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o +obj-$(CONFIG_HISI_PMU) += hisilicon/ obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile new file mode 100644 index 000000000000..2783bb31e72b --- /dev/null +++ b/drivers/perf/hisilicon/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c new file mode 100644 index 000000000000..7ed24b954422 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -0,0 +1,447 @@ +/* + * HiSilicon SoC Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Anurup M + * Shaokun Zhang + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include + +#include + +#include "hisi_uncore_pmu.h" + +#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff) +#define HISI_MAX_PERIOD(nr) (BIT_ULL(nr) - 1) + +/* + * PMU format attributes + */ +ssize_t hisi_format_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(buf, "%s\n", (char *)eattr->var); +} + +/* + * PMU event attributes + */ +ssize_t hisi_event_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var); +} + +/* + * sysfs cpumask attributes. 
For uncore PMU, we only have a single CPU to show + */ +ssize_t hisi_cpumask_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev)); + + return sprintf(buf, "%d\n", hisi_pmu->on_cpu); +} + +static bool hisi_validate_event_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + /* Include count for the event */ + int counters = 1; + + if (!is_software_event(leader)) { + /* + * We must NOT create groups containing mixed PMUs, although + * software events are acceptable + */ + if (leader->pmu != event->pmu) + return false; + + /* Increment counter for the leader */ + if (leader != event) + counters++; + } + + list_for_each_entry(sibling, &event->group_leader->sibling_list, + group_entry) { + if (is_software_event(sibling)) + continue; + if (sibling->pmu != event->pmu) + return false; + /* Increment counter for each sibling */ + counters++; + } + + /* The group can not count events more than the counters in the HW */ + return counters <= hisi_pmu->num_counters; +} + +int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx) +{ + return idx >= 0 && idx < hisi_pmu->num_counters; +} + +int hisi_uncore_pmu_get_event_idx(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + unsigned long *used_mask = hisi_pmu->pmu_events.used_mask; + u32 num_counters = hisi_pmu->num_counters; + int idx; + + idx = find_first_zero_bit(used_mask, num_counters); + if (idx == num_counters) + return -EAGAIN; + + set_bit(idx, used_mask); + + return idx; +} + +static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx) +{ + if (!hisi_uncore_pmu_counter_valid(hisi_pmu, idx)) { + dev_err(hisi_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + clear_bit(idx, hisi_pmu->pmu_events.used_mask); +} + +int hisi_uncore_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hisi_pmu *hisi_pmu; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* + * We do not support sampling as the counters are all + * shared by all CPU cores in a CPU die(SCCL). Also we + * do not support attach to a task(per-process mode) + */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + /* counters do not have these bits */ + if (event->attr.exclude_user || + event->attr.exclude_kernel || + event->attr.exclude_host || + event->attr.exclude_guest || + event->attr.exclude_hv || + event->attr.exclude_idle) + return -EINVAL; + + /* + * The uncore counters not specific to any CPU, so cannot + * support per-task + */ + if (event->cpu < 0) + return -EINVAL; + + /* + * Validate if the events in group does not exceed the + * available counters in hardware. + */ + if (!hisi_validate_event_group(event)) + return -EINVAL; + + hisi_pmu = to_hisi_pmu(event->pmu); + if (event->attr.config > hisi_pmu->check_event) + return -EINVAL; + + if (hisi_pmu->on_cpu == -1) + return -EINVAL; + /* + * We don't assign an index until we actually place the event onto + * hardware. Use -1 to signify that we haven't decided where to put it + * yet. 
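+ * The actual counter index is assigned later, when hisi_uncore_pmu_add() calls ops->get_event_idx().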
+ */ + hwc->idx = -1; + hwc->config_base = event->attr.config; + + /* Enforce to use the same CPU for all events in this PMU */ + event->cpu = hisi_pmu->on_cpu; + + return 0; +} + +/* + * Set the counter to count the event that we're interested in, + * and enable interrupt and counter. + */ +static void hisi_uncore_pmu_enable_event(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx, + HISI_GET_EVENTID(event)); + + hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc); + hisi_pmu->ops->enable_counter(hisi_pmu, hwc); +} + +/* + * Disable counter and interrupt. + */ +static void hisi_uncore_pmu_disable_event(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pmu->ops->disable_counter(hisi_pmu, hwc); + hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc); +} + +void hisi_uncore_pmu_set_event_period(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * The HiSilicon PMU counters support 32 bits or 48 bits, depending on + * the PMU. We reduce it to 2^(counter_bits - 1) to account for the + * extreme interrupt latency. So we could hopefully handle the overflow + * interrupt before another 2^(counter_bits - 1) events occur and the + * counter overtakes its previous value. + */ + u64 val = BIT_ULL(hisi_pmu->counter_bits - 1); + + local64_set(&hwc->prev_count, val); + /* Write start value to the hardware event counter */ + hisi_pmu->ops->write_counter(hisi_pmu, hwc, val); +} + +void hisi_uncore_pmu_event_update(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 delta, prev_raw_count, new_raw_count; + + do { + /* Read the count from the counter register */ + new_raw_count = hisi_pmu->ops->read_counter(hisi_pmu, hwc); + prev_raw_count = local64_read(&hwc->prev_count); + } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count); + /* + * compute the delta + */ + delta = (new_raw_count - prev_raw_count) & + HISI_MAX_PERIOD(hisi_pmu->counter_bits); + local64_add(delta, &event->count); +} + +void hisi_uncore_pmu_start(struct perf_event *event, int flags) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + hwc->state = 0; + hisi_uncore_pmu_set_event_period(event); + + if (flags & PERF_EF_RELOAD) { + u64 prev_raw_count = local64_read(&hwc->prev_count); + + hisi_pmu->ops->write_counter(hisi_pmu, hwc, prev_raw_count); + } + + hisi_uncore_pmu_enable_event(event); + perf_event_update_userpage(event); +} + +void hisi_uncore_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + hisi_uncore_pmu_disable_event(event); + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if (hwc->state & PERF_HES_UPTODATE) + return; + + /* Read hardware counter and update the perf counter statistics */ + hisi_uncore_pmu_event_update(event); + hwc->state |= PERF_HES_UPTODATE; +} + +int hisi_uncore_pmu_add(struct perf_event *event, int flags) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state = PERF_HES_STOPPED | 
PERF_HES_UPTODATE; + + /* Get an available counter index for counting */ + idx = hisi_pmu->ops->get_event_idx(event); + if (idx < 0) + return idx; + + event->hw.idx = idx; + hisi_pmu->pmu_events.hw_events[idx] = event; + + if (flags & PERF_EF_START) + hisi_uncore_pmu_start(event, PERF_EF_RELOAD); + + return 0; +} + +void hisi_uncore_pmu_del(struct perf_event *event, int flags) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_uncore_pmu_stop(event, PERF_EF_UPDATE); + hisi_uncore_pmu_clear_event_idx(hisi_pmu, hwc->idx); + perf_event_update_userpage(event); + hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL; +} + +void hisi_uncore_pmu_read(struct perf_event *event) +{ + /* Read hardware counter and update the perf counter statistics */ + hisi_uncore_pmu_event_update(event); +} + +void hisi_uncore_pmu_enable(struct pmu *pmu) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu); + int enabled = bitmap_weight(hisi_pmu->pmu_events.used_mask, + hisi_pmu->num_counters); + + if (!enabled) + return; + + hisi_pmu->ops->start_counters(hisi_pmu); +} + +void hisi_uncore_pmu_disable(struct pmu *pmu) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu); + + hisi_pmu->ops->stop_counters(hisi_pmu); +} + +/* + * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1. + * If multi-threading is supported, SCCL_ID is in MPIDR[aff3] and CCL_ID + * is in MPIDR[aff2]; if not, SCCL_ID is in MPIDR[aff2] and CCL_ID is + * in MPIDR[aff1]. If this changes in future, this shall be updated. + */ +static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id) +{ + u64 mpidr = read_cpuid_mpidr(); + + if (mpidr & MPIDR_MT_BITMASK) { + if (sccl_id) + *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3); + if (ccl_id) + *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); + } else { + if (sccl_id) + *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); + if (ccl_id) + *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); + } +} + +/* + * Check whether the CPU is associated with this uncore PMU + */ +static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu) +{ + int sccl_id, ccl_id; + + if (hisi_pmu->ccl_id == -1) { + /* If CCL_ID is -1, the PMU only shares the same SCCL */ + hisi_read_sccl_and_ccl_id(&sccl_id, NULL); + + return sccl_id == hisi_pmu->sccl_id; + } + + hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id); + + return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id; +} + +int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu, + node); + + if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu)) + return 0; + + cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus); + + /* If another CPU is already managing this PMU, simply return. 
*/ + if (hisi_pmu->on_cpu != -1) + return 0; + + /* Use this CPU in cpumask for event counting */ + hisi_pmu->on_cpu = cpu; + + /* Overflow interrupt also should use the same CPU */ + WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu))); + + return 0; +} + +int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu, + node); + cpumask_t pmu_online_cpus; + unsigned int target; + + if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus)) + return 0; + + /* Nothing to do if this CPU doesn't own the PMU */ + if (hisi_pmu->on_cpu != cpu) + return 0; + + /* Give up ownership of the PMU */ + hisi_pmu->on_cpu = -1; + + /* Choose a new CPU to migrate ownership of the PMU to */ + cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus, + cpu_online_mask); + target = cpumask_any_but(&pmu_online_cpus, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target); + /* Use this CPU for event counting */ + hisi_pmu->on_cpu = target; + WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target))); + + return 0; +} diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h new file mode 100644 index 000000000000..f21226a0e9c6 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h @@ -0,0 +1,102 @@ +/* + * HiSilicon SoC Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Anurup M + * Shaokun Zhang + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __HISI_UNCORE_PMU_H__ +#define __HISI_UNCORE_PMU_H__ + +#include +#include +#include +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "hisi_pmu: " fmt + +#define HISI_MAX_COUNTERS 0x10 +#define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu)) + +#define HISI_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]) { \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } \ + })[0].attr.attr) + +#define HISI_PMU_FORMAT_ATTR(_name, _config) \ + HISI_PMU_ATTR(_name, hisi_format_sysfs_show, (void *)_config) +#define HISI_PMU_EVENT_ATTR(_name, _config) \ + HISI_PMU_ATTR(_name, hisi_event_sysfs_show, (unsigned long)_config) + +struct hisi_pmu; + +struct hisi_uncore_ops { + void (*write_evtype)(struct hisi_pmu *, int, u32); + int (*get_event_idx)(struct perf_event *); + u64 (*read_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*write_counter)(struct hisi_pmu *, struct hw_perf_event *, u64); + void (*enable_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*disable_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*enable_counter_int)(struct hisi_pmu *, struct hw_perf_event *); + void (*disable_counter_int)(struct hisi_pmu *, struct hw_perf_event *); + void (*start_counters)(struct hisi_pmu *); + void (*stop_counters)(struct hisi_pmu *); +}; + +struct hisi_pmu_hwevents { + struct perf_event *hw_events[HISI_MAX_COUNTERS]; + DECLARE_BITMAP(used_mask, HISI_MAX_COUNTERS); +}; + +/* Generic pmu struct for different pmu types */ +struct hisi_pmu { + struct pmu pmu; + const struct hisi_uncore_ops *ops; + struct hisi_pmu_hwevents pmu_events; + /* associated_cpus: All CPUs associated with the PMU */ + cpumask_t associated_cpus; + /* CPU used for counting */ + int on_cpu; + int irq; + struct device *dev; + struct hlist_node node; + int sccl_id; + int ccl_id; + void __iomem *base; + /* the ID of the PMU modules */ + u32 index_id; + int num_counters; + int counter_bits; + /* check event code range */ + int check_event; +}; + +int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx); +int hisi_uncore_pmu_get_event_idx(struct perf_event *event); +void hisi_uncore_pmu_read(struct perf_event *event); +int hisi_uncore_pmu_add(struct perf_event *event, int flags); +void hisi_uncore_pmu_del(struct perf_event *event, int flags); +void hisi_uncore_pmu_start(struct perf_event *event, int flags); +void hisi_uncore_pmu_stop(struct perf_event *event, int flags); +void hisi_uncore_pmu_set_event_period(struct perf_event *event); +void hisi_uncore_pmu_event_update(struct perf_event *event); +int hisi_uncore_pmu_event_init(struct perf_event *event); +void hisi_uncore_pmu_enable(struct pmu *pmu); +void hisi_uncore_pmu_disable(struct pmu *pmu); +ssize_t hisi_event_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hisi_format_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hisi_cpumask_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node); +int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node); +#endif /* __HISI_UNCORE_PMU_H__ */ -- cgit From 2940bc4333707a05e69b3ffd737bda0dc0c3004f Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Thu, 19 Oct 2017 19:05:18 +0800 Subject: perf: hisi: Add support for HiSilicon SoC L3C PMU driver This patch adds support for L3C PMU driver in HiSilicon SoC chip, Each L3C has own control, counter and interrupt registers 
and is an separate PMU. For each L3C PMU, it has 8-programable counters and each counter is free-running. Interrupt is supported to handle counter (48-bits) overflow. Acked-by: Mark Rutland Reviewed-by: Jonathan Cameron Signed-off-by: Shaokun Zhang Signed-off-by: Anurup M Signed-off-by: Will Deacon --- drivers/perf/hisilicon/Makefile | 2 +- drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 463 +++++++++++++++++++++++++++ include/linux/cpuhotplug.h | 1 + 3 files changed, 465 insertions(+), 1 deletion(-) create mode 100644 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile index 2783bb31e72b..4a3d3e6002a6 100644 --- a/drivers/perf/hisilicon/Makefile +++ b/drivers/perf/hisilicon/Makefile @@ -1 +1 @@ -obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o +obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c new file mode 100644 index 000000000000..0bde5d919b2e --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -0,0 +1,463 @@ +/* + * HiSilicon SoC L3C uncore Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Anurup M + * Shaokun Zhang + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hisi_uncore_pmu.h" + +/* L3C register definition */ +#define L3C_PERF_CTRL 0x0408 +#define L3C_INT_MASK 0x0800 +#define L3C_INT_STATUS 0x0808 +#define L3C_INT_CLEAR 0x080c +#define L3C_EVENT_CTRL 0x1c00 +#define L3C_EVENT_TYPE0 0x1d00 +/* + * Each counter is 48-bits and [48:63] are reserved + * which are Read-As-Zero and Writes-Ignored. + */ +#define L3C_CNTR0_LOWER 0x1e00 + +/* L3C has 8-counters */ +#define L3C_NR_COUNTERS 0x8 + +#define L3C_PERF_CTRL_EN 0x20000 +#define L3C_EVTYPE_NONE 0xff + +/* + * Select the counter register offset using the counter index + */ +static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx) +{ + return (L3C_CNTR0_LOWER + (cntr_idx * 8)); +} + +static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + + if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) { + dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + /* Read 64-bits and the upper 16 bits are RAZ */ + return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx)); +} + +static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc, u64 val) +{ + u32 idx = hwc->idx; + + if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) { + dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + /* Write 64-bits and the upper 16 bits are WI */ + writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx)); +} + +static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(L3C_EVENT_TYPE0/1). + * There are 2 event select registers for the 8 hardware counters. + * Event code is 8-bits and for the former 4 hardware counters, + * L3C_EVENT_TYPE0 is chosen. For the latter 4 hardware counters, + * L3C_EVENT_TYPE1 is chosen. 
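+ * For example, counter index 5 uses byte 1 of L3C_EVENT_TYPE1 (reg = L3C_EVENT_TYPE0 + 4, shift = 8).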
+ */ + reg = L3C_EVENT_TYPE0 + (idx / 4) * 4; + reg_idx = idx % 4; + shift = 8 * reg_idx; + + /* Write event code to L3C_EVENT_TYPEx Register */ + val = readl(l3c_pmu->base + reg); + val &= ~(L3C_EVTYPE_NONE << shift); + val |= (type << shift); + writel(val, l3c_pmu->base + reg); +} + +static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu) +{ + u32 val; + + /* + * Set perf_enable bit in L3C_PERF_CTRL register to start counting + * for all enabled counters. + */ + val = readl(l3c_pmu->base + L3C_PERF_CTRL); + val |= L3C_PERF_CTRL_EN; + writel(val, l3c_pmu->base + L3C_PERF_CTRL); +} + +static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu) +{ + u32 val; + + /* + * Clear perf_enable bit in L3C_PERF_CTRL register to stop counting + * for all enabled counters. + */ + val = readl(l3c_pmu->base + L3C_PERF_CTRL); + val &= ~(L3C_PERF_CTRL_EN); + writel(val, l3c_pmu->base + L3C_PERF_CTRL); +} + +static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in L3C_EVENT_CTRL register */ + val = readl(l3c_pmu->base + L3C_EVENT_CTRL); + val |= (1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_EVENT_CTRL); +} + +static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in L3C_EVENT_CTRL register */ + val = readl(l3c_pmu->base + L3C_EVENT_CTRL); + val &= ~(1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_EVENT_CTRL); +} + +static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(l3c_pmu->base + L3C_INT_MASK); + /* Write 0 to enable interrupt */ + val &= ~(1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_INT_MASK); +} + +static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(l3c_pmu->base + L3C_INT_MASK); + /* Write 1 to mask interrupt */ + val |= (1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_INT_MASK); +} + +static irqreturn_t hisi_l3c_pmu_isr(int irq, void *dev_id) +{ + struct hisi_pmu *l3c_pmu = dev_id; + struct perf_event *event; + unsigned long overflown; + int idx; + + /* Read L3C_INT_STATUS register */ + overflown = readl(l3c_pmu->base + L3C_INT_STATUS); + if (!overflown) + return IRQ_NONE; + + /* + * Find the counter index which overflowed if the bit was set + * and handle it. 
+ */ + for_each_set_bit(idx, &overflown, L3C_NR_COUNTERS) { + /* Write 1 to clear the IRQ status flag */ + writel((1 << idx), l3c_pmu->base + L3C_INT_CLEAR); + + /* Get the corresponding event struct */ + event = l3c_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + + hisi_uncore_pmu_event_update(event); + hisi_uncore_pmu_set_event_period(event); + } + + return IRQ_HANDLED; +} + +static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + /* Read and init IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "L3C PMU get irq fail; irq:%d\n", irq); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(&pdev->dev), l3c_pmu); + if (ret < 0) { + dev_err(&pdev->dev, + "Fail to request IRQ:%d ret:%d\n", irq, ret); + return ret; + } + + l3c_pmu->irq = irq; + + return 0; +} + +static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = { + { "HISI0213", }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match); + +static int hisi_l3c_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *l3c_pmu) +{ + unsigned long long id; + struct resource *res; + acpi_status status; + + status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), + "_UID", NULL, &id); + if (ACPI_FAILURE(status)) + return -EINVAL; + + l3c_pmu->index_id = id; + + /* + * Use the SCCL_ID and CCL_ID to identify the L3C PMU, while + * SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1]. + */ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &l3c_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read l3c sccl-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id", + &l3c_pmu->ccl_id)) { + dev_err(&pdev->dev, "Can not read l3c ccl-id!\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + l3c_pmu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(l3c_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n"); + return PTR_ERR(l3c_pmu->base); + } + + return 0; +} + +static struct attribute *hisi_l3c_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + NULL, +}; + +static const struct attribute_group hisi_l3c_pmu_format_group = { + .name = "format", + .attrs = hisi_l3c_pmu_format_attr, +}; + +static struct attribute *hisi_l3c_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00), + HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01), + HISI_PMU_EVENT_ATTR(rd_hit_cpipe, 0x02), + HISI_PMU_EVENT_ATTR(wr_hit_cpipe, 0x03), + HISI_PMU_EVENT_ATTR(victim_num, 0x04), + HISI_PMU_EVENT_ATTR(rd_spipe, 0x20), + HISI_PMU_EVENT_ATTR(wr_spipe, 0x21), + HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x22), + HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x23), + HISI_PMU_EVENT_ATTR(back_invalid, 0x29), + HISI_PMU_EVENT_ATTR(retry_cpu, 0x40), + HISI_PMU_EVENT_ATTR(retry_ring, 0x41), + HISI_PMU_EVENT_ATTR(prefetch_drop, 0x42), + NULL, +}; + +static const struct attribute_group hisi_l3c_pmu_events_group = { + .name = "events", + .attrs = hisi_l3c_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = { + .attrs = hisi_l3c_pmu_cpumask_attrs, +}; + +static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = { + &hisi_l3c_pmu_format_group, + &hisi_l3c_pmu_events_group, + 
&hisi_l3c_pmu_cpumask_attr_group, + NULL, +}; + +static const struct hisi_uncore_ops hisi_uncore_l3c_ops = { + .write_evtype = hisi_l3c_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_l3c_pmu_start_counters, + .stop_counters = hisi_l3c_pmu_stop_counters, + .enable_counter = hisi_l3c_pmu_enable_counter, + .disable_counter = hisi_l3c_pmu_disable_counter, + .enable_counter_int = hisi_l3c_pmu_enable_counter_int, + .disable_counter_int = hisi_l3c_pmu_disable_counter_int, + .write_counter = hisi_l3c_pmu_write_counter, + .read_counter = hisi_l3c_pmu_read_counter, +}; + +static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *l3c_pmu) +{ + int ret; + + ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu); + if (ret) + return ret; + + ret = hisi_l3c_pmu_init_irq(l3c_pmu, pdev); + if (ret) + return ret; + + l3c_pmu->num_counters = L3C_NR_COUNTERS; + l3c_pmu->counter_bits = 48; + l3c_pmu->ops = &hisi_uncore_l3c_ops; + l3c_pmu->dev = &pdev->dev; + l3c_pmu->on_cpu = -1; + l3c_pmu->check_event = 0x59; + + return 0; +} + +static int hisi_l3c_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *l3c_pmu; + char *name; + int ret; + + l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3c_pmu), GFP_KERNEL); + if (!l3c_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, l3c_pmu); + + ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + &l3c_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + return ret; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u", + l3c_pmu->sccl_id, l3c_pmu->index_id); + l3c_pmu->pmu = (struct pmu) { + .name = name, + .task_ctx_nr = perf_invalid_context, + .event_init = hisi_uncore_pmu_event_init, + .pmu_enable = hisi_uncore_pmu_enable, + .pmu_disable = hisi_uncore_pmu_disable, + .add = hisi_uncore_pmu_add, + .del = hisi_uncore_pmu_del, + .start = hisi_uncore_pmu_start, + .stop = hisi_uncore_pmu_stop, + .read = hisi_uncore_pmu_read, + .attr_groups = hisi_l3c_pmu_attr_groups, + }; + + ret = perf_pmu_register(&l3c_pmu->pmu, name, -1); + if (ret) { + dev_err(l3c_pmu->dev, "L3C PMU register failed!\n"); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + &l3c_pmu->node); + } + + return ret; +} + +static int hisi_l3c_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&l3c_pmu->pmu); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + &l3c_pmu->node); + + return 0; +} + +static struct platform_driver hisi_l3c_pmu_driver = { + .driver = { + .name = "hisi_l3c_pmu", + .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match), + }, + .probe = hisi_l3c_pmu_probe, + .remove = hisi_l3c_pmu_remove, +}; + +static int __init hisi_l3c_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + "AP_PERF_ARM_HISI_L3_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_l3c_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE); + + return ret; +} +module_init(hisi_l3c_pmu_module_init); + +static void __exit hisi_l3c_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_l3c_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE); +} 
+module_exit(hisi_l3c_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Anurup M "); +MODULE_AUTHOR("Shaokun Zhang "); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 6d508767e144..9621efdbb3bd 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -153,6 +153,7 @@ enum cpuhp_state { CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_PERF_ARM_CCN_ONLINE, + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, CPUHP_AP_PERF_ARM_L2X0_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, -- cgit From 2bab3cf9104c5ab80a1b9c706d81d997548401e4 Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Thu, 19 Oct 2017 19:05:19 +0800 Subject: perf: hisi: Add support for HiSilicon SoC HHA PMU driver L3 cache coherence is maintained by Hydra Home Agent (HHA) in HiSilicon SoC. This patch adds support for HHA PMU driver, Each HHA has own control, counter and interrupt registers and is an separate PMU. For each HHA PMU, it has 16-programable counters and each counter is free-running. Interrupt is supported to handle counter (48-bits) overflow. Acked-by: Mark Rutland Reviewed-by: Jonathan Cameron Signed-off-by: Shaokun Zhang Signed-off-by: Anurup M Signed-off-by: Will Deacon --- drivers/perf/hisilicon/Makefile | 2 +- drivers/perf/hisilicon/hisi_uncore_hha_pmu.c | 473 +++++++++++++++++++++++++++ include/linux/cpuhotplug.h | 1 + 3 files changed, 475 insertions(+), 1 deletion(-) create mode 100644 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile index 4a3d3e6002a6..a72afe84d034 100644 --- a/drivers/perf/hisilicon/Makefile +++ b/drivers/perf/hisilicon/Makefile @@ -1 +1 @@ -obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o +obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o hisi_uncore_hha_pmu.o diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c new file mode 100644 index 000000000000..443906e0aff3 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -0,0 +1,473 @@ +/* + * HiSilicon SoC HHA uncore Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Shaokun Zhang + * Anurup M + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hisi_uncore_pmu.h" + +/* HHA register definition */ +#define HHA_INT_MASK 0x0804 +#define HHA_INT_STATUS 0x0808 +#define HHA_INT_CLEAR 0x080C +#define HHA_PERF_CTRL 0x1E00 +#define HHA_EVENT_CTRL 0x1E04 +#define HHA_EVENT_TYPE0 0x1E80 +/* + * Each counter is 48-bits and [48:63] are reserved + * which are Read-As-Zero and Writes-Ignored. + */ +#define HHA_CNT0_LOWER 0x1F00 + +/* HHA has 16-counters */ +#define HHA_NR_COUNTERS 0x10 + +#define HHA_PERF_CTRL_EN 0x1 +#define HHA_EVTYPE_NONE 0xff + +/* + * Select the counter register offset using the counter index + * each counter is 48-bits. 
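+ * For example, counter index 3 is read from HHA_CNT0_LOWER + 0x18.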
+ */ +static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx) +{ + return (HHA_CNT0_LOWER + (cntr_idx * 8)); +} + +static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + + if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) { + dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + /* Read 64 bits and like L3C, top 16 bits are RAZ */ + return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx)); +} + +static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc, u64 val) +{ + u32 idx = hwc->idx; + + if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) { + dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + /* Write 64 bits and like L3C, top 16 bits are WI */ + writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx)); +} + +static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(HHA_EVENT_TYPEx). + * There are 4 event select registers for the 16 hardware counters. + * Event code is 8-bits and for the first 4 hardware counters, + * HHA_EVENT_TYPE0 is chosen. For the next 4 hardware counters, + * HHA_EVENT_TYPE1 is chosen and so on. + */ + reg = HHA_EVENT_TYPE0 + 4 * (idx / 4); + reg_idx = idx % 4; + shift = 8 * reg_idx; + + /* Write event code to HHA_EVENT_TYPEx register */ + val = readl(hha_pmu->base + reg); + val &= ~(HHA_EVTYPE_NONE << shift); + val |= (type << shift); + writel(val, hha_pmu->base + reg); +} + +static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu) +{ + u32 val; + + /* + * Set perf_enable bit in HHA_PERF_CTRL to start event + * counting for all enabled counters. + */ + val = readl(hha_pmu->base + HHA_PERF_CTRL); + val |= HHA_PERF_CTRL_EN; + writel(val, hha_pmu->base + HHA_PERF_CTRL); +} + +static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu) +{ + u32 val; + + /* + * Clear perf_enable bit in HHA_PERF_CTRL to stop event + * counting for all enabled counters. 
+ */ + val = readl(hha_pmu->base + HHA_PERF_CTRL); + val &= ~(HHA_PERF_CTRL_EN); + writel(val, hha_pmu->base + HHA_PERF_CTRL); +} + +static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in HHA_EVENT_CTRL register */ + val = readl(hha_pmu->base + HHA_EVENT_CTRL); + val |= (1 << hwc->idx); + writel(val, hha_pmu->base + HHA_EVENT_CTRL); +} + +static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in HHA_EVENT_CTRL register */ + val = readl(hha_pmu->base + HHA_EVENT_CTRL); + val &= ~(1 << hwc->idx); + writel(val, hha_pmu->base + HHA_EVENT_CTRL); +} + +static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 0 to enable interrupt */ + val = readl(hha_pmu->base + HHA_INT_MASK); + val &= ~(1 << hwc->idx); + writel(val, hha_pmu->base + HHA_INT_MASK); +} + +static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 1 to mask interrupt */ + val = readl(hha_pmu->base + HHA_INT_MASK); + val |= (1 << hwc->idx); + writel(val, hha_pmu->base + HHA_INT_MASK); +} + +static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id) +{ + struct hisi_pmu *hha_pmu = dev_id; + struct perf_event *event; + unsigned long overflown; + int idx; + + /* Read HHA_INT_STATUS register */ + overflown = readl(hha_pmu->base + HHA_INT_STATUS); + if (!overflown) + return IRQ_NONE; + + /* + * Find the counter index which overflowed if the bit was set + * and handle it + */ + for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) { + /* Write 1 to clear the IRQ status flag */ + writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR); + + /* Get the corresponding event struct */ + event = hha_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + + hisi_uncore_pmu_event_update(event); + hisi_uncore_pmu_set_event_period(event); + } + + return IRQ_HANDLED; +} + +static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + /* Read and init IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(&pdev->dev), hha_pmu); + if (ret < 0) { + dev_err(&pdev->dev, + "Fail to request IRQ:%d ret:%d\n", irq, ret); + return ret; + } + + hha_pmu->irq = irq; + + return 0; +} + +static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = { + { "HISI0243", }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match); + +static int hisi_hha_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *hha_pmu) +{ + unsigned long long id; + struct resource *res; + acpi_status status; + + status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), + "_UID", NULL, &id); + if (ACPI_FAILURE(status)) + return -EINVAL; + + hha_pmu->index_id = id; + + /* + * Use SCCL_ID and UID to identify the HHA PMU, while + * SCCL_ID is in MPIDR[aff2]. 
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &hha_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read hha sccl-id!\n"); + return -EINVAL; + } + /* HHA PMUs only share the same SCCL */ + hha_pmu->ccl_id = -1; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hha_pmu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(hha_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n"); + return PTR_ERR(hha_pmu->base); + } + + return 0; +} + +static struct attribute *hisi_hha_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + NULL, +}; + +static const struct attribute_group hisi_hha_pmu_format_group = { + .name = "format", + .attrs = hisi_hha_pmu_format_attr, +}; + +static struct attribute *hisi_hha_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00), + HISI_PMU_EVENT_ATTR(rx_outer, 0x01), + HISI_PMU_EVENT_ATTR(rx_sccl, 0x02), + HISI_PMU_EVENT_ATTR(rx_ccix, 0x03), + HISI_PMU_EVENT_ATTR(rx_wbi, 0x04), + HISI_PMU_EVENT_ATTR(rx_wbip, 0x05), + HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11), + HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c), + HISI_PMU_EVENT_ATTR(wr_dr_64b, 0x1d), + HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e), + HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f), + HISI_PMU_EVENT_ATTR(spill_num, 0x20), + HISI_PMU_EVENT_ATTR(spill_success, 0x21), + HISI_PMU_EVENT_ATTR(bi_num, 0x23), + HISI_PMU_EVENT_ATTR(mediated_num, 0x32), + HISI_PMU_EVENT_ATTR(tx_snp_num, 0x33), + HISI_PMU_EVENT_ATTR(tx_snp_outer, 0x34), + HISI_PMU_EVENT_ATTR(tx_snp_ccix, 0x35), + HISI_PMU_EVENT_ATTR(rx_snprspdata, 0x38), + HISI_PMU_EVENT_ATTR(rx_snprsp_outer, 0x3c), + HISI_PMU_EVENT_ATTR(sdir-lookup, 0x40), + HISI_PMU_EVENT_ATTR(edir-lookup, 0x41), + HISI_PMU_EVENT_ATTR(sdir-hit, 0x42), + HISI_PMU_EVENT_ATTR(edir-hit, 0x43), + HISI_PMU_EVENT_ATTR(sdir-home-migrate, 0x4c), + HISI_PMU_EVENT_ATTR(edir-home-migrate, 0x4d), + NULL, +}; + +static const struct attribute_group hisi_hha_pmu_events_group = { + .name = "events", + .attrs = hisi_hha_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_hha_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = { + .attrs = hisi_hha_pmu_cpumask_attrs, +}; + +static const struct attribute_group *hisi_hha_pmu_attr_groups[] = { + &hisi_hha_pmu_format_group, + &hisi_hha_pmu_events_group, + &hisi_hha_pmu_cpumask_attr_group, + NULL, +}; + +static const struct hisi_uncore_ops hisi_uncore_hha_ops = { + .write_evtype = hisi_hha_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_hha_pmu_start_counters, + .stop_counters = hisi_hha_pmu_stop_counters, + .enable_counter = hisi_hha_pmu_enable_counter, + .disable_counter = hisi_hha_pmu_disable_counter, + .enable_counter_int = hisi_hha_pmu_enable_counter_int, + .disable_counter_int = hisi_hha_pmu_disable_counter_int, + .write_counter = hisi_hha_pmu_write_counter, + .read_counter = hisi_hha_pmu_read_counter, +}; + +static int hisi_hha_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *hha_pmu) +{ + int ret; + + ret = hisi_hha_pmu_init_data(pdev, hha_pmu); + if (ret) + return ret; + + ret = hisi_hha_pmu_init_irq(hha_pmu, pdev); + if (ret) + return ret; + + hha_pmu->num_counters = HHA_NR_COUNTERS; + hha_pmu->counter_bits = 48; + hha_pmu->ops = &hisi_uncore_hha_ops; + hha_pmu->dev = &pdev->dev; + hha_pmu->on_cpu = -1; + hha_pmu->check_event = 0x65; + + return 0; +} + +static int 
hisi_hha_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *hha_pmu; + char *name; + int ret; + + hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL); + if (!hha_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, hha_pmu); + + ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + &hha_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + return ret; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u", + hha_pmu->sccl_id, hha_pmu->index_id); + hha_pmu->pmu = (struct pmu) { + .name = name, + .task_ctx_nr = perf_invalid_context, + .event_init = hisi_uncore_pmu_event_init, + .pmu_enable = hisi_uncore_pmu_enable, + .pmu_disable = hisi_uncore_pmu_disable, + .add = hisi_uncore_pmu_add, + .del = hisi_uncore_pmu_del, + .start = hisi_uncore_pmu_start, + .stop = hisi_uncore_pmu_stop, + .read = hisi_uncore_pmu_read, + .attr_groups = hisi_hha_pmu_attr_groups, + }; + + ret = perf_pmu_register(&hha_pmu->pmu, name, -1); + if (ret) { + dev_err(hha_pmu->dev, "HHA PMU register failed!\n"); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + &hha_pmu->node); + } + + return ret; +} + +static int hisi_hha_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&hha_pmu->pmu); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + &hha_pmu->node); + + return 0; +} + +static struct platform_driver hisi_hha_pmu_driver = { + .driver = { + .name = "hisi_hha_pmu", + .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match), + }, + .probe = hisi_hha_pmu_probe, + .remove = hisi_hha_pmu_remove, +}; + +static int __init hisi_hha_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + "AP_PERF_ARM_HISI_HHA_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_hha_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE); + + return ret; +} +module_init(hisi_hha_pmu_module_init); + +static void __exit hisi_hha_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_hha_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE); +} +module_exit(hisi_hha_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Shaokun Zhang "); +MODULE_AUTHOR("Anurup M "); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 9621efdbb3bd..cd63c52e7d93 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -153,6 +153,7 @@ enum cpuhp_state { CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_PERF_ARM_CCN_ONLINE, + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, CPUHP_AP_PERF_ARM_L2X0_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, -- cgit From 904dcf03f086a2e3b9d1e02cb57c43ea2e588c8c Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Thu, 19 Oct 2017 19:05:20 +0800 Subject: perf: hisi: Add support for HiSilicon SoC DDRC PMU driver This patch adds support for DDRC PMU driver in HiSilicon SoC chip, Each DDRC has own control, counter and interrupt registers and is an separate PMU. 
For each DDRC PMU, it has 8-fixed-purpose counters which have been mapped to 8-events by hardware, it assumes that counter index is equal to event code (0 - 7) in DDRC PMU driver. Interrupt is supported to handle counter (32-bits) overflow. Acked-by: Mark Rutland Reviewed-by: Jonathan Cameron Signed-off-by: Shaokun Zhang Signed-off-by: Anurup M Signed-off-by: Will Deacon --- drivers/perf/hisilicon/Makefile | 2 +- drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c | 463 ++++++++++++++++++++++++++ include/linux/cpuhotplug.h | 1 + 3 files changed, 465 insertions(+), 1 deletion(-) create mode 100644 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile index a72afe84d034..2621d51ae87a 100644 --- a/drivers/perf/hisilicon/Makefile +++ b/drivers/perf/hisilicon/Makefile @@ -1 +1 @@ -obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o hisi_uncore_hha_pmu.o +obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c new file mode 100644 index 000000000000..1b10ea05a914 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -0,0 +1,463 @@ +/* + * HiSilicon SoC DDRC uncore Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Shaokun Zhang + * Anurup M + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hisi_uncore_pmu.h" + +/* DDRC register definition */ +#define DDRC_PERF_CTRL 0x010 +#define DDRC_FLUX_WR 0x380 +#define DDRC_FLUX_RD 0x384 +#define DDRC_FLUX_WCMD 0x388 +#define DDRC_FLUX_RCMD 0x38c +#define DDRC_PRE_CMD 0x3c0 +#define DDRC_ACT_CMD 0x3c4 +#define DDRC_BNK_CHG 0x3c8 +#define DDRC_RNK_CHG 0x3cc +#define DDRC_EVENT_CTRL 0x6C0 +#define DDRC_INT_MASK 0x6c8 +#define DDRC_INT_STATUS 0x6cc +#define DDRC_INT_CLEAR 0x6d0 + +/* DDRC has 8-counters */ +#define DDRC_NR_COUNTERS 0x8 +#define DDRC_PERF_CTRL_EN 0x2 + +/* + * For DDRC PMU, there are eight-events and every event has been mapped + * to fixed-purpose counters which register offset is not consistent. + * Therefore there is no write event type and we assume that event + * code (0 to 7) is equal to counter index in PMU driver. + */ +#define GET_DDRC_EVENTID(hwc) (hwc->config_base & 0x7) + +static const u32 ddrc_reg_off[] = { + DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD, + DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG +}; + +/* + * Select the counter register offset using the counter index. + * In DDRC there are no programmable counter, the count + * is readed form the statistics counter register itself. 
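+ * For example, event code 2 (flux_wcmd) is always read from DDRC_FLUX_WCMD (0x388).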
+ */ +static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx) +{ + return ddrc_reg_off[cntr_idx]; +} + +static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + /* Use event code as counter index */ + u32 idx = GET_DDRC_EVENTID(hwc); + + if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) { + dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx)); +} + +static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc, u64 val) +{ + u32 idx = GET_DDRC_EVENTID(hwc); + + if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) { + dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + writel((u32)val, + ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx)); +} + +/* + * For DDRC PMU, event has been mapped to fixed-purpose counter by hardware, + * so there is no need to write event type. + */ +static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx, + u32 type) +{ +} + +static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu) +{ + u32 val; + + /* Set perf_enable in DDRC_PERF_CTRL to start event counting */ + val = readl(ddrc_pmu->base + DDRC_PERF_CTRL); + val |= DDRC_PERF_CTRL_EN; + writel(val, ddrc_pmu->base + DDRC_PERF_CTRL); +} + +static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu) +{ + u32 val; + + /* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */ + val = readl(ddrc_pmu->base + DDRC_PERF_CTRL); + val &= ~DDRC_PERF_CTRL_EN; + writel(val, ddrc_pmu->base + DDRC_PERF_CTRL); +} + +static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Set counter index(event code) in DDRC_EVENT_CTRL register */ + val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL); + val |= (1 << GET_DDRC_EVENTID(hwc)); + writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL); +} + +static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index(event code) in DDRC_EVENT_CTRL register */ + val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL); + val &= ~(1 << GET_DDRC_EVENTID(hwc)); + writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL); +} + +static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event) +{ + struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu); + unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask; + struct hw_perf_event *hwc = &event->hw; + /* For DDRC PMU, we use event code as counter index */ + int idx = GET_DDRC_EVENTID(hwc); + + if (test_bit(idx, used_mask)) + return -EAGAIN; + + set_bit(idx, used_mask); + + return idx; +} + +static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 0 to enable interrupt */ + val = readl(ddrc_pmu->base + DDRC_INT_MASK); + val &= ~(1 << GET_DDRC_EVENTID(hwc)); + writel(val, ddrc_pmu->base + DDRC_INT_MASK); +} + +static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 1 to mask interrupt */ + val = readl(ddrc_pmu->base + DDRC_INT_MASK); + val |= (1 << GET_DDRC_EVENTID(hwc)); + writel(val, ddrc_pmu->base + DDRC_INT_MASK); +} + +static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id) +{ + struct hisi_pmu *ddrc_pmu = dev_id; + struct perf_event *event; + unsigned long overflown; + int idx; + + /* Read the DDRC_INT_STATUS register */ + overflown = readl(ddrc_pmu->base + 
DDRC_INT_STATUS); + if (!overflown) + return IRQ_NONE; + + /* + * Find the counter index which overflowed if the bit was set + * and handle it + */ + for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) { + /* Write 1 to clear the IRQ status flag */ + writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR); + + /* Get the corresponding event struct */ + event = ddrc_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + + hisi_uncore_pmu_event_update(event); + hisi_uncore_pmu_set_event_period(event); + } + + return IRQ_HANDLED; +} + +static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + /* Read and init IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(&pdev->dev), ddrc_pmu); + if (ret < 0) { + dev_err(&pdev->dev, + "Fail to request IRQ:%d ret:%d\n", irq, ret); + return ret; + } + + ddrc_pmu->irq = irq; + + return 0; +} + +static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = { + { "HISI0233", }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match); + +static int hisi_ddrc_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *ddrc_pmu) +{ + struct resource *res; + + /* + * Use the SCCL_ID and DDRC channel ID to identify the + * DDRC PMU, while SCCL_ID is in MPIDR[aff2]. + */ + if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id", + &ddrc_pmu->index_id)) { + dev_err(&pdev->dev, "Can not read ddrc channel-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &ddrc_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n"); + return -EINVAL; + } + /* DDRC PMUs only share the same SCCL */ + ddrc_pmu->ccl_id = -1; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ddrc_pmu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ddrc_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n"); + return PTR_ERR(ddrc_pmu->base); + } + + return 0; +} + +static struct attribute *hisi_ddrc_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-4"), + NULL, +}; + +static const struct attribute_group hisi_ddrc_pmu_format_group = { + .name = "format", + .attrs = hisi_ddrc_pmu_format_attr, +}; + +static struct attribute *hisi_ddrc_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(flux_wr, 0x00), + HISI_PMU_EVENT_ATTR(flux_rd, 0x01), + HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02), + HISI_PMU_EVENT_ATTR(flux_rcmd, 0x03), + HISI_PMU_EVENT_ATTR(pre_cmd, 0x04), + HISI_PMU_EVENT_ATTR(act_cmd, 0x05), + HISI_PMU_EVENT_ATTR(rnk_chg, 0x06), + HISI_PMU_EVENT_ATTR(rw_chg, 0x07), + NULL, +}; + +static const struct attribute_group hisi_ddrc_pmu_events_group = { + .name = "events", + .attrs = hisi_ddrc_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = { + .attrs = hisi_ddrc_pmu_cpumask_attrs, +}; + +static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = { + &hisi_ddrc_pmu_format_group, + &hisi_ddrc_pmu_events_group, + &hisi_ddrc_pmu_cpumask_attr_group, + NULL, +}; + +static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = { + .write_evtype = hisi_ddrc_pmu_write_evtype, + .get_event_idx = hisi_ddrc_pmu_get_event_idx, + 
.start_counters = hisi_ddrc_pmu_start_counters, + .stop_counters = hisi_ddrc_pmu_stop_counters, + .enable_counter = hisi_ddrc_pmu_enable_counter, + .disable_counter = hisi_ddrc_pmu_disable_counter, + .enable_counter_int = hisi_ddrc_pmu_enable_counter_int, + .disable_counter_int = hisi_ddrc_pmu_disable_counter_int, + .write_counter = hisi_ddrc_pmu_write_counter, + .read_counter = hisi_ddrc_pmu_read_counter, +}; + +static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *ddrc_pmu) +{ + int ret; + + ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu); + if (ret) + return ret; + + ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev); + if (ret) + return ret; + + ddrc_pmu->num_counters = DDRC_NR_COUNTERS; + ddrc_pmu->counter_bits = 32; + ddrc_pmu->ops = &hisi_uncore_ddrc_ops; + ddrc_pmu->dev = &pdev->dev; + ddrc_pmu->on_cpu = -1; + ddrc_pmu->check_event = 7; + + return 0; +} + +static int hisi_ddrc_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *ddrc_pmu; + char *name; + int ret; + + ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL); + if (!ddrc_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, ddrc_pmu); + + ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + &ddrc_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret); + return ret; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u", + ddrc_pmu->sccl_id, ddrc_pmu->index_id); + ddrc_pmu->pmu = (struct pmu) { + .name = name, + .task_ctx_nr = perf_invalid_context, + .event_init = hisi_uncore_pmu_event_init, + .pmu_enable = hisi_uncore_pmu_enable, + .pmu_disable = hisi_uncore_pmu_disable, + .add = hisi_uncore_pmu_add, + .del = hisi_uncore_pmu_del, + .start = hisi_uncore_pmu_start, + .stop = hisi_uncore_pmu_stop, + .read = hisi_uncore_pmu_read, + .attr_groups = hisi_ddrc_pmu_attr_groups, + }; + + ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1); + if (ret) { + dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n"); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + &ddrc_pmu->node); + } + + return ret; +} + +static int hisi_ddrc_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&ddrc_pmu->pmu); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + &ddrc_pmu->node); + + return 0; +} + +static struct platform_driver hisi_ddrc_pmu_driver = { + .driver = { + .name = "hisi_ddrc_pmu", + .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match), + }, + .probe = hisi_ddrc_pmu_probe, + .remove = hisi_ddrc_pmu_remove, +}; + +static int __init hisi_ddrc_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + "AP_PERF_ARM_HISI_DDRC_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_ddrc_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE); + + return ret; +} +module_init(hisi_ddrc_pmu_module_init); + +static void __exit hisi_ddrc_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_ddrc_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE); + +} +module_exit(hisi_ddrc_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Shaokun Zhang "); 
+MODULE_AUTHOR("Anurup M "); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index cd63c52e7d93..587006de6f82 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -153,6 +153,7 @@ enum cpuhp_state { CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_PERF_ARM_CCN_ONLINE, + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, CPUHP_AP_PERF_ARM_L2X0_ONLINE, -- cgit From 07141342149a62db780c02f6352a0610a535eee1 Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Thu, 19 Oct 2017 19:05:21 +0800 Subject: arm64: MAINTAINERS: hisi: Add HiSilicon SoC PMU support Add support HiSilicon SoC uncore PMU driver. Acked-by: Mark Rutland Signed-off-by: Shaokun Zhang Signed-off-by: Will Deacon --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 65b0c88d5ee0..835048e62435 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6241,6 +6241,13 @@ S: Maintained F: drivers/net/ethernet/hisilicon/ F: Documentation/devicetree/bindings/net/hisilicon*.txt +HISILICON PMU DRIVER +M: Shaokun Zhang +W: http://www.hisilicon.com +S: Supported +F: drivers/perf/hisilicon +F: Documentation/perf/hisi-pmu.txt + HISILICON ROCE DRIVER M: Lijun Ou M: Wei Hu(Xavier) -- cgit From 5bdecb7971572a1aef828df507558e7a4dfe25ec Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Thu, 19 Oct 2017 16:39:02 +0100 Subject: arm64: Fix the feature type for ID register fields Now that the ARM ARM clearly specifies the rules for inferring the values of the ID register fields, fix the types of the feature bits we have in the kernel. As per ARM ARM DDI0487B.b, section D10.1.4 "Principles of the ID scheme for fields in ID registers" lists the registers to which the scheme applies along with the exceptions. This patch changes the relevant feature bits from FTR_EXACT to FTR_LOWER_SAFE to select the safer value. This will enable an older kernel running on a new CPU detect the safer option rather than completely disabling the feature. Cc: Catalin Marinas Cc: Dave Martin Cc: Mark Rutland Cc: Will Deacon Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/kernel/cpufeature.c | 102 ++++++++++++++++++++--------------------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 3214ee44de56..4d9243969702 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -107,11 +107,11 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) * sync with the documentation of the CPU feature register ABI. 
*/ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_DP_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM4_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SHA3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0), @@ -121,34 +121,34 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_FCMA_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_DPB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0), ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), /* Linux doesn't care about the EL3 */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 
ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), /* Linux shouldn't care about secure memory */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0), /* * Differing PARange is fine as long as all peripherals and memory are mapped * within the minimum PARange of all CPUs @@ -159,20 +159,20 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0), ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -197,14 +197,14 @@ struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = { }; static const struct arm64_ftr_bits 
ftr_id_mmfr0[] = { - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 28, 4, 0xf), /* InnerShr */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 24, 4, 0), /* FCSE */ + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf), /* InnerShr */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), /* FCSE */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 16, 4, 0), /* TCM */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* ShareLvl */ - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0xf), /* OuterShr */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), /* TCM */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* ShareLvl */ + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf), /* OuterShr */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* PMSA */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* VMSA */ ARM64_FTR_END, }; @@ -225,8 +225,8 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { }; static const struct arm64_ftr_bits ftr_mvfr2[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* FPMisc */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* SIMDMisc */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */ ARM64_FTR_END, }; @@ -238,25 +238,25 @@ static const struct arm64_ftr_bits ftr_dczid[] = { static const struct arm64_ftr_bits ftr_id_isar5[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0), ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_id_mmfr4[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* ac2 */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* ac2 */ ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_id_pfr0[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* State3 */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0), /* State2 */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* State1 */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* State0 */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */ 
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* State1 */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* State0 */ ARM64_FTR_END, }; -- cgit From 08395c7f4d9f5808b5754a0dbed969f378bde0c3 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Fri, 13 Oct 2017 12:26:44 +0100 Subject: irqdesc: Add function to identify percpu_devid irqs irq_is_percpu indicates whether an irq should only target a single cpu. The PERCPU_DEVID flag indicates that an irq can be configured differently on each cpu it can target. Provide a function to check whether an irq is PERCPU_DEVID. Reviewed-by: Thomas Gleixner Reviewed-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Julien Thierry --- include/linux/irqdesc.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 3e90a094798d..93960cf36e23 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -244,6 +244,14 @@ static inline int irq_is_percpu(unsigned int irq) return desc->status_use_accessors & IRQ_PER_CPU; } +static inline int irq_is_percpu_devid(unsigned int irq) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + return desc->status_use_accessors & IRQ_PER_CPU_DEVID; +} + static inline void irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) { -- cgit From 611479c79a94e6713bc044b0410aa8b867375d17 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Fri, 13 Oct 2017 12:26:45 +0100 Subject: arm/arm64: pmu: Distinguish percpu irq and percpu_devid irq arm_pmu interrupts are marked as PERCPU even when they are not local physical interrupts to a single CPU. When using non-local interrupts, interrupts marked as PERCPU will not get freed or disabled properly by the PMU driver. Check whether interrupts are local to a single CPU using PERCPU_DEVID, since this is what the PMU driver really needs to know.
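For illustration, a minimal sketch (not taken from this patch) of the request-side distinction the series relies on; the my_pmu_* names are hypothetical, and a real driver would also track active CPUs as armpmu_request_irq() does in the diff below:

#include <linux/interrupt.h>
#include <linux/irqdesc.h>

static irqreturn_t my_pmu_handler(int irq, void *dev)
{
        /* ... service the PMU overflow ... */
        return IRQ_HANDLED;
}

static int my_pmu_request_irq(int irq, void __percpu *cpu_data, void *data)
{
        if (irq_is_percpu_devid(irq))
                /* One PPI number, a distinct dev_id per CPU. */
                return request_percpu_irq(irq, my_pmu_handler, "my-pmu",
                                          cpu_data);

        /* Ordinary SPI routed to a single CPU: plain request_irq(). */
        return request_irq(irq, my_pmu_handler, IRQF_NOBALANCING,
                           "my-pmu", data);
}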
Acked-by: Mark Rutland Signed-off-by: Julien Thierry Signed-off-by: Will Deacon --- drivers/perf/arm_pmu.c | 10 +++++----- drivers/perf/arm_pmu_platform.c | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index d14fc2e67f93..7bc5eee96b31 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -539,7 +539,7 @@ void armpmu_free_irq(struct arm_pmu *armpmu, int cpu) if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs)) return; - if (irq_is_percpu(irq)) { + if (irq_is_percpu_devid(irq)) { free_percpu_irq(irq, &hw_events->percpu_pmu); cpumask_clear(&armpmu->active_irqs); return; @@ -565,10 +565,10 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu) if (!irq) return 0; - if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) { + if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) { err = request_percpu_irq(irq, handler, "arm-pmu", &hw_events->percpu_pmu); - } else if (irq_is_percpu(irq)) { + } else if (irq_is_percpu_devid(irq)) { int other_cpu = cpumask_first(&armpmu->active_irqs); int other_irq = per_cpu(hw_events->irq, other_cpu); @@ -649,7 +649,7 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node) irq = armpmu_get_cpu_irq(pmu, cpu); if (irq) { - if (irq_is_percpu(irq)) { + if (irq_is_percpu_devid(irq)) { enable_percpu_irq(irq, IRQ_TYPE_NONE); return 0; } @@ -667,7 +667,7 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node) return 0; irq = armpmu_get_cpu_irq(pmu, cpu); - if (irq && irq_is_percpu(irq)) + if (irq && irq_is_percpu_devid(irq)) disable_percpu_irq(irq); return 0; diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c index 4eafa7a42e52..bbc64ee6dd96 100644 --- a/drivers/perf/arm_pmu_platform.c +++ b/drivers/perf/arm_pmu_platform.c @@ -126,7 +126,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) if (num_irqs == 1) { int irq = platform_get_irq(pdev, 0); - if (irq && irq_is_percpu(irq)) + if (irq && irq_is_percpu_devid(irq)) return pmu_parse_percpu_irq(pmu, irq); } @@ -149,7 +149,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) if (WARN_ON(irq <= 0)) continue; - if (irq_is_percpu(irq)) { + if (irq_is_percpu_devid(irq)) { pr_warn("multiple PPIs or mismatched SPI/PPI detected\n"); return -EINVAL; } -- cgit From 9ca255bf041ddc7698b6906dbd846c0ba64b1fe1 Mon Sep 17 00:00:00 2001 From: Mark Salyzyn Date: Tue, 24 Oct 2017 07:47:14 -0700 Subject: arm64: Avoid aligning normal memory pointers in __memcpy_{to,from}io __memcpy_{to,from}io fall back to byte-at-a-time copying if both the source and destination pointers are not 8-byte aligned. Since one of the pointers always points at normal memory, this is unnecessary and detrimental to performance, so only do byte copying until we hit an 8-byte boundary for the device pointer. This change was motivated by performance issues in the pstore driver. On a test platform, measuring probe time for pstore, console buffer size of 1/4MB and pmsg of 1/2MB, was in the 90-107ms region. Change managed to reduce it to 10-25ms, an improvement in boot time. 
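To make the effect concrete, here is a small standalone illustration (not kernel code) of the head/body/tail split, which now depends only on the device-side pointer; it assumes count is at least as large as the head:

#include <stdio.h>

int main(void)
{
        unsigned long to = 0x5;     /* device offset: misaligned by 5 bytes */
        unsigned long count = 32;   /* bytes to copy */

        unsigned long head = (8 - (to & 7)) & 7;   /* single-byte writes: 3 */
        unsigned long body = (count - head) / 8;   /* 64-bit writes:      3 */
        unsigned long tail = (count - head) % 8;   /* single-byte writes: 5 */

        /* Previously, a misaligned normal-memory pointer forced 32 byte writes. */
        printf("head=%lu qwords=%lu tail=%lu\n", head, body, tail);
        return 0;
}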
Cc: Kees Cook Cc: Anton Vorontsov Cc: Tony Luck Cc: Catalin Marinas Cc: Will Deacon Cc: Anton Vorontsov Cc: Robin Murphy Signed-off-by: Mark Salyzyn Signed-off-by: Will Deacon --- arch/arm64/kernel/io.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c index 354be2a872ae..79b17384effa 100644 --- a/arch/arm64/kernel/io.c +++ b/arch/arm64/kernel/io.c @@ -25,8 +25,7 @@ */ void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { - while (count && (!IS_ALIGNED((unsigned long)from, 8) || - !IS_ALIGNED((unsigned long)to, 8))) { + while (count && !IS_ALIGNED((unsigned long)from, 8)) { *(u8 *)to = __raw_readb(from); from++; to++; @@ -54,23 +53,22 @@ EXPORT_SYMBOL(__memcpy_fromio); */ void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) { - while (count && (!IS_ALIGNED((unsigned long)to, 8) || - !IS_ALIGNED((unsigned long)from, 8))) { - __raw_writeb(*(volatile u8 *)from, to); + while (count && !IS_ALIGNED((unsigned long)to, 8)) { + __raw_writeb(*(u8 *)from, to); from++; to++; count--; } while (count >= 8) { - __raw_writeq(*(volatile u64 *)from, to); + __raw_writeq(*(u64 *)from, to); from += 8; to += 8; count -= 8; } while (count) { - __raw_writeb(*(volatile u8 *)from, to); + __raw_writeb(*(u8 *)from, to); from++; to++; count--; -- cgit From e28cc0255997e9b770039d87276c1b9fd8bc231d Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Wed, 25 Oct 2017 10:04:32 +0100 Subject: arm64: Use existing defines for mdscr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Literal values are being used to set single stepping in mdscr from assembly code. There are already existing defines representing those values, use those instead of the literal values. Signed-off-by: Julien Thierry Reviewed-by: Alex Bennée Acked-by: Mark Rutland Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index d58a6253c6ab..3128a9ca5701 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -65,7 +66,7 @@ .macro disable_step_tsk, flgs, tmp tbz \flgs, #TIF_SINGLESTEP, 9990f mrs \tmp, mdscr_el1 - bic \tmp, \tmp, #1 + bic \tmp, \tmp, #DBG_MDSCR_SS msr mdscr_el1, \tmp isb // Synchronise with enable_dbg 9990: @@ -75,7 +76,7 @@ tbz \flgs, #TIF_SINGLESTEP, 9990f disable_dbg mrs \tmp, mdscr_el1 - orr \tmp, \tmp, #1 + orr \tmp, \tmp, #DBG_MDSCR_SS msr mdscr_el1, \tmp 9990: .endm -- cgit From 6436beeee5721a8e906e9eabf866f12d04470437 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Wed, 25 Oct 2017 10:04:33 +0100 Subject: arm64: Fix single stepping in kernel traps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Software Step exception is missing after stepping a trapped instruction. Ensure SPSR.SS gets set to 0 after emulating/skipping a trapped instruction before doing ERET. 
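As a usage sketch (hypothetical hook, not taken from this patch), an instruction-emulation handler now ends like this instead of open-coding regs->pc += 4:

#include <linux/types.h>
#include <asm/insn.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

static int my_undef_hook(struct pt_regs *regs, u32 insn)
{
        /* ... emulate the trapped instruction here ... */

        /*
         * Advance the PC and fast-forward any pending single-step state,
         * so a debugger still gets its step exception after the ERET.
         */
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        return 0;
}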
Cc: Catalin Marinas Cc: Mark Rutland Signed-off-by: Julien Thierry Reviewed-by: Alex Bennée [will: replaced AARCH32_INSN_SIZE with 4] Signed-off-by: Will Deacon --- arch/arm64/include/asm/traps.h | 6 ++++++ arch/arm64/kernel/armv8_deprecated.c | 8 ++++---- arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kernel/traps.c | 21 ++++++++++++++++----- 4 files changed, 27 insertions(+), 10 deletions(-) diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index d131501c6222..45e3da34bdc4 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -37,6 +37,12 @@ void unregister_undef_hook(struct undef_hook *hook); void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr); +/* + * Move regs->pc to next instruction and do necessary setup before it + * is executed. + */ +void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size); + static inline int __in_irqentry_text(unsigned long ptr) { return ptr >= (unsigned long)&__irqentry_text_start && diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index f0e6d717885b..f6a831b3bd3a 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -431,7 +431,7 @@ ret: pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n", current->comm, (unsigned long)current->pid, regs->pc); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, 4); return 0; fault: @@ -512,7 +512,7 @@ ret: pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n", current->comm, (unsigned long)current->pid, regs->pc); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, 4); return 0; } @@ -586,14 +586,14 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian) static int a32_setend_handler(struct pt_regs *regs, u32 instr) { int rc = compat_setend_handler(regs, (instr >> 9) & 1); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, 4); return rc; } static int t16_setend_handler(struct pt_regs *regs, u32 instr) { int rc = compat_setend_handler(regs, (instr >> 3) & 1); - regs->pc += 2; + arm64_skip_faulting_instruction(regs, 2); return rc; } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4d9243969702..250e8cfff67d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1296,7 +1296,7 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn) if (!rc) { dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn); pt_regs_write_reg(regs, dst, val); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } return rc; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 5ea4b85aee0e..a1b7d6420d09 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -293,6 +293,17 @@ void arm64_notify_die(const char *str, struct pt_regs *regs, } } +void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) +{ + regs->pc += size; + + /* + * If we were single stepping, we want to get the step exception after + * we return from the trap. 
+ */ + user_fastforward_single_step(current); +} + static LIST_HEAD(undef_hook); static DEFINE_RAW_SPINLOCK(undef_lock); @@ -480,7 +491,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) if (ret) arm64_notify_segfault(regs, address); else - regs->pc += 4; + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) @@ -490,7 +501,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) pt_regs_write_reg(regs, rt, val); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) @@ -498,7 +509,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; pt_regs_write_reg(regs, rt, arch_counter_get_cntvct()); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) @@ -506,7 +517,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; pt_regs_write_reg(regs, rt, arch_timer_get_rate()); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } struct sys64_hook { @@ -761,7 +772,7 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr) } /* If thread survives, skip over the BUG instruction and continue: */ - regs->pc += AARCH64_INSN_SIZE; /* skip BRK and resume */ + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); return DBG_HOOK_HANDLED; } -- cgit From fa3eb71d96f52833a5e12def2a22b2347dd349d0 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 24 Oct 2017 18:40:36 +0100 Subject: arm64: asm-bug: Renumber macro local labels to avoid clashes Currently ASM_BUG() and its constituent macros define local assembler labels 0, 1 and 2 internally, which carries a high risk of clash with callers' labels and consequent mis-assembly. This patch gives the labels a big random offset to minimise the chance of such errors. Signed-off-by: Dave Martin Signed-off-by: Will Deacon --- arch/arm64/include/asm/asm-bug.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h index 636e755bcdca..b3552c4a405f 100644 --- a/arch/arm64/include/asm/asm-bug.h +++ b/arch/arm64/include/asm/asm-bug.h @@ -22,10 +22,10 @@ #define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line) #define __BUGVERBOSE_LOCATION(file, line) \ .pushsection .rodata.str,"aMS",@progbits,1; \ - 2: .string file; \ + 14472: .string file; \ .popsection; \ \ - .long 2b - 0b; \ + .long 14472b - 14470b; \ .short line; #else #define _BUGVERBOSE_LOCATION(file, line) @@ -36,11 +36,11 @@ #define __BUG_ENTRY(flags) \ .pushsection __bug_table,"aw"; \ .align 2; \ - 0: .long 1f - 0b; \ + 14470: .long 14471f - 14470b; \ _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \ .short flags; \ .popsection; \ - 1: + 14471: #else #define __BUG_ENTRY(flags) #endif -- cgit From 42dbf54e88906ffb8ee992c14e45dd36483f5011 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 19 Oct 2017 11:19:55 +0100 Subject: arm64: consistently log ESR and page table When we take a fault we can't handle, we try to dump some relevant information, but we're not consistent about doing so. In do_mem_abort(), we log the full ESR, but don't dump a page table walk. 
In __do_kernel_fault, we dump an attempted decoding of the ESR (but not the ESR itself) along with a page table walk. Let's try to make things more consistent by dumping the full ESR in mem_abort_decode(), and having do_mem_abort dump a page table walk. The existing dump of the ESR in do_mem_abort() is rendered redundant, and removed. Tested-by: Laura Abbott Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Julien Thierry Cc: Kristina Martsenko Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/mm/fault.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 84a586dd063a..6ee22cd8a249 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -109,6 +109,7 @@ static void mem_abort_decode(unsigned int esr) { pr_alert("Mem abort info:\n"); + pr_alert(" ESR = 0x%08x\n", esr); pr_alert(" Exception class = %s, IL = %u bits\n", esr_get_class_string(esr), (esr & ESR_ELx_IL) ? 32 : 16); @@ -692,11 +693,13 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, if (!inf->fn(addr, esr, regs)) return; - pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n", - inf->name, esr, addr); + pr_alert("Unhandled fault: %s at 0x%016lx\n", + inf->name, addr); mem_abort_decode(esr); + show_pte(addr); + info.si_signo = inf->sig; info.si_errno = 0; info.si_code = inf->code; -- cgit From a25ffd3a6302a67814280274d8f1aa4ae2ea4b59 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Oct 2017 13:19:20 +0100 Subject: arm64: traps: Don't print stack or raw PC/LR values in backtraces Printing raw pointer values in backtraces has potential security implications and are of questionable value anyway. This patch follows x86's lead and removes the "Exception stack:" dump from kernel backtraces, as well as converting PC/LR values to symbols such as "sysrq_handle_crash+0x20/0x30". Tested-by: Laura Abbott Signed-off-by: Will Deacon --- arch/arm64/kernel/process.c | 8 +++--- arch/arm64/kernel/traps.c | 65 +++------------------------------------------ 2 files changed, 6 insertions(+), 67 deletions(-) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 2dc0f8482210..c20896b8fb2d 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -186,11 +186,9 @@ void __show_regs(struct pt_regs *regs) } show_regs_print_info(KERN_DEFAULT); - print_symbol("PC is at %s\n", instruction_pointer(regs)); - print_symbol("LR is at %s\n", lr); - printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n", - regs->pc, lr, regs->pstate); - printk("sp : %016llx\n", sp); + print_symbol("pc : %s\n", regs->pc); + print_symbol("lr : %s\n", lr); + printk("sp : %016llx pstate : %08llx\n", sp, regs->pstate); i = top_reg; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index a1b7d6420d09..afb6b19677dc 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -58,55 +58,9 @@ static const char *handler[]= { int show_unhandled_signals = 1; -/* - * Dump out the contents of some kernel memory nicely... - */ -static void dump_mem(const char *lvl, const char *str, unsigned long bottom, - unsigned long top) -{ - unsigned long first; - mm_segment_t fs; - int i; - - /* - * We need to switch to kernel mode so that we can use __get_user - * to safely read from kernel space. 
- */ - fs = get_fs(); - set_fs(KERNEL_DS); - - printk("%s%s(0x%016lx to 0x%016lx)\n", lvl, str, bottom, top); - - for (first = bottom & ~31; first < top; first += 32) { - unsigned long p; - char str[sizeof(" 12345678") * 8 + 1]; - - memset(str, ' ', sizeof(str)); - str[sizeof(str) - 1] = '\0'; - - for (p = first, i = 0; i < (32 / 8) - && p < top; i++, p += 8) { - if (p >= bottom && p < top) { - unsigned long val; - - if (__get_user(val, (unsigned long *)p) == 0) - sprintf(str + i * 17, " %016lx", val); - else - sprintf(str + i * 17, " ????????????????"); - } - } - printk("%s%04lx:%s\n", lvl, first & 0xffff, str); - } - - set_fs(fs); -} - static void dump_backtrace_entry(unsigned long where) { - /* - * Note that 'where' can have a physical address, but it's not handled. - */ - print_ip_sym(where); + printk(" %pS\n", (void *)where); } static void __dump_instr(const char *lvl, struct pt_regs *regs) @@ -171,10 +125,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) skip = !!regs; printk("Call trace:\n"); - while (1) { - unsigned long stack; - int ret; - + do { /* skip until specified stack frame */ if (!skip) { dump_backtrace_entry(frame.pc); @@ -189,17 +140,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) */ dump_backtrace_entry(regs->pc); } - ret = unwind_frame(tsk, &frame); - if (ret < 0) - break; - if (in_entry_text(frame.pc)) { - stack = frame.fp - offsetof(struct pt_regs, stackframe); - - if (on_accessible_stack(tsk, stack)) - dump_mem("", "Exception stack", stack, - stack + sizeof(struct pt_regs)); - } - } + } while (!unwind_frame(tsk, &frame)); put_task_stack(tsk); } -- cgit From b7300d4c035cc98c7f5c40e2c4e53f58c16617ea Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Oct 2017 13:26:26 +0100 Subject: arm64: traps: Pretty-print pstate in register dumps We can decode the PSTATE easily enough, so pretty-print it in register dumps. Tested-by: Laura Abbott Signed-off-by: Will Deacon --- arch/arm64/kernel/process.c | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index c20896b8fb2d..c15ec41e9f84 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -170,6 +170,39 @@ void machine_restart(char *cmd) while (1); } +static void print_pstate(struct pt_regs *regs) +{ + u64 pstate = regs->pstate; + + if (compat_user_mode(regs)) { + printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n", + pstate, + pstate & COMPAT_PSR_N_BIT ? 'N' : 'n', + pstate & COMPAT_PSR_Z_BIT ? 'Z' : 'z', + pstate & COMPAT_PSR_C_BIT ? 'C' : 'c', + pstate & COMPAT_PSR_V_BIT ? 'V' : 'v', + pstate & COMPAT_PSR_Q_BIT ? 'Q' : 'q', + pstate & COMPAT_PSR_T_BIT ? "T32" : "A32", + pstate & COMPAT_PSR_E_BIT ? "BE" : "LE", + pstate & COMPAT_PSR_A_BIT ? 'A' : 'a', + pstate & COMPAT_PSR_I_BIT ? 'I' : 'i', + pstate & COMPAT_PSR_F_BIT ? 'F' : 'f'); + } else { + printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n", + pstate, + pstate & PSR_N_BIT ? 'N' : 'n', + pstate & PSR_Z_BIT ? 'Z' : 'z', + pstate & PSR_C_BIT ? 'C' : 'c', + pstate & PSR_V_BIT ? 'V' : 'v', + pstate & PSR_D_BIT ? 'D' : 'd', + pstate & PSR_A_BIT ? 'A' : 'a', + pstate & PSR_I_BIT ? 'I' : 'i', + pstate & PSR_F_BIT ? 'F' : 'f', + pstate & PSR_PAN_BIT ? '+' : '-', + pstate & PSR_UAO_BIT ? 
'+' : '-'); + } +} + void __show_regs(struct pt_regs *regs) { int i, top_reg; @@ -186,9 +219,10 @@ void __show_regs(struct pt_regs *regs) } show_regs_print_info(KERN_DEFAULT); + print_pstate(regs); print_symbol("pc : %s\n", regs->pc); print_symbol("lr : %s\n", lr); - printk("sp : %016llx pstate : %08llx\n", sp, regs->pstate); + printk("sp : %016llx\n", sp); i = top_reg; -- cgit From d7b1d22d3821ecc946848f5db3e7c728696d9722 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Oct 2017 13:27:11 +0100 Subject: arm64: uapi: Remove PSR_Q_BIT PSTATE.Q only exists for AArch32, which can be referred to using COMPAT_PSR_Q_BIT. Remove PSR_Q_BIT, since the native bit doesn't exist in the architecture Tested-by: Laura Abbott Signed-off-by: Will Deacon --- arch/arm64/include/uapi/asm/ptrace.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index d1ff83dfe5de..3697d95ba0a1 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -46,7 +46,6 @@ #define PSR_D_BIT 0x00000200 #define PSR_PAN_BIT 0x00400000 #define PSR_UAO_BIT 0x00800000 -#define PSR_Q_BIT 0x08000000 #define PSR_V_BIT 0x10000000 #define PSR_C_BIT 0x20000000 #define PSR_Z_BIT 0x40000000 -- cgit From 6218f96c58dbf44a06aeaf767aab1f54fc397838 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 26 Oct 2017 18:36:47 +0100 Subject: arm64: Implement arch-specific pte_access_permitted() The generic pte_access_permitted() implementation only checks for pte_present() (together with the write permission where applicable). However, for both kernel ptes and PROT_NONE mappings pte_present() also returns true on arm64 even though such mappings are not user accessible. Additionally, arm64 now supports execute-only user permission (PROT_EXEC) which is implemented by clearing the PTE_USER bit. With this patch the arm64 implementation of pte_access_permitted() checks for the PTE_VALID and PTE_USER bits together with writable access if applicable. Cc: Reported-by: Al Viro Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/pgtable.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b46e54c2399b..c9530b5b5ca8 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -98,6 +98,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) #define pte_valid_young(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) +#define pte_valid_user(pte) \ + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) /* * Could the pte be present in the TLB? We must check mm_tlb_flush_pending @@ -107,6 +109,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_accessible(mm, pte) \ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte)) +/* + * p??_access_permitted() is true for valid user mappings (subject to the + * write permission check) other than user execute-only which do not have the + * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. 
+ */ +#define pte_access_permitted(pte, write) \ + (pte_valid_user(pte) && (!(write) || pte_write(pte))) +#define pmd_access_permitted(pmd, write) \ + (pte_access_permitted(pmd_pte(pmd), (write))) +#define pud_access_permitted(pud, write) \ + (pte_access_permitted(pud_pte(pud), (write))) + static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) { pte_val(pte) &= ~pgprot_val(prot); -- cgit From fd9dde6abcb9bfe6c6bee48834e157999f113971 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 27 Oct 2017 09:33:41 -0700 Subject: arm64: prevent regressions in compressed kernel image size when upgrading to binutils 2.27 Upon upgrading to binutils 2.27, we found that our lz4 and gzip compressed kernel images were significantly larger, resulting is 10ms boot time regressions. As noted by Rahul: "aarch64 binaries uses RELA relocations, where each relocation entry includes an addend value. This is similar to x86_64. On x86_64, the addend values are also stored at the relocation offset for relative relocations. This is an optimization: in the case where code does not need to be relocated, the loader can simply skip processing relative relocations. In binutils-2.25, both bfd and gold linkers did this for x86_64, but only the gold linker did this for aarch64. The kernel build here is using the bfd linker, which stored zeroes at the relocation offsets for relative relocations. Since a set of zeroes compresses better than a set of non-zero addend values, this behavior was resulting in much better lz4 compression. The bfd linker in binutils-2.27 is now storing the actual addend values at the relocation offsets. The behavior is now consistent with what it does for x86_64 and what gold linker does for both architectures. The change happened in this upstream commit: https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=1f56df9d0d5ad89806c24e71f296576d82344613 Since a bunch of zeroes got replaced by non-zero addend values, we see the side effect of lz4 compressed image being a bit bigger. To get the old behavior from the bfd linker, "--no-apply-dynamic-relocs" flag can be used: $ LDFLAGS="--no-apply-dynamic-relocs" make With this flag, the compressed image size is back to what it was with binutils-2.25. If the kernel is using ASLR, there aren't additional runtime costs to --no-apply-dynamic-relocs, as the relocations will need to be applied again anyway after the kernel is relocated to a random address. If the kernel is not using ASLR, then presumably the current default behavior of the linker is better. Since the static linker performed the dynamic relocs, and the kernel is not moved to a different address at load time, it can skip applying the relocations all over again." 
Some measurements: $ ld -v GNU ld (binutils-2.25-f3d35cf6) 2.25.51.20141117 ^ $ ls -l vmlinux -rwxr-x--- 1 ndesaulniers eng 300652760 Oct 26 11:57 vmlinux $ ls -l Image.lz4-dtb -rw-r----- 1 ndesaulniers eng 16932627 Oct 26 11:57 Image.lz4-dtb $ ld -v GNU ld (binutils-2.27-53dd00a1) 2.27.0.20170315 ^ pre patch: $ ls -l vmlinux -rwxr-x--- 1 ndesaulniers eng 300376208 Oct 26 11:43 vmlinux $ ls -l Image.lz4-dtb -rw-r----- 1 ndesaulniers eng 18159474 Oct 26 11:43 Image.lz4-dtb post patch: $ ls -l vmlinux -rwxr-x--- 1 ndesaulniers eng 300376208 Oct 26 12:06 vmlinux $ ls -l Image.lz4-dtb -rw-r----- 1 ndesaulniers eng 16932466 Oct 26 12:06 Image.lz4-dtb By Siqi's measurement w/ gzip: binutils 2.27 with this patch (with --no-apply-dynamic-relocs): Image 41535488 Image.gz 13404067 binutils 2.27 without this patch (without --no-apply-dynamic-relocs): Image 41535488 Image.gz 14125516 Any compression scheme should be able to get better results from the longer runs of zeros, not just GZIP and LZ4. 10ms boot time savings isn't anything to get excited about, but users of arm64+compression+bfd-2.27 should not have to pay a penalty for no runtime improvement. Reported-by: Gopinath Elanchezhian Reported-by: Sindhuri Pentyala Reported-by: Wei Wang Suggested-by: Ard Biesheuvel Suggested-by: Rahul Chaudhry Suggested-by: Siqi Lin Suggested-by: Stephen Hines Signed-off-by: Nick Desaulniers Reviewed-by: Ard Biesheuvel [will: added comment to Makefile] Signed-off-by: Will Deacon --- arch/arm64/Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 939b310913cf..953e43dd0417 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -14,8 +14,12 @@ LDFLAGS_vmlinux :=-p --no-undefined -X CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET) GZFLAGS :=-9 -ifneq ($(CONFIG_RELOCATABLE),) -LDFLAGS_vmlinux += -pie -shared -Bsymbolic +ifeq ($(CONFIG_RELOCATABLE), y) +# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour +# for relative relocs, since this leads to better Image compression +# with the relocation offsets always being zero. +LDFLAGS_vmlinux += -pie -shared -Bsymbolic \ + $(call ld-option, --no-apply-dynamic-relocs) endif ifeq ($(CONFIG_ARM64_ERRATUM_843419),y) -- cgit From c80ed088a519da53f27b798a69748eaabc66aadf Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 30 Oct 2017 21:23:19 +0000 Subject: arm64: vdso: fix clock_getres for 4GiB-aligned res The vdso tries to check for a NULL res pointer in __kernel_clock_getres, but only checks the lower 32 bits as is uses CBZ on the W register the res pointer is held in. Thus, if the res pointer happened to be aligned to a 4GiB boundary, we'd spuriously skip storing the timespec to it, while returning a zero error code to the caller. Prevent this by checking the whole pointer, using CBZ on the X register the res pointer is held in. Fixes: 9031fefde6f2ac1d ("arm64: VDSO support") Signed-off-by: Mark Rutland Reported-by: Andrew Pinski Reported-by: Mark Salyzyn Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/vdso/gettimeofday.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index 76320e920965..c39872a7b03c 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -309,7 +309,7 @@ ENTRY(__kernel_clock_getres) b.ne 4f ldr x2, 6f 2: - cbz w1, 3f + cbz x1, 3f stp xzr, x2, [x1] 3: /* res == NULL. 
*/ -- cgit From 80b6eb04b5d05a472a37ae33647b213dd04e59b6 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 31 Oct 2017 15:56:11 +0000 Subject: arm64: Don't walk page table for user faults in do_mem_abort Commit 42dbf54e8890 ("arm64: consistently log ESR and page table") dumps page table entries for user faults hitting do_bad entries in the fault handler table. Whilst this shouldn't really happen in practice, it's not beyond the realms of possibility if e.g. running an old kernel on a new CPU. Generally, we want to avoid exposing physical addresses under the control of userspace (see commit bf396c09c24 ("arm64: mm: don't print out page table entries on EL0 faults")), so walk the page tables only on exceptions from EL1. Reported-by: Kristina Martsenko Signed-off-by: Will Deacon --- arch/arm64/mm/fault.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 6ee22cd8a249..a2a1a6c83da1 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -698,7 +698,8 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, mem_abort_decode(esr); - show_pte(addr); + if (!user_mode(regs)) + show_pte(addr); info.si_signo = inf->sig; info.si_errno = 0; -- cgit From c10f0d06ad2652913acd21a630f4c2a97cebbfbd Mon Sep 17 00:00:00 2001 From: Yisheng Xie Date: Wed, 1 Nov 2017 17:46:19 +0800 Subject: arm64: suspend: remove useless included file After commit 9e8e865bbe29 ("arm64: unify idmap removal"), we no need to flush tlb in suspend.c, so the included file tlbflush.h can be removed. Signed-off-by: Yisheng Xie Signed-off-by: Will Deacon --- arch/arm64/kernel/suspend.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 1e3be9064cfa..5794326975f8 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -11,7 +11,6 @@ #include #include #include -#include /* * This is allocated by cpu_suspend_init(), and used to store a pointer to -- cgit From 0fbeb318754860b37150fd42c2058d636a431426 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 2 Nov 2017 12:12:34 +0000 Subject: arm64: explicitly mask all exceptions There are a few places where we want to mask all exceptions. Today we do this in a piecemeal fashion, typically we expect the caller to have masked irqs and the arch code masks debug exceptions, ignoring serror which is probably masked. Make it clear that 'mask all exceptions' is the intention by adding helpers to do exactly that. This will let us unmask SError without having to add 'oh and SError' to these paths. 
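A minimal usage sketch of the new C helpers (the critical section itself is hypothetical):

#include <asm/daifflags.h>

static void my_no_exceptions_section(void)
{
        unsigned long flags;

        flags = local_daif_save();      /* mask Debug, SError, IRQ and FIQ */
        /* ... work that must not be interrupted by any exception ... */
        local_daif_restore(flags);      /* put DAIF back exactly as it was */
}

Callers that never return to the previous state, such as cpu_die() in this patch, use local_daif_mask() on its own.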
Signed-off-by: James Morse Reviewed-by: Julien Thierry Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 17 ++++++++++ arch/arm64/include/asm/daifflags.h | 69 ++++++++++++++++++++++++++++++++++++++ arch/arm64/kernel/hibernate.c | 5 +-- arch/arm64/kernel/machine_kexec.c | 4 +-- arch/arm64/kernel/smp.c | 9 ++--- arch/arm64/kernel/suspend.c | 7 ++-- arch/arm64/kernel/traps.c | 3 +- arch/arm64/mm/proc.S | 9 +++-- 8 files changed, 104 insertions(+), 19 deletions(-) create mode 100644 arch/arm64/include/asm/daifflags.h diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 3128a9ca5701..a4aa22241dd1 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -32,6 +32,23 @@ #include #include + .macro save_and_disable_daif, flags + mrs \flags, daif + msr daifset, #0xf + .endm + + .macro disable_daif + msr daifset, #0xf + .endm + + .macro enable_daif + msr daifclr, #0xf + .endm + + .macro restore_daif, flags:req + msr daif, \flags + .endm + /* * Enable and disable interrupts. */ diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h new file mode 100644 index 000000000000..55e2598a8c4c --- /dev/null +++ b/arch/arm64/include/asm/daifflags.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2017 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ASM_DAIFFLAGS_H +#define __ASM_DAIFFLAGS_H + +#include + +/* mask/save/unmask/restore all exceptions, including interrupts. 
*/ +static inline void local_daif_mask(void) +{ + asm volatile( + "msr daifset, #0xf // local_daif_mask\n" + : + : + : "memory"); + trace_hardirqs_off(); +} + +static inline unsigned long local_daif_save(void) +{ + unsigned long flags; + + asm volatile( + "mrs %0, daif // local_daif_save\n" + : "=r" (flags) + : + : "memory"); + local_daif_mask(); + + return flags; +} + +static inline void local_daif_unmask(void) +{ + trace_hardirqs_on(); + asm volatile( + "msr daifclr, #0xf // local_daif_unmask" + : + : + : "memory"); +} + +static inline void local_daif_restore(unsigned long flags) +{ + if (!arch_irqs_disabled_flags(flags)) + trace_hardirqs_on(); + asm volatile( + "msr daif, %0 // local_daif_restore" + : + : "r" (flags) + : "memory"); + if (arch_irqs_disabled_flags(flags)) + trace_hardirqs_off(); +} + +#endif diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 095d3c170f5d..3009b8b80f08 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -285,7 +286,7 @@ int swsusp_arch_suspend(void) return -EBUSY; } - local_dbg_save(flags); + flags = local_daif_save(); if (__cpu_suspend_enter(&state)) { /* make the crash dump kernel image visible/saveable */ @@ -315,7 +316,7 @@ int swsusp_arch_suspend(void) __cpu_suspend_exit(); } - local_dbg_restore(flags); + local_daif_restore(flags); return ret; } diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 11121f608eb5..f76ea92dff91 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -195,8 +196,7 @@ void machine_kexec(struct kimage *kimage) pr_info("Bye!\n"); - /* Disable all DAIF exceptions. */ - asm volatile ("msr daifset, #0xf" : : : "memory"); + local_daif_mask(); /* * cpu_soft_restart will shutdown the MMU, disable data caches, then diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 824561ef6b8a..c94f4a6515c4 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -370,10 +371,6 @@ void __cpu_die(unsigned int cpu) /* * Called from the idle thread for the CPU which has been shutdown. * - * Note that we disable IRQs here, but do not re-enable them - * before returning to the caller. This is also the behaviour - * of the other hotplug-cpu capable cores, so presumably coming - * out of idle fixes this. */ void cpu_die(void) { @@ -381,7 +378,7 @@ void cpu_die(void) idle_task_exit(); - local_irq_disable(); + local_daif_mask(); /* Tell __cpu_die() that this CPU is now safe to dispose of */ (void)cpu_report_death(); @@ -839,7 +836,7 @@ static void ipi_cpu_stop(unsigned int cpu) { set_cpu_online(cpu, false); - local_irq_disable(); + local_daif_mask(); while (1) cpu_relax(); diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 5794326975f8..9079dfe885fa 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -56,7 +57,7 @@ void notrace __cpu_suspend_exit(void) /* * Restore HW breakpoint registers to sane values * before debug exceptions are possibly reenabled - * through local_dbg_restore. + * by cpu_suspend()s local_daif_restore() call. 
*/ if (hw_breakpoint_restore) hw_breakpoint_restore(cpu); @@ -80,7 +81,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) * updates to mdscr register (saved and restored along with * general purpose registers) from kernel debuggers. */ - local_dbg_save(flags); + flags = local_daif_save(); /* * Function graph tracer state gets incosistent when the kernel @@ -113,7 +114,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) * restored, so from this point onwards, debugging is fully * renabled if it was enabled when core started shutdown. */ - local_dbg_restore(flags); + local_daif_restore(flags); return ret; } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index afb6b19677dc..fa52aefaf7f2 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -38,6 +38,7 @@ #include #include +#include #include #include #include @@ -594,7 +595,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) esr_get_class_string(esr)); die("Oops - bad mode", regs, 0); - local_irq_disable(); + local_daif_mask(); panic("bad mode"); } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 877d42fb0df6..95233dfc4c39 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -109,10 +109,10 @@ ENTRY(cpu_do_resume) /* * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug - * exception. Mask them until local_dbg_restore() in cpu_suspend() + * exception. Mask them until local_daif_restore() in cpu_suspend() * resets them. */ - disable_dbg + disable_daif msr mdscr_el1, x10 msr sctlr_el1, x12 @@ -155,8 +155,7 @@ ENDPROC(cpu_do_switch_mm) * called by anything else. It can only be executed from a TTBR0 mapping. */ ENTRY(idmap_cpu_replace_ttbr1) - mrs x2, daif - msr daifset, #0xf + save_and_disable_daif flags=x2 adrp x1, empty_zero_page msr ttbr1_el1, x1 @@ -169,7 +168,7 @@ ENTRY(idmap_cpu_replace_ttbr1) msr ttbr1_el1, x0 isb - msr daif, x2 + restore_daif x2 ret ENDPROC(idmap_cpu_replace_ttbr1) -- cgit From 65be7a1b799f11ff16f4c154ba4cf82eeb722ec7 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 2 Nov 2017 12:12:35 +0000 Subject: arm64: introduce an order for exceptions Currently SError is always masked in the kernel. To support RAS exceptions using SError on hardware with the v8.2 RAS Extensions we need to unmask SError as much as possible. Let's define an order for masking and unmasking exceptions. 'dai' is memorable and effectively what we have today. Disabling debug exceptions should cause all other exceptions to be masked. Masking SError should mask irq, but not disable debug exceptions. Masking irqs has no side effects for other flags. Keeping to this order makes it easier for entry.S to know which exceptions should be unmasked. FIQ is never expected, but we mask it when we mask debug exceptions, and unmask it at all other times. Given masking debug exceptions masks everything, we don't need macros to save/restore that bit independently. Remove them and switch the last caller over to use the daif calls. 
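As a sketch of that ordering (illustration only; the enum and its names are not part of the patch, they just restate the rules above in code form):

/* Most- to least-restrictive masking levels; each level implies the ones listed after it. */
enum daif_mask_level {
        MASK_DEBUG,     /* masking debug also masks SError, IRQ and FIQ */
        MASK_SERROR,    /* masking SError also masks IRQ, but leaves debug unmasked */
        MASK_IRQ,       /* masking IRQ has no side effects on the other flags */
};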
Signed-off-by: James Morse Reviewed-by: Julien Thierry Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/irqflags.h | 34 +++++++++++++--------------------- arch/arm64/kernel/debug-monitors.c | 5 +++-- 2 files changed, 16 insertions(+), 23 deletions(-) diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index 8c581281fa12..9ecdca7011f0 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -20,6 +20,19 @@ #include +/* + * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and + * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai' + * order: + * Masking debug exceptions causes all other exceptions to be masked too/ + * Masking SError masks irq, but not debug exceptions. Masking irqs has no + * side effects for other flags. Keeping to this order makes it easier for + * entry.S to know which exceptions should be unmasked. + * + * FIQ is never expected, but we mask it when we disable debug exceptions, and + * unmask it at all other times. + */ + /* * CPU interrupt mask handling. */ @@ -89,26 +102,5 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) { return flags & PSR_I_BIT; } - -/* - * save and restore debug state - */ -#define local_dbg_save(flags) \ - do { \ - typecheck(unsigned long, flags); \ - asm volatile( \ - "mrs %0, daif // local_dbg_save\n" \ - "msr daifset, #8" \ - : "=r" (flags) : : "memory"); \ - } while (0) - -#define local_dbg_restore(flags) \ - do { \ - typecheck(unsigned long, flags); \ - asm volatile( \ - "msr daif, %0 // local_dbg_restore\n" \ - : : "r" (flags) : "memory"); \ - } while (0) - #endif #endif diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index c7ef99904934..a88b6ccebbb4 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -30,6 +30,7 @@ #include #include +#include #include #include @@ -46,9 +47,9 @@ u8 debug_monitors_arch(void) static void mdscr_write(u32 mdscr) { unsigned long flags; - local_dbg_save(flags); + flags = local_daif_save(); write_sysreg(mdscr, mdscr_el1); - local_dbg_restore(flags); + local_daif_restore(flags); } NOKPROBE_SYMBOL(mdscr_write); -- cgit From 41bd5b5d22b77c7300df2a2fa5397cbe785189b4 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 2 Nov 2017 12:12:36 +0000 Subject: arm64: Move the async/fiq helpers to explicitly set process context flags Remove the local_{async,fiq}_{en,dis}able macros as they don't respect our newly defined order and are only used to set the flags for process context when we bring CPUs online. Add a helper to do this. The IRQ flag varies as we want it masked on the boot CPU until we are ready to handle interrupts. The boot CPU unmasks SError during early boot once it can print an error message. If we can print an error message about SError, we can do the same for FIQ. Debug exceptions are already enabled by __cpu_setup(), which has also configured MDSCR_EL1 to disable MDE and KDE. 
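For illustration, the resulting bring-up sequence is roughly the following (sketch only; the constants and calls are the ones added by this patch, used from setup_arch() and secondary_start_kernel() respectively):

        /* boot CPU, once earlycon may be up: leave only IRQ masked */
        local_daif_restore(DAIF_PROCCTX_NOIRQ);

        /* ... */

        /* when the CPU is ready to handle interrupts: process context, nothing masked */
        local_daif_restore(DAIF_PROCCTX);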
Signed-off-by: James Morse Reviewed-by: Julien Thierry Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/daifflags.h | 3 +++ arch/arm64/include/asm/irqflags.h | 6 ------ arch/arm64/kernel/setup.c | 8 +++++--- arch/arm64/kernel/smp.c | 3 +-- 4 files changed, 9 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 55e2598a8c4c..22e4c83de5a5 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -18,6 +18,9 @@ #include +#define DAIF_PROCCTX 0 +#define DAIF_PROCCTX_NOIRQ PSR_I_BIT + /* mask/save/unmask/restore all exceptions, including interrupts. */ static inline void local_daif_mask(void) { diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index 9ecdca7011f0..24692edf1a69 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -66,12 +66,6 @@ static inline void arch_local_irq_disable(void) : "memory"); } -#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory") -#define local_fiq_disable() asm("msr daifset, #1" : : : "memory") - -#define local_async_enable() asm("msr daifclr, #4" : : : "memory") -#define local_async_disable() asm("msr daifset, #4" : : : "memory") - /* * Save the current interrupt enable state. */ diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 4bab73e80e33..30ad2f085d1f 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -259,10 +260,11 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); /* - * Unmask asynchronous aborts after bringing up possible earlycon. - * (Report possible System Errors once we can report this occurred) + * Unmask asynchronous aborts and fiq after bringing up possible + * earlycon. (Report possible System Errors once we can report this + * occurred). */ - local_async_enable(); + local_daif_restore(DAIF_PROCCTX_NOIRQ); /* * TTBR0 is only used for the identity mapping at this stage. Make it diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index c94f4a6515c4..551eb07c53b6 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -274,8 +274,7 @@ asmlinkage void secondary_start_kernel(void) set_cpu_online(cpu, true); complete(&cpu_running); - local_irq_enable(); - local_async_enable(); + local_daif_restore(DAIF_PROCCTX); /* * OK, it's off to the idle thread for us -- cgit From 8d66772e869e79ffb94eed7492ca3d2267e150e8 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 2 Nov 2017 12:12:37 +0000 Subject: arm64: Mask all exceptions during kernel_exit To take RAS Exceptions as quickly as possible we need to keep SError unmasked as much as possible. We need to mask it during kernel_exit as taking an error from this code will overwrite the exception-registers. Adding a naked 'disable_daif' to kernel_exit causes a performance problem for micro-benchmarks that do no real work, (e.g. calling getpid() in a loop). This is because the ret_to_user loop has already masked IRQs so that the TIF_WORK_MASK thread flags can't change underneath it, adding disable_daif is an additional self-synchronising operation. In the future, the RAS APEI code may need to modify the TIF_WORK_MASK flags from an SError, in which case the ret_to_user loop must mask SError while it examines the flags. Disable all exceptions for return to EL1. 
For return to EL0 get the ret_to_user loop to leave all exceptions masked once it has done its work, this avoids an extra pstate-write. Signed-off-by: James Morse Reviewed-by: Julien Thierry Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 10 +++++----- arch/arm64/kernel/signal.c | 8 ++++++-- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index f5e851eeda4b..eff4a07c7ad7 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -221,6 +221,8 @@ alternative_else_nop_endif .macro kernel_exit, el .if \el != 0 + disable_daif + /* Restore the task's original addr_limit. */ ldr x20, [sp, #S_ORIG_ADDR_LIMIT] str x20, [tsk, #TSK_TI_ADDR_LIMIT] @@ -517,8 +519,6 @@ el1_da: mov x2, sp // struct pt_regs bl do_mem_abort - // disable interrupts before pulling preserved data off the stack - disable_irq kernel_exit 1 el1_sp_pc: /* @@ -793,7 +793,7 @@ ENDPROC(el0_irq) * and this includes saving x0 back into the kernel stack. */ ret_fast_syscall: - disable_irq // disable interrupts + disable_daif str x0, [sp, #S_X0] // returned x0 ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing and x2, x1, #_TIF_SYSCALL_WORK @@ -803,7 +803,7 @@ ret_fast_syscall: enable_step_tsk x1, x2 kernel_exit 0 ret_fast_syscall_trace: - enable_irq // enable interrupts + enable_daif b __sys_trace_return_skipped // we already saved x0 /* @@ -821,7 +821,7 @@ work_pending: * "slow" syscall return path. */ ret_to_user: - disable_irq // disable interrupts + disable_daif ldr x1, [tsk, #TSK_TI_FLAGS] and x2, x1, #_TIF_WORK_MASK cbnz x2, work_pending diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 0bdc96c61bc0..8e6500c9471b 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -756,9 +757,12 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, addr_limit_user_check(); if (thread_flags & _TIF_NEED_RESCHED) { + /* Unmask Debug and SError for the next task */ + local_daif_restore(DAIF_PROCCTX_NOIRQ); + schedule(); } else { - local_irq_enable(); + local_daif_restore(DAIF_PROCCTX); if (thread_flags & _TIF_UPROBE) uprobe_notify_resume(regs); @@ -775,7 +779,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, fpsimd_restore_current_state(); } - local_irq_disable(); + local_daif_mask(); thread_flags = READ_ONCE(current_thread_info()->flags); } while (thread_flags & _TIF_WORK_MASK); } -- cgit From 84d0fb1bb6257d5d5d45f5e3554ab441b4ff5a74 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 2 Nov 2017 12:12:38 +0000 Subject: arm64: entry.S: Remove disable_dbg enable_step_tsk is the only user of disable_dbg, which doesn't respect our 'dai' order for exception masking. enable_step_tsk may enable single-step, so previously needed to mask debug exceptions to prevent us from single-stepping kernel_exit. enable_step_tsk is called at the end of the ret_to_user loop, which has already masked all exceptions so this is no longer needed. Remove disable_dbg, add a comment that enable_step_tsk's caller should have masked debug. 
Signed-off-by: James Morse Reviewed-by: Julien Thierry Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index a4aa22241dd1..8bc63fc98752 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -69,13 +69,6 @@ msr daif, \flags .endm -/* - * Enable and disable debug exceptions. - */ - .macro disable_dbg - msr daifset, #8 - .endm - .macro enable_dbg msr daifclr, #8 .endm @@ -89,9 +82,9 @@ 9990: .endm + /* call with daif masked */ .macro enable_step_tsk, flgs, tmp tbz \flgs, #TIF_SINGLESTEP, 9990f - disable_dbg mrs \tmp, mdscr_el1 orr \tmp, \tmp, #DBG_MDSCR_SS msr mdscr_el1, \tmp -- cgit From b55a5a1b0a7d4f51b6c8eec0d4d78ace8f5fa2b3 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 2 Nov 2017 12:12:39 +0000 Subject: arm64: entry.S: convert el1_sync el1_sync unmasks exceptions on a case-by-case basis, debug exceptions are unmasked, unless this was a debug exception. IRQs are unmasked for instruction and data aborts only if the interupted context had irqs unmasked. Following our 'dai' order, el1_dbg should run with everything masked. For the other cases we can inherit whatever we interrupted. Add a macro inherit_daif to set daif based on the interrupted pstate. Signed-off-by: James Morse Reviewed-by: Julien Thierry Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 6 ++++++ arch/arm64/kernel/entry.S | 12 ++++-------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 8bc63fc98752..1886b8b44672 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -49,6 +49,12 @@ msr daif, \flags .endm + /* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */ + .macro inherit_daif, pstate:req, tmp:req + and \tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) + msr daif, \tmp + .endm + /* * Enable and disable interrupts. */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index eff4a07c7ad7..72fc5969607f 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -510,11 +510,7 @@ el1_da: * Data abort handling */ mrs x3, far_el1 - enable_dbg - // re-enable interrupts if they were enabled in the aborted context - tbnz x23, #7, 1f // PSR_I_BIT - enable_irq -1: + inherit_daif pstate=x23, tmp=x2 clear_address_tag x0, x3 mov x2, sp // struct pt_regs bl do_mem_abort @@ -525,7 +521,7 @@ el1_sp_pc: * Stack or PC alignment exception handling */ mrs x0, far_el1 - enable_dbg + inherit_daif pstate=x23, tmp=x2 mov x2, sp bl do_sp_pc_abort ASM_BUG() @@ -533,7 +529,7 @@ el1_undef: /* * Undefined instruction */ - enable_dbg + inherit_daif pstate=x23, tmp=x2 mov x0, sp bl do_undefinstr ASM_BUG() @@ -550,7 +546,7 @@ el1_dbg: kernel_exit 1 el1_inv: // TODO: add support for undefined instructions in kernel mode - enable_dbg + inherit_daif pstate=x23, tmp=x2 mov x0, sp mov x2, x1 mov x1, #BAD_SYNC -- cgit From 746647c75afb5a1706426c2563ff02884a15530d Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 2 Nov 2017 12:12:40 +0000 Subject: arm64: entry.S convert el0_sync el0_sync also unmasks exceptions on a case-by-case basis, debug exceptions are enabled, unless this was a debug exception. Irqs are unmasked for some exception types but not for others. 
el0_dbg should run with everything masked to prevent us taking a debug exception from do_debug_exception. For the other cases we can unmask everything. This changes the behaviour of fpsimd_{acc,exc} and el0_inv which previously ran with irqs masked. This patch removed the last user of enable_dbg_and_irq, remove it. Signed-off-by: James Morse Reviewed-by: Julien Thierry Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 9 --------- arch/arm64/kernel/entry.S | 24 ++++++++++-------------- 2 files changed, 10 insertions(+), 23 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 1886b8b44672..90d1f01d602f 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -97,15 +97,6 @@ 9990: .endm -/* - * Enable both debug exceptions and interrupts. This is likely to be - * faster than two daifclr operations, since writes to this register - * are self-synchronising. - */ - .macro enable_dbg_and_irq - msr daifclr, #(8 | 2) - .endm - /* * SMP data memory barrier */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 72fc5969607f..a3e6a3954fe0 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -670,8 +670,7 @@ el0_da: * Data abort handling */ mrs x26, far_el1 - // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_daif ct_user_exit clear_address_tag x0, x26 mov x1, x25 @@ -683,8 +682,7 @@ el0_ia: * Instruction abort handling */ mrs x26, far_el1 - // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_daif ct_user_exit mov x0, x26 mov x1, x25 @@ -695,7 +693,7 @@ el0_fpsimd_acc: /* * Floating Point or Advanced SIMD access */ - enable_dbg + enable_daif ct_user_exit mov x0, x25 mov x1, sp @@ -705,7 +703,7 @@ el0_fpsimd_exc: /* * Floating Point or Advanced SIMD exception */ - enable_dbg + enable_daif ct_user_exit mov x0, x25 mov x1, sp @@ -716,8 +714,7 @@ el0_sp_pc: * Stack or PC alignment exception handling */ mrs x26, far_el1 - // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_daif ct_user_exit mov x0, x26 mov x1, x25 @@ -728,8 +725,7 @@ el0_undef: /* * Undefined instruction */ - // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_daif ct_user_exit mov x0, sp bl do_undefinstr @@ -738,7 +734,7 @@ el0_sys: /* * System instructions, for trapped cache maintenance instructions */ - enable_dbg_and_irq + enable_daif ct_user_exit mov x0, x25 mov x1, sp @@ -753,11 +749,11 @@ el0_dbg: mov x1, x25 mov x2, sp bl do_debug_exception - enable_dbg + enable_daif ct_user_exit b ret_to_user el0_inv: - enable_dbg + enable_daif ct_user_exit mov x0, sp mov x1, #BAD_SYNC @@ -836,7 +832,7 @@ el0_svc: mov wsc_nr, #__NR_syscalls el0_svc_naked: // compat entry point stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number - enable_dbg_and_irq + enable_daif ct_user_exit 1 ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks -- cgit From b282e1ce29bb677224ba8fb38e94f5e94e2656d5 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 2 Nov 2017 12:12:41 +0000 Subject: arm64: entry.S: convert elX_irq Following our 'dai' order, irqs should be processed with debug and serror exceptions unmasked. Add a helper to unmask these two, (and fiq for good measure). 
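For reference, the daifset/daifclr immediates used throughout this series decompose as follows (illustrative #defines, not taken from the patch):

#define DAIF_BIT_DBG    (1 << 3)        /* 8: Debug exceptions */
#define DAIF_BIT_ABT    (1 << 2)        /* 4: SError (asynchronous abort) */
#define DAIF_BIT_IRQ    (1 << 1)        /* 2: IRQ */
#define DAIF_BIT_FIQ    (1 << 0)        /* 1: FIQ */

/*
 * So enable_da_f's "msr daifclr, #(8 | 4 | 1)" unmasks Debug, SError and FIQ,
 * leaving only IRQ masked.
 */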
Signed-off-by: James Morse Reviewed-by: Julien Thierry Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 5 +++++ arch/arm64/kernel/entry.S | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 90d1f01d602f..aef72d886677 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -55,6 +55,11 @@ msr daif, \tmp .endm + /* IRQ is the lowest priority flag, unconditionally unmask the rest. */ + .macro enable_da_f + msr daifclr, #(8 | 4 | 1) + .endm + /* * Enable and disable interrupts. */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index a3e6a3954fe0..55d08072930a 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -557,7 +557,7 @@ ENDPROC(el1_sync) .align 6 el1_irq: kernel_entry 1 - enable_dbg + enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif @@ -766,7 +766,7 @@ ENDPROC(el0_sync) el0_irq: kernel_entry 0 el0_irq_naked: - enable_dbg + enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif -- cgit From a92d4d1454ab8b43b80b89fa31fcedb8821f8164 Mon Sep 17 00:00:00 2001 From: Xie XiuQi Date: Thu, 2 Nov 2017 12:12:42 +0000 Subject: arm64: entry.S: move SError handling into a C function for future expansion Today SError is taken using the inv_entry macro that ends up in bad_mode. SError can be used by the RAS Extensions to notify either the OS or firmware of CPU problems, some of which may have been corrected. To allow this handling to be added, add a do_serror() C function that just panic()s. Add the entry.S boiler plate to save/restore the CPU registers and unmask debug exceptions. Future patches may change do_serror() to return if the SError Interrupt was notification of a corrected error. 
Signed-off-by: Xie XiuQi Signed-off-by: Wang Xiongfeng [Split out of a bigger patch, added compat path, renamed, enabled debug exceptions] Signed-off-by: James Morse Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 36 +++++++++++++++++++++++++++++------- arch/arm64/kernel/traps.c | 13 +++++++++++++ 2 files changed, 42 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 55d08072930a..bfa4bd09110a 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -375,18 +375,18 @@ ENTRY(vectors) kernel_ventry el1_sync // Synchronous EL1h kernel_ventry el1_irq // IRQ EL1h kernel_ventry el1_fiq_invalid // FIQ EL1h - kernel_ventry el1_error_invalid // Error EL1h + kernel_ventry el1_error // Error EL1h kernel_ventry el0_sync // Synchronous 64-bit EL0 kernel_ventry el0_irq // IRQ 64-bit EL0 kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0 - kernel_ventry el0_error_invalid // Error 64-bit EL0 + kernel_ventry el0_error // Error 64-bit EL0 #ifdef CONFIG_COMPAT kernel_ventry el0_sync_compat // Synchronous 32-bit EL0 kernel_ventry el0_irq_compat // IRQ 32-bit EL0 kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0 - kernel_ventry el0_error_invalid_compat // Error 32-bit EL0 + kernel_ventry el0_error_compat // Error 32-bit EL0 #else kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0 kernel_ventry el0_irq_invalid // IRQ 32-bit EL0 @@ -455,10 +455,6 @@ ENDPROC(el0_error_invalid) el0_fiq_invalid_compat: inv_entry 0, BAD_FIQ, 32 ENDPROC(el0_fiq_invalid_compat) - -el0_error_invalid_compat: - inv_entry 0, BAD_ERROR, 32 -ENDPROC(el0_error_invalid_compat) #endif el1_sync_invalid: @@ -663,6 +659,10 @@ el0_svc_compat: el0_irq_compat: kernel_entry 0, 32 b el0_irq_naked + +el0_error_compat: + kernel_entry 0, 32 + b el0_error_naked #endif el0_da: @@ -780,6 +780,28 @@ el0_irq_naked: b ret_to_user ENDPROC(el0_irq) +el1_error: + kernel_entry 1 + mrs x1, esr_el1 + enable_dbg + mov x0, sp + bl do_serror + kernel_exit 1 +ENDPROC(el1_error) + +el0_error: + kernel_entry 0 +el0_error_naked: + mrs x1, esr_el1 + enable_dbg + mov x0, sp + bl do_serror + enable_daif + ct_user_exit + b ret_to_user +ENDPROC(el0_error) + + /* * This is the fast syscall return path. We do as little as possible here, * and this includes saving x0 back into the kernel stack. diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index fa52aefaf7f2..c3a8d9876704 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -661,6 +661,19 @@ asmlinkage void handle_bad_stack(struct pt_regs *regs) } #endif +asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr) +{ + nmi_enter(); + + console_verbose(); + + pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n", + smp_processor_id(), esr, esr_get_class_string(esr)); + __show_regs(regs); + + panic("Asynchronous SError Interrupt"); +} + void __pte_error(const char *file, int line, unsigned long val) { pr_err("%s:%d: bad pte %016lx.\n", file, line, val); -- cgit From d125bffcefb290743e4eac536886dba689edf1eb Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Fri, 3 Nov 2017 11:44:16 +0000 Subject: arm64: Fix static use of function graph Function graph does not work currently when CONFIG_DYNAMIC_TRACE is not set. This is because ftrace_function_trace is not always set to ftrace_stub when function_graph is in use. Do not skip checking of graph tracer functions when ftrace_function_trace is set. 
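In rough C pseudo-code, the corrected _mcount() flow is the following (sketch only; the real code is the assembly below, the ftrace_* symbols belong to the ftrace core, and the function name here is made up):

static void mcount_flow_sketch(unsigned long pc, unsigned long lr)
{
        if (ftrace_trace_function != ftrace_stub)
                ftrace_trace_function(pc, lr);

        /* graph-tracer checks now run even when a trace function was called */
        if (ftrace_graph_return != ftrace_stub ||
            ftrace_graph_entry != ftrace_graph_entry_stub)
                ftrace_graph_caller();
}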
Signed-off-by: Julien Thierry Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Acked-by: Steven Rostedt (VMware) Reviewed-by: AKASHI Takahiro Signed-off-by: Will Deacon --- arch/arm64/kernel/entry-ftrace.S | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index e1be42e11ff5..1175f5827ae1 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -108,13 +108,8 @@ ENTRY(_mcount) mcount_get_lr x1 // function's lr (= parent's pc) blr x2 // (*ftrace_trace_function)(pc, lr); -#ifndef CONFIG_FUNCTION_GRAPH_TRACER -skip_ftrace_call: // return; - mcount_exit // } -#else - mcount_exit // return; - // } -skip_ftrace_call: +skip_ftrace_call: // } +#ifdef CONFIG_FUNCTION_GRAPH_TRACER ldr_l x2, ftrace_graph_return cmp x0, x2 // if ((ftrace_graph_return b.ne ftrace_graph_caller // != ftrace_stub) @@ -123,9 +118,8 @@ skip_ftrace_call: adr_l x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub)) cmp x0, x2 b.ne ftrace_graph_caller // ftrace_graph_caller(); - - mcount_exit #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + mcount_exit ENDPROC(_mcount) #else /* CONFIG_DYNAMIC_FTRACE */ -- cgit From 19b4aff2025b4336392c30bb638b7d01c6263301 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Fri, 3 Nov 2017 11:45:17 +0000 Subject: perf: arm_spe: Prevent module unload while the PMU is in use When the PMU driver is built as a module, the perf expects the pmu->module to be valid, so that the driver is prevented from being unloaded while it is in use. Fix the SPE pmu driver to fill in this field. Cc: Will Deacon Acked-by: Mark Rutland Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- drivers/perf/arm_spe_pmu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 50511b13fd35..8ce262fc2561 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -889,6 +889,7 @@ static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu) struct device *dev = &spe_pmu->pdev->dev; spe_pmu->pmu = (struct pmu) { + .module = THIS_MODULE, .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE, .attr_groups = arm_spe_pmu_attr_groups, /* -- cgit From c7f5828bf77dcbd61d51f4736c1d5aa35663fbb4 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Fri, 3 Nov 2017 11:45:18 +0000 Subject: arm-ccn: perf: Prevent module unload while PMU is in use When the PMU driver is built as a module, the perf expects the pmu->module to be valid, so that the driver is prevented from being unloaded while it is in use. Fix the CCN pmu driver to fill in this field. 
Fixes: a33b0daab73a0 ("bus: ARM CCN PMU driver") Cc: Pawel Moll Cc: Will Deacon Acked-by: Mark Rutland Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- drivers/bus/arm-ccn.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index e8c6946fed9d..3063f5312397 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -1276,6 +1276,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) /* Perf driver registration */ ccn->dt.pmu = (struct pmu) { + .module = THIS_MODULE, .attr_groups = arm_ccn_pmu_attr_groups, .task_ctx_nr = perf_invalid_context, .event_init = arm_ccn_pmu_event_init, -- cgit From 27e64b4be4b863d884f3ec1686a2f744ae93a1b9 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:50:53 +0000 Subject: regset: Add support for dynamically sized regsets Currently the regset API doesn't allow for the possibility that regsets (or at least, the amount of meaningful data in a regset) may change in size. In particular, this results in useless padding being added to coredumps if a regset's current size is smaller than its theoretical maximum size. This patch adds a get_size() function to struct user_regset. Individual regset implementations can implement this function to return the current size of the regset data. A regset_size() function is added to provide callers with an abstract interface for determining the size of a regset without needing to know whether the regset is dynamically sized or not. The only affected user of this interface is the ELF coredump code: This patch ports ELF coredump to dump regsets with their actual size in the coredump. This has no effect except for new regsets that are dynamically sized and provide a get_size() implementation. Signed-off-by: Dave Martin Reviewed-by: Catalin Marinas Cc: Oleg Nesterov Cc: Alexander Viro Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Dmitry Safonov Cc: H. J. Lu Signed-off-by: Will Deacon --- fs/binfmt_elf.c | 10 ++++---- include/linux/regset.h | 67 ++++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 65 insertions(+), 12 deletions(-) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 73b01e474fdc..c697882b3aba 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1699,7 +1699,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, long signr, size_t *total) { unsigned int i; - unsigned int regset_size = view->regsets[0].n * view->regsets[0].size; + unsigned int regset0_size = regset_size(t->task, &view->regsets[0]); /* * NT_PRSTATUS is the one special case, because the regset data @@ -1708,11 +1708,11 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, * We assume that regset 0 is NT_PRSTATUS. 
*/ fill_prstatus(&t->prstatus, t->task, signr); - (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset_size, + (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size, &t->prstatus.pr_reg, NULL); fill_note(&t->notes[0], "CORE", NT_PRSTATUS, - PRSTATUS_SIZE(t->prstatus, regset_size), &t->prstatus); + PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus); *total += notesize(&t->notes[0]); do_thread_regset_writeback(t->task, &view->regsets[0]); @@ -1728,7 +1728,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, if (regset->core_note_type && regset->get && (!regset->active || regset->active(t->task, regset))) { int ret; - size_t size = regset->n * regset->size; + size_t size = regset_size(t->task, regset); void *data = kmalloc(size, GFP_KERNEL); if (unlikely(!data)) return 0; @@ -1743,7 +1743,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, size, data); else { SET_PR_FPVALID(&t->prstatus, - 1, regset_size); + 1, regset0_size); fill_note(&t->notes[i], "CORE", NT_PRFPREG, size, data); } diff --git a/include/linux/regset.h b/include/linux/regset.h index 8e0c9febf495..494cedaafdf2 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -106,6 +106,28 @@ typedef int user_regset_writeback_fn(struct task_struct *target, const struct user_regset *regset, int immediate); +/** + * user_regset_get_size_fn - type of @get_size function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * + * This call is optional; usually the pointer is %NULL. + * + * When provided, this function must return the current size of regset + * data, as observed by the @get function in &struct user_regset. The + * value returned must be a multiple of @size. The returned size is + * required to be valid only until the next time (if any) @regset is + * modified for @target. + * + * This function is intended for dynamically sized regsets. A regset + * that is statically sized does not need to implement it. + * + * This function should not be called directly: instead, callers should + * call regset_size() to determine the current size of a regset. + */ +typedef unsigned int user_regset_get_size_fn(struct task_struct *target, + const struct user_regset *regset); + /** * struct user_regset - accessible thread CPU state * @n: Number of slots (registers). @@ -117,19 +139,33 @@ typedef int user_regset_writeback_fn(struct task_struct *target, * @set: Function to store values. * @active: Function to report if regset is active, or %NULL. * @writeback: Function to write data back to user memory, or %NULL. + * @get_size: Function to return the regset's size, or %NULL. * * This data structure describes a machine resource we call a register set. * This is part of the state of an individual thread, not necessarily * actual CPU registers per se. A register set consists of a number of * similar slots, given by @n. Each slot is @size bytes, and aligned to - * @align bytes (which is at least @size). + * @align bytes (which is at least @size). For dynamically-sized + * regsets, @n must contain the maximum possible number of slots for the + * regset, and @get_size must point to a function that returns the + * current regset size. * - * These functions must be called only on the current thread or on a - * thread that is in %TASK_STOPPED or %TASK_TRACED state, that we are - * guaranteed will not be woken up and return to user mode, and that we - * have called wait_task_inactive() on. 
(The target thread always might - * wake up for SIGKILL while these functions are working, in which case - * that thread's user_regset state might be scrambled.) + * Callers that need to know only the current size of the regset and do + * not care about its internal structure should call regset_size() + * instead of inspecting @n or calling @get_size. + * + * For backward compatibility, the @get and @set methods must pad to, or + * accept, @n * @size bytes, even if the current regset size is smaller. + * The precise semantics of these operations depend on the regset being + * accessed. + * + * The functions to which &struct user_regset members point must be + * called only on the current thread or on a thread that is in + * %TASK_STOPPED or %TASK_TRACED state, that we are guaranteed will not + * be woken up and return to user mode, and that we have called + * wait_task_inactive() on. (The target thread always might wake up for + * SIGKILL while these functions are working, in which case that + * thread's user_regset state might be scrambled.) * * The @pos argument must be aligned according to @align; the @count * argument must be a multiple of @size. These functions are not @@ -156,6 +192,7 @@ struct user_regset { user_regset_set_fn *set; user_regset_active_fn *active; user_regset_writeback_fn *writeback; + user_regset_get_size_fn *get_size; unsigned int n; unsigned int size; unsigned int align; @@ -371,5 +408,21 @@ static inline int copy_regset_from_user(struct task_struct *target, return regset->set(target, regset, offset, size, NULL, data); } +/** + * regset_size - determine the current size of a regset + * @target: thread to be examined + * @regset: regset to be examined + * + * Note that the returned size is valid only until the next time + * (if any) @regset is modified for @target. + */ +static inline unsigned int regset_size(struct task_struct *target, + const struct user_regset *regset) +{ + if (!regset->get_size) + return regset->n * regset->size; + else + return regset->get_size(target, regset); +} #endif /* */ -- cgit From 94ef7ecbdf6f7d7f13bdf44b9eab4001e71208d6 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:50:54 +0000 Subject: arm64: fpsimd: Correctly annotate exception helpers called from asm A couple of FPSIMD exception handling functions that are called from entry.S are currently not annotated as such. This is not a big deal since asmlinkage does nothing on arm/arm64, but fixing the annotations is more consistent and may help avoid future surprises. This patch adds appropriate asmlinkage annotations for do_fpsimd_acc() and do_fpsimd_exc(). Reviewed-by: Catalin Marinas Signed-off-by: Dave Martin Signed-off-by: Will Deacon --- arch/arm64/kernel/fpsimd.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index f444f374bd7b..82786b18ff26 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -102,7 +103,7 @@ static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state); /* * Trapped FP/ASIMD access. */ -void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) +asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) { /* TODO: implement lazy context saving/restoring */ WARN_ON(1); @@ -111,7 +112,7 @@ void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) /* * Raise a SIGFPE for the current process. 
*/ -void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) +asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) { siginfo_t info; unsigned int si_code = 0; -- cgit From abf73988a7c2b2130fbee7cd19d2f57695566050 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:50:55 +0000 Subject: arm64: signal: Verify extra data is user-readable in sys_rt_sigreturn Currently sys_rt_sigreturn() verifies that the base sigframe is readable, but no similar check is performed on the extra data to which an extra_context record points. This matters because the extra data will be read with the unprotected user accessors. However, this is not a problem at present because the extra data base address is required to be exactly at the end of the base sigframe. So, there would need to be a non-user-readable kernel address within about 59K (SIGFRAME_MAXSZ - sizeof(struct rt_sigframe)) of some address for which access_ok(VERIFY_READ) returns true, in order for sigreturn to be able to read kernel memory that should be inaccessible to the user task. This is currently impossible due to the untranslatable address hole between the TTBR0 and TTBR1 address ranges. Disappearance of the hole between the TTBR0 and TTBR1 mapping ranges would require the VA size for TTBR0 and TTBR1 to grow to at least 55 bits, and either the disabling of tagged pointers for userspace or enabling of tagged pointers for kernel space; none of which is currently envisaged. Even so, it is wrong to use the unprotected user accessors without an accompanying access_ok() check. To avoid the potential for future surprises, this patch does an explicit access_ok() check on the extra data space when parsing an extra_context record. Fixes: 33f082614c34 ("arm64: signal: Allow expansion of the signal frame") Reviewed-by: Catalin Marinas Signed-off-by: Dave Martin Signed-off-by: Will Deacon --- arch/arm64/kernel/signal.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 8e6500c9471b..a032906cf4f7 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -344,6 +344,10 @@ static int parse_user_sigframe(struct user_ctxs *user, */ offset = 0; limit = extra_size; + + if (!access_ok(VERIFY_READ, base, limit)) + goto invalid; + continue; default: -- cgit From 93390c0a1b20b98a59ee0ef1a850687b3fbc304e Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:50:56 +0000 Subject: arm64: KVM: Hide unsupported AArch64 CPU features from guests Currently, a guest kernel sees the true CPU feature registers (ID_*_EL1) when it reads them using MRS instructions. This means that the guest may observe features that are present in the hardware but the host doesn't understand or doesn't provide support for. A guest may legimitately try to use such a feature as per the architecture, but use of the feature may trap instead of working normally, triggering undef injection into the guest. This is not a problem for the host, but the guest may go wrong when running on newer hardware than the host knows about. This patch hides from guest VMs any AArch64-specific CPU features that the host doesn't support, by exposing to the guest the sanitised versions of the registers computed by the cpufeatures framework, instead of the true hardware registers. To achieve this, HCR_EL2.TID3 is now set for AArch64 guests, and emulation code is added to KVM to report the sanitised versions of the affected registers in response to MRS and register reads from userspace. 
The affected registers are removed from invariant_sys_regs[] (since the invariant_sys_regs handling is no longer quite correct for them) and added to sys_reg_desgs[], with appropriate access(), get_user() and set_user() methods. No runtime vcpu storage is allocated for the registers: instead, they are read on demand from the cpufeatures framework. This may need modification in the future if there is a need for userspace to customise the features visible to the guest. Attempts by userspace to write the registers are handled similarly to the current invariant_sys_regs handling: writes are permitted, but only if they don't attempt to change the value. This is sufficient to support VM snapshot/restore from userspace. Because of the additional registers, restoring a VM on an older kernel may not work unless userspace knows how to handle the extra VM registers exposed to the KVM user ABI by this patch. Under the principle of least damage, this patch makes no attempt to handle any of the other registers currently in invariant_sys_regs[], or to emulate registers for AArch32: however, these could be handled in a similar way in future, as necessary. Signed-off-by: Dave Martin Reviewed-by: Marc Zyngier Acked-by: Catalin Marinas Acked-by: Christoffer Dall Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 3 + arch/arm64/kvm/hyp/switch.c | 6 + arch/arm64/kvm/sys_regs.c | 282 +++++++++++++++++++++++++++++++++------- 3 files changed, 246 insertions(+), 45 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 4dceb12f4596..609d59afe389 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -149,6 +149,9 @@ #define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0) #define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1) +#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) +#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) + #define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) #define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 945e79c641c4..35a90b8be3da 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -81,11 +81,17 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) * it will cause an exception. */ val = vcpu->arch.hcr_el2; + if (!(val & HCR_RW) && system_supports_fpsimd()) { write_sysreg(1 << 30, fpexc32_el2); isb(); } + + if (val & HCR_RW) /* for AArch64 only: */ + val |= HCR_TID3; /* TID3: trap feature register accesses */ + write_sysreg(val, hcr_el2); + /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ write_sysreg(1 << 15, hstr_el2); /* diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 2e070d3baf9f..b1f75522177c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -892,6 +892,137 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu, return true; } +/* Read a sanitised cpufeature ID register by sys_reg_desc */ +static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) +{ + u32 id = sys_reg((u32)r->Op0, (u32)r->Op1, + (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); + + return raz ? 
0 : read_sanitised_ftr_reg(id); +} + +/* cpufeature ID register access trap handlers */ + +static bool __access_id_reg(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r, + bool raz) +{ + if (p->is_write) + return write_to_read_only(vcpu, p, r); + + p->regval = read_id_reg(r, raz); + return true; +} + +static bool access_id_reg(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + return __access_id_reg(vcpu, p, r, false); +} + +static bool access_raz_id_reg(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + return __access_id_reg(vcpu, p, r, true); +} + +static int reg_from_user(u64 *val, const void __user *uaddr, u64 id); +static int reg_to_user(void __user *uaddr, const u64 *val, u64 id); +static u64 sys_reg_to_index(const struct sys_reg_desc *reg); + +/* + * cpufeature ID register user accessors + * + * For now, these registers are immutable for userspace, so no values + * are stored, and for set_id_reg() we don't allow the effective value + * to be changed. + */ +static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, + bool raz) +{ + const u64 id = sys_reg_to_index(rd); + const u64 val = read_id_reg(rd, raz); + + return reg_to_user(uaddr, &val, id); +} + +static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, + bool raz) +{ + const u64 id = sys_reg_to_index(rd); + int err; + u64 val; + + err = reg_from_user(&val, uaddr, id); + if (err) + return err; + + /* This is what we mean by invariant: you can't change it. */ + if (val != read_id_reg(rd, raz)) + return -EINVAL; + + return 0; +} + +static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + return __get_id_reg(rd, uaddr, false); +} + +static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + return __set_id_reg(rd, uaddr, false); +} + +static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + return __get_id_reg(rd, uaddr, true); +} + +static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + return __set_id_reg(rd, uaddr, true); +} + +/* sys_reg_desc initialiser for known cpufeature ID registers */ +#define ID_SANITISED(name) { \ + SYS_DESC(SYS_##name), \ + .access = access_id_reg, \ + .get_user = get_id_reg, \ + .set_user = set_id_reg, \ +} + +/* + * sys_reg_desc initialiser for architecturally unallocated cpufeature ID + * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2 + * (1 <= crm < 8, 0 <= Op2 < 8). + */ +#define ID_UNALLOCATED(crm, op2) { \ + Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \ + .access = access_raz_id_reg, \ + .get_user = get_raz_id_reg, \ + .set_user = set_raz_id_reg, \ +} + +/* + * sys_reg_desc initialiser for known ID registers that we hide from guests. + * For now, these are exposed just like unallocated ID regs: they appear + * RAZ for the guest. + */ +#define ID_HIDDEN(name) { \ + SYS_DESC(SYS_##name), \ + .access = access_raz_id_reg, \ + .get_user = get_raz_id_reg, \ + .set_user = set_raz_id_reg, \ +} + /* * Architected system registers. 
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 @@ -944,6 +1075,84 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 }, { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 }, + + /* + * ID regs: all ID_SANITISED() entries here must have corresponding + * entries in arm64_ftr_regs[]. + */ + + /* AArch64 mappings of the AArch32 ID registers */ + /* CRm=1 */ + ID_SANITISED(ID_PFR0_EL1), + ID_SANITISED(ID_PFR1_EL1), + ID_SANITISED(ID_DFR0_EL1), + ID_HIDDEN(ID_AFR0_EL1), + ID_SANITISED(ID_MMFR0_EL1), + ID_SANITISED(ID_MMFR1_EL1), + ID_SANITISED(ID_MMFR2_EL1), + ID_SANITISED(ID_MMFR3_EL1), + + /* CRm=2 */ + ID_SANITISED(ID_ISAR0_EL1), + ID_SANITISED(ID_ISAR1_EL1), + ID_SANITISED(ID_ISAR2_EL1), + ID_SANITISED(ID_ISAR3_EL1), + ID_SANITISED(ID_ISAR4_EL1), + ID_SANITISED(ID_ISAR5_EL1), + ID_SANITISED(ID_MMFR4_EL1), + ID_UNALLOCATED(2,7), + + /* CRm=3 */ + ID_SANITISED(MVFR0_EL1), + ID_SANITISED(MVFR1_EL1), + ID_SANITISED(MVFR2_EL1), + ID_UNALLOCATED(3,3), + ID_UNALLOCATED(3,4), + ID_UNALLOCATED(3,5), + ID_UNALLOCATED(3,6), + ID_UNALLOCATED(3,7), + + /* AArch64 ID registers */ + /* CRm=4 */ + ID_SANITISED(ID_AA64PFR0_EL1), + ID_SANITISED(ID_AA64PFR1_EL1), + ID_UNALLOCATED(4,2), + ID_UNALLOCATED(4,3), + ID_UNALLOCATED(4,4), + ID_UNALLOCATED(4,5), + ID_UNALLOCATED(4,6), + ID_UNALLOCATED(4,7), + + /* CRm=5 */ + ID_SANITISED(ID_AA64DFR0_EL1), + ID_SANITISED(ID_AA64DFR1_EL1), + ID_UNALLOCATED(5,2), + ID_UNALLOCATED(5,3), + ID_HIDDEN(ID_AA64AFR0_EL1), + ID_HIDDEN(ID_AA64AFR1_EL1), + ID_UNALLOCATED(5,6), + ID_UNALLOCATED(5,7), + + /* CRm=6 */ + ID_SANITISED(ID_AA64ISAR0_EL1), + ID_SANITISED(ID_AA64ISAR1_EL1), + ID_UNALLOCATED(6,2), + ID_UNALLOCATED(6,3), + ID_UNALLOCATED(6,4), + ID_UNALLOCATED(6,5), + ID_UNALLOCATED(6,6), + ID_UNALLOCATED(6,7), + + /* CRm=7 */ + ID_SANITISED(ID_AA64MMFR0_EL1), + ID_SANITISED(ID_AA64MMFR1_EL1), + ID_SANITISED(ID_AA64MMFR2_EL1), + ID_UNALLOCATED(7,3), + ID_UNALLOCATED(7,4), + ID_UNALLOCATED(7,5), + ID_UNALLOCATED(7,6), + ID_UNALLOCATED(7,7), + { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, @@ -1790,8 +1999,8 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, if (!r) r = find_reg(¶ms, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); - /* Not saved in the sys_reg array? */ - if (r && !r->reg) + /* Not saved in the sys_reg array and not otherwise accessible? 
*/ + if (r && !(r->reg || r->get_user)) r = NULL; return r; @@ -1815,20 +2024,6 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, FUNCTION_INVARIANT(midr_el1) FUNCTION_INVARIANT(ctr_el0) FUNCTION_INVARIANT(revidr_el1) -FUNCTION_INVARIANT(id_pfr0_el1) -FUNCTION_INVARIANT(id_pfr1_el1) -FUNCTION_INVARIANT(id_dfr0_el1) -FUNCTION_INVARIANT(id_afr0_el1) -FUNCTION_INVARIANT(id_mmfr0_el1) -FUNCTION_INVARIANT(id_mmfr1_el1) -FUNCTION_INVARIANT(id_mmfr2_el1) -FUNCTION_INVARIANT(id_mmfr3_el1) -FUNCTION_INVARIANT(id_isar0_el1) -FUNCTION_INVARIANT(id_isar1_el1) -FUNCTION_INVARIANT(id_isar2_el1) -FUNCTION_INVARIANT(id_isar3_el1) -FUNCTION_INVARIANT(id_isar4_el1) -FUNCTION_INVARIANT(id_isar5_el1) FUNCTION_INVARIANT(clidr_el1) FUNCTION_INVARIANT(aidr_el1) @@ -1836,20 +2031,6 @@ FUNCTION_INVARIANT(aidr_el1) static struct sys_reg_desc invariant_sys_regs[] = { { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 }, { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 }, - { SYS_DESC(SYS_ID_PFR0_EL1), NULL, get_id_pfr0_el1 }, - { SYS_DESC(SYS_ID_PFR1_EL1), NULL, get_id_pfr1_el1 }, - { SYS_DESC(SYS_ID_DFR0_EL1), NULL, get_id_dfr0_el1 }, - { SYS_DESC(SYS_ID_AFR0_EL1), NULL, get_id_afr0_el1 }, - { SYS_DESC(SYS_ID_MMFR0_EL1), NULL, get_id_mmfr0_el1 }, - { SYS_DESC(SYS_ID_MMFR1_EL1), NULL, get_id_mmfr1_el1 }, - { SYS_DESC(SYS_ID_MMFR2_EL1), NULL, get_id_mmfr2_el1 }, - { SYS_DESC(SYS_ID_MMFR3_EL1), NULL, get_id_mmfr3_el1 }, - { SYS_DESC(SYS_ID_ISAR0_EL1), NULL, get_id_isar0_el1 }, - { SYS_DESC(SYS_ID_ISAR1_EL1), NULL, get_id_isar1_el1 }, - { SYS_DESC(SYS_ID_ISAR2_EL1), NULL, get_id_isar2_el1 }, - { SYS_DESC(SYS_ID_ISAR3_EL1), NULL, get_id_isar3_el1 }, - { SYS_DESC(SYS_ID_ISAR4_EL1), NULL, get_id_isar4_el1 }, - { SYS_DESC(SYS_ID_ISAR5_EL1), NULL, get_id_isar5_el1 }, { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 }, { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 }, { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 }, @@ -2079,12 +2260,31 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) return true; } +static int walk_one_sys_reg(const struct sys_reg_desc *rd, + u64 __user **uind, + unsigned int *total) +{ + /* + * Ignore registers we trap but don't save, + * and for which no custom user accessor is provided. + */ + if (!(rd->reg || rd->get_user)) + return 0; + + if (!copy_reg_to_user(rd, uind)) + return -EFAULT; + + (*total)++; + return 0; +} + /* Assumed ordered tables, see kvm_sys_reg_table_init. */ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) { const struct sys_reg_desc *i1, *i2, *end1, *end2; unsigned int total = 0; size_t num; + int err; /* We check for duplicates here, to allow arch-specific overrides. */ i1 = get_target_table(vcpu->arch.target, true, &num); @@ -2098,21 +2298,13 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) while (i1 || i2) { int cmp = cmp_sys_reg(i1, i2); /* target-specific overrides generic entry. */ - if (cmp <= 0) { - /* Ignore registers we trap but don't save. */ - if (i1->reg) { - if (!copy_reg_to_user(i1, &uind)) - return -EFAULT; - total++; - } - } else { - /* Ignore registers we trap but don't save. 
*/ - if (i2->reg) { - if (!copy_reg_to_user(i2, &uind)) - return -EFAULT; - total++; - } - } + if (cmp <= 0) + err = walk_one_sys_reg(i1, &uind, &total); + else + err = walk_one_sys_reg(i2, &uind, &total); + + if (err) + return err; if (cmp <= 0 && ++i1 == end1) i1 = NULL; -- cgit From b472db6cf8c603809c9e177680a2fb0139218bba Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:50:57 +0000 Subject: arm64: efi: Add missing Kconfig dependency on KERNEL_MODE_NEON MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The EFI runtime services ABI permits calls to EFI to clobber certain FPSIMD/NEON registers, as per the AArch64 procedure call standard. Saving/restoring the clobbered registers around such calls needs KERNEL_MODE_NEON, but the dependency is missing from Kconfig. This patch adds the missing dependency. This will aid bisection of the patches implementing support for the ARM Scalable Vector Extension (SVE). Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Acked-by: Catalin Marinas Cc: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 05aa5924420e..5cfb79d76970 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1064,6 +1064,7 @@ config EFI_STUB config EFI bool "UEFI runtime support" depends on OF && !CPU_BIG_ENDIAN + depends on KERNEL_MODE_NEON select LIBFDT select UCS2_STRING select EFI_PARAMS_FROM_FDT -- cgit From 38b9aeb32fa732a0678cc0078ea3f4829a3cb89b Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:50:58 +0000 Subject: arm64: Port deprecated instruction emulation to new sysctl interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, armv8_deprecated.c takes charge of the "abi" sysctl directory, which makes life difficult for other code that wants to register sysctls in the same directory. There is a "new" [1] sysctl registration interface that removes the need to define ctl_tables for parent directories explicitly, which is ideal here. This patch ports register_insn_emulation_sysctl() over to the register_sysctl() interface and removes the redundant ctl_table for "abi". Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Reviewed-by: Catalin Marinas [1] fea478d4101a ("sysctl: Add register_sysctl for normal sysctl users") The commit message notes an intent to port users of the pre-existing interfaces over to register_sysctl(), though the number of users of the new interface currently appears negligible.
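As a rough sketch of the difference between the two registration styles (not taken from this patch; the "example_knob" table and example_value variable are made-up names), a driver wanting a sysctl under "abi" would do something like the following with the old and new interfaces:

/*
 * Illustrative only: old style needs an explicit parent ctl_table for
 * the "abi" directory plus register_sysctl_table(); the newer
 * register_sysctl() takes the directory path as a string instead.
 */
#include <linux/sysctl.h>

static int example_value;

static struct ctl_table example_sysctl[] = {
        {
                .procname       = "example_knob",
                .data           = &example_value,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { }
};

/* Old style: the caller must describe the "abi" directory itself. */
static struct ctl_table example_abi_dir[] = {
        {
                .procname       = "abi",
                .mode           = 0555,
                .child          = example_sysctl,
        },
        { }
};

static void example_register_old(void)
{
        register_sysctl_table(example_abi_dir);
}

/* New style: no parent table, so multiple users can share "abi". */
static void example_register_new(void)
{
        register_sysctl("abi", example_sysctl);
}

The second form is what the diff below switches to, which is what lets later patches in this series add their own "abi" sysctls without colliding with the deprecated-instruction code.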
Signed-off-by: Will Deacon --- arch/arm64/kernel/armv8_deprecated.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index f6a831b3bd3a..b9f2702db440 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -228,15 +228,7 @@ ret: return ret; } -static struct ctl_table ctl_abi[] = { - { - .procname = "abi", - .mode = 0555, - }, - { } -}; - -static void __init register_insn_emulation_sysctl(struct ctl_table *table) +static void __init register_insn_emulation_sysctl(void) { unsigned long flags; int i = 0; @@ -262,8 +254,7 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table) } raw_spin_unlock_irqrestore(&insn_emulation_lock, flags); - table->child = insns_sysctl; - register_sysctl_table(table); + register_sysctl("abi", insns_sysctl); } /* @@ -644,7 +635,7 @@ static int __init armv8_deprecated_init(void) cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING, "arm64/isndep:starting", run_all_insn_set_hw_mode, NULL); - register_insn_emulation_sysctl(ctl_abi); + register_insn_emulation_sysctl(); return 0; } -- cgit From 9cf5b54fafedbc381df77c7f8d2aab61f4900657 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:50:59 +0000 Subject: arm64: fpsimd: Simplify uses of {set,clear}_ti_thread_flag() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The existing FPSIMD context switch code contains a couple of instances of {set,clear}_ti_thread_flag(task_thread_info(task)). Since there are thread flag manipulators that operate directly on task_struct, this verbosity isn't strictly needed. For consistency, this patch simplifies the affected calls. This should have no impact on behaviour. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Reviewed-by: Catalin Marinas Acked-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/fpsimd.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 82786b18ff26..901078a9cbe5 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -160,11 +160,9 @@ void fpsimd_thread_switch(struct task_struct *next) if (__this_cpu_read(fpsimd_last_state) == st && st->cpu == smp_processor_id()) - clear_ti_thread_flag(task_thread_info(next), - TIF_FOREIGN_FPSTATE); + clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE); else - set_ti_thread_flag(task_thread_info(next), - TIF_FOREIGN_FPSTATE); + set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE); } } -- cgit From 672365649ccac68cf6fafecad1a7913951e7493b Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:00 +0000 Subject: arm64/sve: System register and exception syndrome definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The SVE architecture adds some system registers, ID register fields and a dedicated ESR exception class. This patch adds the appropriate definitions that will be needed by the kernel.
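For orientation, here is a minimal sketch (not part of this patch) of how code might recognise the new exception class; it assumes the ESR_ELx_EC() field accessor already provided by <asm/esr.h>, with ESR_ELx_EC_SVE being the value added below:

/*
 * Sketch only: test whether a syndrome value reports the SVE exception
 * class (EC == 0x19 once this patch is applied).  ESR_ELx_EC() is
 * assumed to be the existing accessor in <asm/esr.h>.
 */
#include <linux/types.h>
#include <asm/esr.h>

static inline bool esr_is_sve_trap(u32 esr)
{
        return ESR_ELx_EC(esr) == ESR_ELx_EC_SVE;
}

A later patch in this series wires this exception class into the EL0 entry path to implement the SVE access trap.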
Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/esr.h | 3 ++- arch/arm64/include/asm/kvm_arm.h | 1 + arch/arm64/include/asm/sysreg.h | 21 +++++++++++++++++++++ arch/arm64/kernel/traps.c | 1 + 4 files changed, 25 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 66ed8b6b9976..014d7d8edcf9 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -43,7 +43,8 @@ #define ESR_ELx_EC_HVC64 (0x16) #define ESR_ELx_EC_SMC64 (0x17) #define ESR_ELx_EC_SYS64 (0x18) -/* Unallocated EC: 0x19 - 0x1E */ +#define ESR_ELx_EC_SVE (0x19) +/* Unallocated EC: 0x1A - 0x1E */ #define ESR_ELx_EC_IMP_DEF (0x1f) #define ESR_ELx_EC_IABT_LOW (0x20) #define ESR_ELx_EC_IABT_CUR (0x21) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 61d694c2eae5..dbf05370169a 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -185,6 +185,7 @@ #define CPTR_EL2_TCPAC (1 << 31) #define CPTR_EL2_TTA (1 << 20) #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) +#define CPTR_EL2_TZ (1 << 8) #define CPTR_EL2_DEFAULT 0x000033ff /* Hyp Debug Configuration Register bits */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 609d59afe389..08cc88574659 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -145,6 +145,7 @@ #define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0) #define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1) +#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4) #define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0) #define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1) @@ -163,6 +164,8 @@ #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) #define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2) +#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) + #define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0) #define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1) #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) @@ -346,6 +349,8 @@ #define SYS_PMCCFILTR_EL0 sys_reg (3, 3, 14, 15, 7) +#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) + #define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) #define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1) #define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0) @@ -432,6 +437,7 @@ #define ID_AA64ISAR1_DPB_SHIFT 0 /* id_aa64pfr0 */ +#define ID_AA64PFR0_SVE_SHIFT 32 #define ID_AA64PFR0_GIC_SHIFT 24 #define ID_AA64PFR0_ASIMD_SHIFT 20 #define ID_AA64PFR0_FP_SHIFT 16 @@ -440,6 +446,7 @@ #define ID_AA64PFR0_EL1_SHIFT 4 #define ID_AA64PFR0_EL0_SHIFT 0 +#define ID_AA64PFR0_SVE 0x1 #define ID_AA64PFR0_FP_NI 0xf #define ID_AA64PFR0_FP_SUPPORTED 0x0 #define ID_AA64PFR0_ASIMD_NI 0xf @@ -541,6 +548,20 @@ #endif +/* + * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which + * are reserved by the SVE architecture for future expansion of the LEN + * field, with compatible semantics. 
+ */ +#define ZCR_ELx_LEN_SHIFT 0 +#define ZCR_ELx_LEN_SIZE 9 +#define ZCR_ELx_LEN_MASK 0x1ff + +#define CPACR_EL1_ZEN_EL1EN (1 << 16) /* enable EL1 access */ +#define CPACR_EL1_ZEN_EL0EN (1 << 17) /* enable EL0 access, if EL1EN set */ +#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) + + /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ #define SYS_MPIDR_SAFE_VAL (1UL << 31) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index c3a8d9876704..3408b33f6e7c 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -556,6 +556,7 @@ static const char *esr_class_str[] = { [ESR_ELx_EC_HVC64] = "HVC (AArch64)", [ESR_ELx_EC_SMC64] = "SMC (AArch64)", [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)", + [ESR_ELx_EC_SVE] = "SVE", [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF", [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)", [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)", -- cgit From 1fc5dce78ad15dce2f23336d339e5f7290b61e19 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:01 +0000 Subject: arm64/sve: Low-level SVE architectural state manipulation functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Manipulating the SVE architectural state, including the vector and predicate registers, first-fault register and the vector length, requires the use of dedicated instructions added by SVE. This patch adds suitable assembly functions for saving and restoring the SVE registers and querying the vector length. Setting of the vector length is done as part of register restore. Since people building kernels may not all get an SVE-enabled toolchain for a while, this patch uses macros that generate explicit opcodes in place of assembler mnemonics. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Acked-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/fpsimd.h | 5 ++ arch/arm64/include/asm/fpsimdmacros.h | 148 ++++++++++++++++++++++++++++++++++ arch/arm64/kernel/entry-fpsimd.S | 17 ++++ 3 files changed, 170 insertions(+) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 410c48163c6a..026a7c7fc30b 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -67,6 +67,11 @@ extern void fpsimd_update_current_state(struct fpsimd_state *state); extern void fpsimd_flush_task_state(struct task_struct *target); +extern void sve_save_state(void *state, u32 *pfpsr); +extern void sve_load_state(void const *state, u32 const *pfpsr, + unsigned long vq_minus_1); +extern unsigned int sve_get_vl(void); + /* For use by EFI runtime services calls only */ extern void __efi_fpsimd_begin(void); extern void __efi_fpsimd_end(void); diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index 0f5fdd388b0d..e050d765ca9e 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -75,3 +75,151 @@ ldr w\tmpnr, [\state, #16 * 2 + 4] fpsimd_restore_fpcr x\tmpnr, \state .endm + +/* Sanity-check macros to help avoid encoding garbage instructions */ + +.macro _check_general_reg nr + .if (\nr) < 0 || (\nr) > 30 + .error "Bad register number \nr." + .endif +.endm + +.macro _sve_check_zreg znr + .if (\znr) < 0 || (\znr) > 31 + .error "Bad Scalable Vector Extension vector register number \znr." + .endif +.endm + +.macro _sve_check_preg pnr + .if (\pnr) < 0 || (\pnr) > 15 + .error "Bad Scalable Vector Extension predicate register number \pnr." 
+ .endif +.endm + +.macro _check_num n, min, max + .if (\n) < (\min) || (\n) > (\max) + .error "Number \n out of range [\min,\max]" + .endif +.endm + +/* SVE instruction encodings for non-SVE-capable assemblers */ + +/* STR (vector): STR Z\nz, [X\nxbase, #\offset, MUL VL] */ +.macro _sve_str_v nz, nxbase, offset=0 + _sve_check_zreg \nz + _check_general_reg \nxbase + _check_num (\offset), -0x100, 0xff + .inst 0xe5804000 \ + | (\nz) \ + | ((\nxbase) << 5) \ + | (((\offset) & 7) << 10) \ + | (((\offset) & 0x1f8) << 13) +.endm + +/* LDR (vector): LDR Z\nz, [X\nxbase, #\offset, MUL VL] */ +.macro _sve_ldr_v nz, nxbase, offset=0 + _sve_check_zreg \nz + _check_general_reg \nxbase + _check_num (\offset), -0x100, 0xff + .inst 0x85804000 \ + | (\nz) \ + | ((\nxbase) << 5) \ + | (((\offset) & 7) << 10) \ + | (((\offset) & 0x1f8) << 13) +.endm + +/* STR (predicate): STR P\np, [X\nxbase, #\offset, MUL VL] */ +.macro _sve_str_p np, nxbase, offset=0 + _sve_check_preg \np + _check_general_reg \nxbase + _check_num (\offset), -0x100, 0xff + .inst 0xe5800000 \ + | (\np) \ + | ((\nxbase) << 5) \ + | (((\offset) & 7) << 10) \ + | (((\offset) & 0x1f8) << 13) +.endm + +/* LDR (predicate): LDR P\np, [X\nxbase, #\offset, MUL VL] */ +.macro _sve_ldr_p np, nxbase, offset=0 + _sve_check_preg \np + _check_general_reg \nxbase + _check_num (\offset), -0x100, 0xff + .inst 0x85800000 \ + | (\np) \ + | ((\nxbase) << 5) \ + | (((\offset) & 7) << 10) \ + | (((\offset) & 0x1f8) << 13) +.endm + +/* RDVL X\nx, #\imm */ +.macro _sve_rdvl nx, imm + _check_general_reg \nx + _check_num (\imm), -0x20, 0x1f + .inst 0x04bf5000 \ + | (\nx) \ + | (((\imm) & 0x3f) << 5) +.endm + +/* RDFFR (unpredicated): RDFFR P\np.B */ +.macro _sve_rdffr np + _sve_check_preg \np + .inst 0x2519f000 \ + | (\np) +.endm + +/* WRFFR P\np.B */ +.macro _sve_wrffr np + _sve_check_preg \np + .inst 0x25289000 \ + | ((\np) << 5) +.endm + +.macro __for from:req, to:req + .if (\from) == (\to) + _for__body \from + .else + __for \from, (\from) + ((\to) - (\from)) / 2 + __for (\from) + ((\to) - (\from)) / 2 + 1, \to + .endif +.endm + +.macro _for var:req, from:req, to:req, insn:vararg + .macro _for__body \var:req + \insn + .endm + + __for \from, \to + + .purgem _for__body +.endm + +.macro sve_save nxbase, xpfpsr, nxtmp + _for n, 0, 31, _sve_str_v \n, \nxbase, \n - 34 + _for n, 0, 15, _sve_str_p \n, \nxbase, \n - 16 + _sve_rdffr 0 + _sve_str_p 0, \nxbase + _sve_ldr_p 0, \nxbase, -16 + + mrs x\nxtmp, fpsr + str w\nxtmp, [\xpfpsr] + mrs x\nxtmp, fpcr + str w\nxtmp, [\xpfpsr, #4] +.endm + +.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp + mrs_s x\nxtmp, SYS_ZCR_EL1 + bic x\nxtmp, x\nxtmp, ZCR_ELx_LEN_MASK + orr x\nxtmp, x\nxtmp, \xvqminus1 + msr_s SYS_ZCR_EL1, x\nxtmp // self-synchronising + + _for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34 + _sve_ldr_p 0, \nxbase + _sve_wrffr 0 + _for n, 0, 15, _sve_ldr_p \n, \nxbase, \n - 16 + + ldr w\nxtmp, [\xpfpsr] + msr fpsr, x\nxtmp + ldr w\nxtmp, [\xpfpsr, #4] + msr fpcr, x\nxtmp +.endm diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index 6a27cd6dbfa6..73f17bffcd23 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -41,3 +41,20 @@ ENTRY(fpsimd_load_state) fpsimd_restore x0, 8 ret ENDPROC(fpsimd_load_state) + +#ifdef CONFIG_ARM64_SVE +ENTRY(sve_save_state) + sve_save 0, x1, 2 + ret +ENDPROC(sve_save_state) + +ENTRY(sve_load_state) + sve_load 0, x1, x2, 3 + ret +ENDPROC(sve_load_state) + +ENTRY(sve_get_vl) + _sve_rdvl 0, 1 + ret +ENDPROC(sve_get_vl) +#endif /* 
CONFIG_ARM64_SVE */ -- cgit From ddd25ad1fde8456810dee1b26a870395bcd6339d Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:02 +0000 Subject: arm64/sve: Kconfig update and conditional compilation support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds CONFIG_ARM64_SVE to control building of SVE support into the kernel, and adds a stub predicate system_supports_sve() to control conditional compilation and runtime SVE support. system_supports_sve() just returns false for now: it will be replaced with a non-trivial implementation in a later patch, once SVE support is complete enough to be enabled safely. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 11 +++++++++++ arch/arm64/include/asm/cpufeature.h | 5 +++++ 2 files changed, 16 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 5cfb79d76970..1046074e0c24 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -976,6 +976,17 @@ config ARM64_PMEM endmenu +config ARM64_SVE + bool "ARM Scalable Vector Extension support" + default y + help + The Scalable Vector Extension (SVE) is an extension to the AArch64 + execution state which complements and extends the SIMD functionality + of the base architecture to support much larger vectors and to enable + additional vectorisation opportunities. + + To enable use of this extension on CPUs that implement it, say Y. + config ARM64_MODULE_CMODEL_LARGE bool diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 428ee1f2468c..4ea3441a8fa7 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -262,6 +262,11 @@ static inline bool system_uses_ttbr0_pan(void) !cpus_have_const_cap(ARM64_HAS_PAN); } +static inline bool system_supports_sve(void) +{ + return false; +} + #endif /* __ASSEMBLY__ */ #endif -- cgit From d0b8cd3187889476144bd9b13bf36a932c3e7952 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:03 +0000 Subject: arm64/sve: Signal frame and context structure definition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch defines the representation that will be used for the SVE register state in the signal frame, in preparation for saving and restoring the SVE registers around signals. The same layout will also be used for the in-kernel task state. Due to the variability of the SVE vector length, it is not possible to define a fixed C struct to describe all the registers. Instead, macros are defined in sigcontext.h to facilitate access to the parts of the structure.
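As a hedged illustration of how the layout is intended to be consumed (not part of this patch; the example_* helpers are made-up names), userspace walking a signal frame could locate individual registers with the macros this patch adds, once it has checked that head.size covers SVE_SIG_CONTEXT_SIZE(vq), i.e. that the SVE registers were live at signal delivery:

/*
 * Illustrative userspace sketch using the macros defined by this
 * patch: the offsets are relative to the start of struct sve_context,
 * so register data can be found by simple pointer arithmetic.
 */
#include <asm/sigcontext.h>

static void *example_sig_zreg(struct sve_context *sve, unsigned int n)
{
        unsigned int vq = sve_vq_from_vl(sve->vl);

        /* Z-register n: vq * 16 bytes starting at this offset */
        return (char *)sve + SVE_SIG_ZREG_OFFSET(vq, n);
}

static void *example_sig_ffr(struct sve_context *sve)
{
        unsigned int vq = sve_vq_from_vl(sve->vl);

        /* FFR follows the predicate registers */
        return (char *)sve + SVE_SIG_FFR_OFFSET(vq);
}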
Signed-off-by: Dave Martin Reviewed-by: Catalin Marinas Cc: Alex Bennée Signed-off-by: Will Deacon --- arch/arm64/include/uapi/asm/sigcontext.h | 117 ++++++++++++++++++++++++++++++- 1 file changed, 116 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index f0a76b9fcd6e..7654a81bc577 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -16,6 +16,8 @@ #ifndef _UAPI__ASM_SIGCONTEXT_H #define _UAPI__ASM_SIGCONTEXT_H +#ifndef __ASSEMBLY__ + #include /* @@ -41,10 +43,11 @@ struct sigcontext { * * 0x210 fpsimd_context * 0x10 esr_context + * 0x8a0 sve_context (vl <= 64) (optional) * 0x20 extra_context (optional) * 0x10 terminator (null _aarch64_ctx) * - * 0xdb0 (reserved for future allocation) + * 0x510 (reserved for future allocation) * * New records that can exceed this space need to be opt-in for userspace, so * that an expanded signal frame is not generated unexpectedly. The mechanism @@ -116,4 +119,116 @@ struct extra_context { __u32 __reserved[3]; }; +#define SVE_MAGIC 0x53564501 + +struct sve_context { + struct _aarch64_ctx head; + __u16 vl; + __u16 __reserved[3]; +}; + +#endif /* !__ASSEMBLY__ */ + +/* + * The SVE architecture leaves space for future expansion of the + * vector length beyond its initial architectural limit of 2048 bits + * (16 quadwords). + */ +#define SVE_VQ_BYTES 16 /* number of bytes per quadword */ + +#define SVE_VQ_MIN 1 +#define SVE_VQ_MAX 512 + +#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES) +#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES) + +#define SVE_NUM_ZREGS 32 +#define SVE_NUM_PREGS 16 + +#define sve_vl_valid(vl) \ + ((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX) +#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES) +#define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES) + +/* + * If the SVE registers are currently live for the thread at signal delivery, + * sve_context.head.size >= + * SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)) + * and the register data may be accessed using the SVE_SIG_*() macros. + * + * If sve_context.head.size < + * SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)), + * the SVE registers were not live for the thread and no register data + * is included: in this case, the SVE_SIG_*() macros should not be + * used except for this check. + * + * The same convention applies when returning from a signal: a caller + * will need to remove or resize the sve_context block if it wants to + * make the SVE registers live when they were previously non-live or + * vice-versa. This may require the the caller to allocate fresh + * memory and/or move other context blocks in the signal frame. + * + * Changing the vector length during signal return is not permitted: + * sve_context.vl must equal the thread's current vector length when + * doing a sigreturn. + * + * + * Note: for all these macros, the "vq" argument denotes the SVE + * vector length in quadwords (i.e., units of 128 bits). + * + * The correct way to obtain vq is to use sve_vq_from_vl(vl). The + * result is valid if and only if sve_vl_valid(vl) is true. This is + * guaranteed for a struct sve_context written by the kernel. + * + * + * Additional macros describe the contents and layout of the payload. 
+ * For each, SVE_SIG_x_OFFSET(args) is the start offset relative to + * the start of struct sve_context, and SVE_SIG_x_SIZE(args) is the + * size in bytes: + * + * x type description + * - ---- ----------- + * REGS the entire SVE context + * + * ZREGS __uint128_t[SVE_NUM_ZREGS][vq] all Z-registers + * ZREG __uint128_t[vq] individual Z-register Zn + * + * PREGS uint16_t[SVE_NUM_PREGS][vq] all P-registers + * PREG uint16_t[vq] individual P-register Pn + * + * FFR uint16_t[vq] first-fault status register + * + * Additional data might be appended in the future. + */ + +#define SVE_SIG_ZREG_SIZE(vq) ((__u32)(vq) * SVE_VQ_BYTES) +#define SVE_SIG_PREG_SIZE(vq) ((__u32)(vq) * (SVE_VQ_BYTES / 8)) +#define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq) + +#define SVE_SIG_REGS_OFFSET \ + ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \ + / SVE_VQ_BYTES * SVE_VQ_BYTES) + +#define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET +#define SVE_SIG_ZREG_OFFSET(vq, n) \ + (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n)) +#define SVE_SIG_ZREGS_SIZE(vq) \ + (SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET) + +#define SVE_SIG_PREGS_OFFSET(vq) \ + (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq)) +#define SVE_SIG_PREG_OFFSET(vq, n) \ + (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n)) +#define SVE_SIG_PREGS_SIZE(vq) \ + (SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq)) + +#define SVE_SIG_FFR_OFFSET(vq) \ + (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq)) + +#define SVE_SIG_REGS_SIZE(vq) \ + (SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET) + +#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq)) + + #endif /* _UAPI__ASM_SIGCONTEXT_H */ -- cgit From 22043a3c082a584434a8c0d8b5e04cb6da985296 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:04 +0000 Subject: arm64/sve: Low-level CPU setup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To enable the kernel to use SVE, SVE traps from EL1 to EL2 must be disabled. To take maximum advantage of the hardware, the full available vector length also needs to be enabled for EL1 by programming ZCR_EL2.LEN. (The kernel will program ZCR_EL1.LEN as required, but this cannot override the limit set by ZCR_EL2.) This patch makes the appropriate changes to the EL2 early setup code. Signed-off-by: Dave Martin Reviewed-by: Catalin Marinas Cc: Alex Bennée Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index cfa90a43fbe7..67e86a0f57ac 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -524,8 +524,19 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems mov x0, #0x33ff msr cptr_el2, x0 // Disable copro. traps to EL2 + /* SVE register access */ + mrs x1, id_aa64pfr0_el1 + ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4 + cbz x1, 7f + + bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps + msr cptr_el2, x0 // Disable copro. traps to EL2 + isb + mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector + msr_s SYS_ZCR_EL2, x1 // length for EL1. 
+ /* Hypervisor stub */ - adr_l x0, __hyp_stub_vectors +7: adr_l x0, __hyp_stub_vectors msr vbar_el2, x0 /* spsr */ -- cgit From bc0ee476036478a85beeed51f0d94c8729fd0544 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:05 +0000 Subject: arm64/sve: Core task context handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds the core support for switching and managing the SVE architectural state of user tasks. Calls to the existing FPSIMD low-level save/restore functions are factored out as new functions task_fpsimd_{save,load}(), since SVE now dynamically may or may not need to be handled at these points depending on the kernel configuration, hardware features discovered at boot, and the runtime state of the task. To make these decisions as fast as possible, const cpucaps are used where feasible, via the system_supports_sve() helper. The SVE registers are only tracked for threads that have explicitly used SVE, indicated by the new thread flag TIF_SVE. Otherwise, the FPSIMD view of the architectural state is stored in thread.fpsimd_state as usual. When in use, the SVE registers are not stored directly in thread_struct due to their potentially large and variable size. Because the task_struct slab allocator must be configured very early during kernel boot, it is also tricky to configure it correctly to match the maximum vector length provided by the hardware, since this depends on examining secondary CPUs as well as the primary. Instead, a pointer sve_state in thread_struct points to a dynamically allocated buffer containing the SVE register data, and code is added to allocate and free this buffer at appropriate times. TIF_SVE is set when taking an SVE access trap from userspace, if suitable hardware support has been detected. This enables SVE for the thread: a subsequent return to userspace will disable the trap accordingly. If such a trap is taken without sufficient system- wide hardware support, SIGILL is sent to the thread instead as if an undefined instruction had been executed: this may happen if userspace tries to use SVE in a system where not all CPUs support it for example. The kernel will clear TIF_SVE and disable SVE for the thread whenever an explicit syscall is made by userspace. For backwards compatibility reasons and conformance with the spirit of the base AArch64 procedure call standard, the subset of the SVE register state that aliases the FPSIMD registers is still preserved across a syscall even if this happens. The remainder of the SVE register state logically becomes zero at syscall entry, though the actual zeroing work is currently deferred until the thread next tries to use SVE, causing another trap to the kernel. This implementation is suboptimal: in the future, the fastpath case may be optimised to zero the registers in-place and leave SVE enabled for the task, where beneficial. TIF_SVE is also cleared in the following slowpath cases, which are taken as reasonable hints that the task may no longer use SVE: * exec * fork and clone Code is added to sync data between thread.fpsimd_state and thread.sve_state whenever enabling/disabling SVE, in a manner consistent with the SVE architectural programmer's model. 
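As a reading aid for the large diff that follows, here is a condensed sketch of two of the helpers described above: how the dynamically allocated per-task buffer is sized, and how the FPSIMD Vn registers map onto bits [127:0] of the corresponding Zn slots when converting FPSIMD-only state to SVE state. It introduces no behaviour beyond what the patch itself adds (see sve_state_size() and fpsimd_to_sve() below); the example_ prefixes are only to avoid confusion with the real functions.

/*
 * Condensed restatement, not an additional change: the buffer reuses
 * the signal-frame layout macros from sigcontext.h, and each V register
 * is copied into the low 128 bits of its Z-register slot.
 */
#include <linux/sched.h>
#include <linux/string.h>
#include <asm/fpsimd.h>
#include <asm/sigcontext.h>

static size_t example_sve_state_size(struct task_struct const *task)
{
        return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
}

static void example_fpsimd_to_sve(struct task_struct *task)
{
        unsigned int vq = sve_vq_from_vl(task->thread.sve_vl);
        char *sst = task->thread.sve_state;
        struct fpsimd_state const *fst = &task->thread.fpsimd_state;
        unsigned int i;

        for (i = 0; i < 32; ++i)
                memcpy(sst + SVE_SIG_ZREG_OFFSET(vq, i) - SVE_SIG_REGS_OFFSET,
                       &fst->vregs[i], sizeof(fst->vregs[i]));
}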
Signed-off-by: Dave Martin Reviewed-by: Catalin Marinas Cc: Ard Biesheuvel Cc: Alex Bennée [will: added #include to fix allnoconfig build] [will: use enable_daif in do_sve_acc] Signed-off-by: Will Deacon --- arch/arm64/include/asm/fpsimd.h | 16 ++ arch/arm64/include/asm/processor.h | 2 + arch/arm64/include/asm/thread_info.h | 4 + arch/arm64/include/asm/traps.h | 2 + arch/arm64/kernel/entry.S | 39 ++++- arch/arm64/kernel/fpsimd.c | 325 ++++++++++++++++++++++++++++++++++- arch/arm64/kernel/process.c | 24 +++ arch/arm64/kernel/traps.c | 6 +- 8 files changed, 407 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 026a7c7fc30b..5655fe1ced46 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -20,6 +20,8 @@ #ifndef __ASSEMBLY__ +#include + /* * FP/SIMD storage area has: * - FPSR and FPCR @@ -72,6 +74,20 @@ extern void sve_load_state(void const *state, u32 const *pfpsr, unsigned long vq_minus_1); extern unsigned int sve_get_vl(void); +#ifdef CONFIG_ARM64_SVE + +extern size_t sve_state_size(struct task_struct const *task); + +extern void sve_alloc(struct task_struct *task); +extern void fpsimd_release_task(struct task_struct *task); + +#else /* ! CONFIG_ARM64_SVE */ + +static inline void sve_alloc(struct task_struct *task) { } +static inline void fpsimd_release_task(struct task_struct *task) { } + +#endif /* ! CONFIG_ARM64_SVE */ + /* For use by EFI runtime services calls only */ extern void __efi_fpsimd_begin(void); extern void __efi_fpsimd_end(void); diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 7dddca21fc64..e2f575dbdddd 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -105,6 +105,8 @@ struct thread_struct { unsigned long tp2_value; #endif struct fpsimd_state fpsimd_state; + void *sve_state; /* SVE registers, if any */ + unsigned int sve_vl; /* SVE vector length */ unsigned long fault_address; /* fault info */ unsigned long fault_code; /* ESR_EL1 value */ struct debug_info debug; /* debugging */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index ddded6497a8a..92b7b48576c8 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -63,6 +63,8 @@ struct thread_info { void arch_setup_new_exec(void); #define arch_setup_new_exec arch_setup_new_exec +void arch_release_task_struct(struct task_struct *tsk); + #endif /* @@ -92,6 +94,7 @@ void arch_setup_new_exec(void); #define TIF_RESTORE_SIGMASK 20 #define TIF_SINGLESTEP 21 #define TIF_32BIT 22 /* 32bit process */ +#define TIF_SVE 23 /* Scalable Vector Extension in use */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) @@ -105,6 +108,7 @@ void arch_setup_new_exec(void); #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_FSCHECK (1 << TIF_FSCHECK) #define _TIF_32BIT (1 << TIF_32BIT) +#define _TIF_SVE (1 << TIF_SVE) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index 45e3da34bdc4..1696f9de9359 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -34,6 +34,8 @@ struct undef_hook { void register_undef_hook(struct undef_hook *hook); void unregister_undef_hook(struct undef_hook *hook); +void force_signal_inject(int signal, int code, struct pt_regs *regs, + unsigned long address); 
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index bfa4bd09110a..a989e234dc32 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -599,6 +599,8 @@ el0_sync: b.eq el0_ia cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access b.eq el0_fpsimd_acc + cmp x24, #ESR_ELx_EC_SVE // SVE access + b.eq el0_sve_acc cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception b.eq el0_fpsimd_exc cmp x24, #ESR_ELx_EC_SYS64 // configurable trap @@ -650,6 +652,7 @@ el0_svc_compat: /* * AArch32 syscall handling */ + ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags adrp stbl, compat_sys_call_table // load compat syscall table pointer mov wscno, w7 // syscall number in w7 (r7) mov wsc_nr, #__NR_compat_syscalls @@ -699,9 +702,19 @@ el0_fpsimd_acc: mov x1, sp bl do_fpsimd_acc b ret_to_user +el0_sve_acc: + /* + * Scalable Vector Extension access + */ + enable_daif + ct_user_exit + mov x0, x25 + mov x1, sp + bl do_sve_acc + b ret_to_user el0_fpsimd_exc: /* - * Floating Point or Advanced SIMD exception + * Floating Point, Advanced SIMD or SVE exception */ enable_daif ct_user_exit @@ -849,16 +862,36 @@ ENDPROC(ret_to_user) */ .align 6 el0_svc: + ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags adrp stbl, sys_call_table // load syscall table pointer mov wscno, w8 // syscall number in w8 mov wsc_nr, #__NR_syscalls + +#ifndef CONFIG_ARM64_SVE + b el0_svc_naked +#else + tbz x16, #TIF_SVE, el0_svc_naked // Skip unless TIF_SVE set: + bic x16, x16, #_TIF_SVE // discard SVE state + str x16, [tsk, #TSK_TI_FLAGS] + + /* + * task_fpsimd_load() won't be called to update CPACR_EL1 in + * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only + * happens if a context switch or kernel_neon_begin() or context + * modification (sigreturn, ptrace) intervenes. + * So, ensure that CPACR_EL1 is already correct for the fast-path case: + */ + mrs x9, cpacr_el1 + bic x9, x9, #CPACR_EL1_ZEN_EL0EN // disable SVE for el0 + msr cpacr_el1, x9 // synchronised by eret to el0 +#endif /* CONFIG_ARM64_SVE */ + el0_svc_naked: // compat entry point stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number enable_daif ct_user_exit 1 - ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks - tst x16, #_TIF_SYSCALL_WORK + tst x16, #_TIF_SYSCALL_WORK // check for syscall hooks b.ne __sys_trace cmp wscno, wsc_nr // check upper syscall limit b.hs ni_sys diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 901078a9cbe5..000b5f9215c6 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -18,19 +18,28 @@ */ #include +#include +#include #include #include #include #include +#include #include #include #include +#include #include +#include #include +#include #include #include #include +#include +#include +#include #define FPEXC_IOF (1 << 0) #define FPEXC_DZF (1 << 1) @@ -40,6 +49,8 @@ #define FPEXC_IDF (1 << 7) /* + * (Note: in this discussion, statements about FPSIMD apply equally to SVE.) + * * In order to reduce the number of times the FPSIMD state is needlessly saved * and restored, we need to keep track of two things: * (a) for each task, we need to remember which CPU was the last one to have @@ -100,6 +111,279 @@ */ static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state); +/* + * Call __sve_free() directly only if you know task can't be scheduled + * or preempted. 
+ */ +static void __sve_free(struct task_struct *task) +{ + kfree(task->thread.sve_state); + task->thread.sve_state = NULL; +} + +static void sve_free(struct task_struct *task) +{ + WARN_ON(test_tsk_thread_flag(task, TIF_SVE)); + + __sve_free(task); +} + + +/* Offset of FFR in the SVE register dump */ +static size_t sve_ffr_offset(int vl) +{ + return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET; +} + +static void *sve_pffr(struct task_struct *task) +{ + return (char *)task->thread.sve_state + + sve_ffr_offset(task->thread.sve_vl); +} + +static void change_cpacr(u64 val, u64 mask) +{ + u64 cpacr = read_sysreg(CPACR_EL1); + u64 new = (cpacr & ~mask) | val; + + if (new != cpacr) + write_sysreg(new, CPACR_EL1); +} + +static void sve_user_disable(void) +{ + change_cpacr(0, CPACR_EL1_ZEN_EL0EN); +} + +static void sve_user_enable(void) +{ + change_cpacr(CPACR_EL1_ZEN_EL0EN, CPACR_EL1_ZEN_EL0EN); +} + +/* + * TIF_SVE controls whether a task can use SVE without trapping while + * in userspace, and also the way a task's FPSIMD/SVE state is stored + * in thread_struct. + * + * The kernel uses this flag to track whether a user task is actively + * using SVE, and therefore whether full SVE register state needs to + * be tracked. If not, the cheaper FPSIMD context handling code can + * be used instead of the more costly SVE equivalents. + * + * * TIF_SVE set: + * + * The task can execute SVE instructions while in userspace without + * trapping to the kernel. + * + * When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the + * corresponding Zn), P0-P15 and FFR are encoded in in + * task->thread.sve_state, formatted appropriately for vector + * length task->thread.sve_vl. + * + * task->thread.sve_state must point to a valid buffer at least + * sve_state_size(task) bytes in size. + * + * During any syscall, the kernel may optionally clear TIF_SVE and + * discard the vector state except for the FPSIMD subset. + * + * * TIF_SVE clear: + * + * An attempt by the user task to execute an SVE instruction causes + * do_sve_acc() to be called, which does some preparation and then + * sets TIF_SVE. + * + * When stored, FPSIMD registers V0-V31 are encoded in + * task->fpsimd_state; bits [max : 128] for each of Z0-Z31 are + * logically zero but not stored anywhere; P0-P15 and FFR are not + * stored and have unspecified values from userspace's point of + * view. For hygiene purposes, the kernel zeroes them on next use, + * but userspace is discouraged from relying on this. + * + * task->thread.sve_state does not need to be non-NULL, valid or any + * particular size: it must not be dereferenced. + * + * * FPSR and FPCR are always stored in task->fpsimd_state irrespctive of + * whether TIF_SVE is clear or set, since these are not vector length + * dependent. + */ + +/* + * Update current's FPSIMD/SVE registers from thread_struct. + * + * This function should be called only when the FPSIMD/SVE state in + * thread_struct is known to be up to date, when preparing to enter + * userspace. + * + * Softirqs (and preemption) must be disabled. 
+ */ +static void task_fpsimd_load(void) +{ + WARN_ON(!in_softirq() && !irqs_disabled()); + + if (system_supports_sve() && test_thread_flag(TIF_SVE)) + sve_load_state(sve_pffr(current), + ¤t->thread.fpsimd_state.fpsr, + sve_vq_from_vl(current->thread.sve_vl) - 1); + else + fpsimd_load_state(¤t->thread.fpsimd_state); + + if (system_supports_sve()) { + /* Toggle SVE trapping for userspace if needed */ + if (test_thread_flag(TIF_SVE)) + sve_user_enable(); + else + sve_user_disable(); + + /* Serialised by exception return to user */ + } +} + +/* + * Ensure current's FPSIMD/SVE storage in thread_struct is up to date + * with respect to the CPU registers. + * + * Softirqs (and preemption) must be disabled. + */ +static void task_fpsimd_save(void) +{ + WARN_ON(!in_softirq() && !irqs_disabled()); + + if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { + if (system_supports_sve() && test_thread_flag(TIF_SVE)) { + if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) { + /* + * Can't save the user regs, so current would + * re-enter user with corrupt state. + * There's no way to recover, so kill it: + */ + force_signal_inject( + SIGKILL, 0, current_pt_regs(), 0); + return; + } + + sve_save_state(sve_pffr(current), + ¤t->thread.fpsimd_state.fpsr); + } else + fpsimd_save_state(¤t->thread.fpsimd_state); + } +} + +#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ + (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) + +/* + * Transfer the FPSIMD state in task->thread.fpsimd_state to + * task->thread.sve_state. + * + * Task can be a non-runnable task, or current. In the latter case, + * softirqs (and preemption) must be disabled. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + * task->thread.fpsimd_state must be up to date before calling this function. + */ +static void fpsimd_to_sve(struct task_struct *task) +{ + unsigned int vq; + void *sst = task->thread.sve_state; + struct fpsimd_state const *fst = &task->thread.fpsimd_state; + unsigned int i; + + if (!system_supports_sve()) + return; + + vq = sve_vq_from_vl(task->thread.sve_vl); + for (i = 0; i < 32; ++i) + memcpy(ZREG(sst, vq, i), &fst->vregs[i], + sizeof(fst->vregs[i])); +} + +#ifdef CONFIG_ARM64_SVE + +/* + * Return how many bytes of memory are required to store the full SVE + * state for task, given task's currently configured vector length. + */ +size_t sve_state_size(struct task_struct const *task) +{ + return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl)); +} + +/* + * Ensure that task->thread.sve_state is allocated and sufficiently large. + * + * This function should be used only in preparation for replacing + * task->thread.sve_state with new data. The memory is always zeroed + * here to prevent stale data from showing through: this is done in + * the interest of testability and predictability: except in the + * do_sve_acc() case, there is no ABI requirement to hide stale data + * written previously be task. + */ +void sve_alloc(struct task_struct *task) +{ + if (task->thread.sve_state) { + memset(task->thread.sve_state, 0, sve_state_size(current)); + return; + } + + /* This is a small allocation (maximum ~8KB) and Should Not Fail. */ + task->thread.sve_state = + kzalloc(sve_state_size(task), GFP_KERNEL); + + /* + * If future SVE revisions can have larger vectors though, + * this may cease to be true: + */ + BUG_ON(!task->thread.sve_state); +} + +/* + * Called from the put_task_struct() path, which cannot get here + * unless dead_task is really dead and not schedulable. 
+ */ +void fpsimd_release_task(struct task_struct *dead_task) +{ + __sve_free(dead_task); +} + +#endif /* CONFIG_ARM64_SVE */ + +/* + * Trapped SVE access + * + * Storage is allocated for the full SVE state, the current FPSIMD + * register contents are migrated across, and TIF_SVE is set so that + * the SVE access trap will be disabled the next time this task + * reaches ret_to_user. + * + * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load() + * would have disabled the SVE access trap for userspace during + * ret_to_user, making an SVE access trap impossible in that case. + */ +asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) +{ + /* Even if we chose not to use SVE, the hardware could still trap: */ + if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { + force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); + return; + } + + sve_alloc(current); + + local_bh_disable(); + + task_fpsimd_save(); + fpsimd_to_sve(current); + + /* Force ret_to_user to reload the registers: */ + fpsimd_flush_task_state(current); + set_thread_flag(TIF_FOREIGN_FPSTATE); + + if (test_and_set_thread_flag(TIF_SVE)) + WARN_ON(1); /* SVE access shouldn't have trapped */ + + local_bh_enable(); +} + /* * Trapped FP/ASIMD access. */ @@ -145,8 +429,8 @@ void fpsimd_thread_switch(struct task_struct *next) * the registers is in fact the most recent userland FPSIMD state of * 'current'. */ - if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE)) - fpsimd_save_state(¤t->thread.fpsimd_state); + if (current->mm) + task_fpsimd_save(); if (next->mm) { /* @@ -168,6 +452,8 @@ void fpsimd_thread_switch(struct task_struct *next) void fpsimd_flush_thread(void) { + int vl; + if (!system_supports_fpsimd()) return; @@ -175,6 +461,30 @@ void fpsimd_flush_thread(void) memset(¤t->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); fpsimd_flush_task_state(current); + + if (system_supports_sve()) { + clear_thread_flag(TIF_SVE); + sve_free(current); + + /* + * Reset the task vector length as required. + * This is where we ensure that all user tasks have a valid + * vector length configured: no kernel task can become a user + * task without an exec and hence a call to this function. + * If a bug causes this to go wrong, we make some noise and + * try to fudge thread.sve_vl to a safe value here. + */ + vl = current->thread.sve_vl; + + if (vl == 0) + vl = SVE_VL_MIN; + + if (WARN_ON(!sve_vl_valid(vl))) + vl = SVE_VL_MIN; + + current->thread.sve_vl = vl; + } + set_thread_flag(TIF_FOREIGN_FPSTATE); local_bh_enable(); @@ -183,6 +493,9 @@ void fpsimd_flush_thread(void) /* * Save the userland FPSIMD state of 'current' to memory, but only if the state * currently held in the registers does in fact belong to 'current' + * + * Currently, SVE tasks can't exist, so just WARN in that case. + * Subsequent patches will add full SVE support here. 
*/ void fpsimd_preserve_current_state(void) { @@ -194,6 +507,8 @@ void fpsimd_preserve_current_state(void) if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) fpsimd_save_state(¤t->thread.fpsimd_state); + WARN_ON_ONCE(test_and_clear_thread_flag(TIF_SVE)); + local_bh_enable(); } @@ -212,7 +527,7 @@ void fpsimd_restore_current_state(void) if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { struct fpsimd_state *st = ¤t->thread.fpsimd_state; - fpsimd_load_state(st); + task_fpsimd_load(); __this_cpu_write(fpsimd_last_state, st); st->cpu = smp_processor_id(); } @@ -381,8 +696,8 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self, { switch (cmd) { case CPU_PM_ENTER: - if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE)) - fpsimd_save_state(¤t->thread.fpsimd_state); + if (current->mm) + task_fpsimd_save(); this_cpu_write(fpsimd_last_state, NULL); break; case CPU_PM_EXIT: diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index c15ec41e9f84..b2adcce7bc18 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -273,11 +274,27 @@ void release_thread(struct task_struct *dead_task) { } +void arch_release_task_struct(struct task_struct *tsk) +{ + fpsimd_release_task(tsk); +} + +/* + * src and dst may temporarily have aliased sve_state after task_struct + * is copied. We cannot fix this properly here, because src may have + * live SVE state and dst's thread_info may not exist yet, so tweaking + * either src's or dst's TIF_SVE is not safe. + * + * The unaliasing is done in copy_thread() instead. This works because + * dst is not schedulable or traceable until both of these functions + * have been called. + */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { if (current->mm) fpsimd_preserve_current_state(); *dst = *src; + return 0; } @@ -290,6 +307,13 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); + /* + * Unalias p->thread.sve_state (if any) from the parent task + * and disable discard SVE state for p: + */ + clear_tsk_thread_flag(p, TIF_SVE); + p->thread.sve_state = NULL; + if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->regs[0] = 0; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 3408b33f6e7c..03f8f8ef542d 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -311,8 +311,8 @@ exit: return fn ? fn(regs, instr) : 1; } -static void force_signal_inject(int signal, int code, struct pt_regs *regs, - unsigned long address) +void force_signal_inject(int signal, int code, struct pt_regs *regs, + unsigned long address) { siginfo_t info; void __user *pc = (void __user *)instruction_pointer(regs); @@ -326,7 +326,7 @@ static void force_signal_inject(int signal, int code, struct pt_regs *regs, desc = "illegal memory access"; break; default: - desc = "bad mode"; + desc = "unknown or unrecoverable error"; break; } -- cgit From 79ab047c75d6a9f95d8840d94f405e20cbacac4b Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:06 +0000 Subject: arm64/sve: Support vector length resetting for new processes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's desirable to be able to reset the vector length to some sane default for new processes, since the new binary and its libraries may or may not be SVE-aware. 
This patch tracks the desired post-exec vector length (if any) in a new thread member sve_vl_onexec, and adds a new thread flag TIF_SVE_VL_INHERIT to control whether to inherit or reset the vector length. Currently these are inactive. Subsequent patches will provide the capability to configure them. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/processor.h | 1 + arch/arm64/include/asm/thread_info.h | 1 + arch/arm64/kernel/fpsimd.c | 16 ++++++++++++---- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index e2f575dbdddd..c6fddb005dc2 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -107,6 +107,7 @@ struct thread_struct { struct fpsimd_state fpsimd_state; void *sve_state; /* SVE registers, if any */ unsigned int sve_vl; /* SVE vector length */ + unsigned int sve_vl_onexec; /* SVE vl after next exec */ unsigned long fault_address; /* fault info */ unsigned long fault_code; /* ESR_EL1 value */ struct debug_info debug; /* debugging */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 92b7b48576c8..eb431286bacd 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -95,6 +95,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_SINGLESTEP 21 #define TIF_32BIT 22 /* 32bit process */ #define TIF_SVE 23 /* Scalable Vector Extension in use */ +#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 000b5f9215c6..c7531b85b303 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -111,6 +111,9 @@ */ static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state); +/* Default VL for tasks that don't set it explicitly: */ +static int sve_default_vl = SVE_VL_MIN; + /* * Call __sve_free() directly only if you know task can't be scheduled * or preempted. @@ -474,15 +477,20 @@ void fpsimd_flush_thread(void) * If a bug causes this to go wrong, we make some noise and * try to fudge thread.sve_vl to a safe value here. */ - vl = current->thread.sve_vl; - - if (vl == 0) - vl = SVE_VL_MIN; + vl = current->thread.sve_vl_onexec ? + current->thread.sve_vl_onexec : sve_default_vl; if (WARN_ON(!sve_vl_valid(vl))) vl = SVE_VL_MIN; current->thread.sve_vl = vl; + + /* + * If the task is not set to inherit, ensure that the vector + * length will be reset by a subsequent exec: + */ + if (!test_thread_flag(TIF_SVE_VL_INHERIT)) + current->thread.sve_vl_onexec = 0; } set_thread_flag(TIF_FOREIGN_FPSTATE); -- cgit From 8cd969d28fd2848dbfd86f6954dbd71f394f55f3 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:07 +0000 Subject: arm64/sve: Signal handling support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch implements support for saving and restoring the SVE registers around signals. A fixed-size header struct sve_context is always included in the signal frame encoding the thread's vector length at the time of signal delivery, optionally followed by a variable-layout structure encoding the SVE registers. 
Because of the need to preserve backwards compatibility, the FPSIMD view of the SVE registers is always dumped as a struct fpsimd_context in the usual way, in addition to any sve_context. The SVE vector registers are dumped in full, including bits 127:0 of each register which alias the corresponding FPSIMD vector registers in the hardware. To avoid any ambiguity about which alias to restore during sigreturn, the kernel always restores bits 127:0 of each SVE vector register from the fpsimd_context in the signal frame (which must be present): userspace needs to take this into account if it wants to modify the SVE vector register contents on return from a signal. FPSR and FPCR, which are used by both FPSIMD and SVE, are not included in sve_context because they are always present in fpsimd_context anyway. For signal delivery, a new helper fpsimd_signal_preserve_current_state() is added to update _both_ the FPSIMD and SVE views in the task struct, to make it easier to populate this information into the signal frame. Because of the redundancy between the two views of the state, only one is updated otherwise. Reviewed-by: Catalin Marinas Signed-off-by: Dave Martin Cc: Alex Bennée Cc: Ard Biesheuvel Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/fpsimd.h | 1 + arch/arm64/kernel/fpsimd.c | 55 ++++++++++--- arch/arm64/kernel/signal.c | 167 ++++++++++++++++++++++++++++++++++++++-- arch/arm64/kernel/signal32.c | 2 +- 4 files changed, 206 insertions(+), 19 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 5655fe1ced46..9bbd74c0ea6b 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -63,6 +63,7 @@ extern void fpsimd_load_state(struct fpsimd_state *state); extern void fpsimd_thread_switch(struct task_struct *next); extern void fpsimd_flush_thread(void); +extern void fpsimd_signal_preserve_current_state(void); extern void fpsimd_preserve_current_state(void); extern void fpsimd_restore_current_state(void); extern void fpsimd_update_current_state(struct fpsimd_state *state); diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index c7531b85b303..e7733fb19388 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -300,6 +300,32 @@ static void fpsimd_to_sve(struct task_struct *task) sizeof(fst->vregs[i])); } +/* + * Transfer the SVE state in task->thread.sve_state to + * task->thread.fpsimd_state. + * + * Task can be a non-runnable task, or current. In the latter case, + * softirqs (and preemption) must be disabled. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + * task->thread.sve_state must be up to date before calling this function. + */ +static void sve_to_fpsimd(struct task_struct *task) +{ + unsigned int vq; + void const *sst = task->thread.sve_state; + struct fpsimd_state *fst = &task->thread.fpsimd_state; + unsigned int i; + + if (!system_supports_sve()) + return; + + vq = sve_vq_from_vl(task->thread.sve_vl); + for (i = 0; i < 32; ++i) + memcpy(&fst->vregs[i], ZREG(sst, vq, i), + sizeof(fst->vregs[i])); +} + #ifdef CONFIG_ARM64_SVE /* @@ -501,9 +527,6 @@ void fpsimd_flush_thread(void) /* * Save the userland FPSIMD state of 'current' to memory, but only if the state * currently held in the registers does in fact belong to 'current' - * - * Currently, SVE tasks can't exist, so just WARN in that case. - * Subsequent patches will add full SVE support here. 
*/ void fpsimd_preserve_current_state(void) { @@ -511,15 +534,22 @@ void fpsimd_preserve_current_state(void) return; local_bh_disable(); - - if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) - fpsimd_save_state(¤t->thread.fpsimd_state); - - WARN_ON_ONCE(test_and_clear_thread_flag(TIF_SVE)); - + task_fpsimd_save(); local_bh_enable(); } +/* + * Like fpsimd_preserve_current_state(), but ensure that + * current->thread.fpsimd_state is updated so that it can be copied to + * the signal frame. + */ +void fpsimd_signal_preserve_current_state(void) +{ + fpsimd_preserve_current_state(); + if (system_supports_sve() && test_thread_flag(TIF_SVE)) + sve_to_fpsimd(current); +} + /* * Load the userland FPSIMD state of 'current' from memory, but only if the * FPSIMD state already held in the registers is /not/ the most recent FPSIMD @@ -555,7 +585,12 @@ void fpsimd_update_current_state(struct fpsimd_state *state) local_bh_disable(); - fpsimd_load_state(state); + if (system_supports_sve() && test_thread_flag(TIF_SVE)) { + current->thread.fpsimd_state = *state; + fpsimd_to_sve(current); + } + task_fpsimd_load(); + if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { struct fpsimd_state *st = ¤t->thread.fpsimd_state; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index a032906cf4f7..b120111a46be 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -64,6 +64,7 @@ struct rt_sigframe_user_layout { unsigned long fpsimd_offset; unsigned long esr_offset; + unsigned long sve_offset; unsigned long extra_offset; unsigned long end_offset; }; @@ -180,9 +181,6 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) struct fpsimd_state *fpsimd = ¤t->thread.fpsimd_state; int err; - /* dump the hardware registers to the fpsimd_state structure */ - fpsimd_preserve_current_state(); - /* copy the FP and status/control registers */ err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs)); __put_user_error(fpsimd->fpsr, &ctx->fpsr, err); @@ -215,6 +213,8 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx) __get_user_error(fpsimd.fpsr, &ctx->fpsr, err); __get_user_error(fpsimd.fpcr, &ctx->fpcr, err); + clear_thread_flag(TIF_SVE); + /* load the hardware registers from the fpsimd_state structure */ if (!err) fpsimd_update_current_state(&fpsimd); @@ -222,10 +222,118 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx) return err ? -EFAULT : 0; } + struct user_ctxs { struct fpsimd_context __user *fpsimd; + struct sve_context __user *sve; }; +#ifdef CONFIG_ARM64_SVE + +static int preserve_sve_context(struct sve_context __user *ctx) +{ + int err = 0; + u16 reserved[ARRAY_SIZE(ctx->__reserved)]; + unsigned int vl = current->thread.sve_vl; + unsigned int vq = 0; + + if (test_thread_flag(TIF_SVE)) + vq = sve_vq_from_vl(vl); + + memset(reserved, 0, sizeof(reserved)); + + __put_user_error(SVE_MAGIC, &ctx->head.magic, err); + __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16), + &ctx->head.size, err); + __put_user_error(vl, &ctx->vl, err); + BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); + err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); + + if (vq) { + /* + * This assumes that the SVE state has already been saved to + * the task struct by calling preserve_fpsimd_context(). + */ + err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET, + current->thread.sve_state, + SVE_SIG_REGS_SIZE(vq)); + } + + return err ? 
-EFAULT : 0; +} + +static int restore_sve_fpsimd_context(struct user_ctxs *user) +{ + int err; + unsigned int vq; + struct fpsimd_state fpsimd; + struct sve_context sve; + + if (__copy_from_user(&sve, user->sve, sizeof(sve))) + return -EFAULT; + + if (sve.vl != current->thread.sve_vl) + return -EINVAL; + + if (sve.head.size <= sizeof(*user->sve)) { + clear_thread_flag(TIF_SVE); + goto fpsimd_only; + } + + vq = sve_vq_from_vl(sve.vl); + + if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq)) + return -EINVAL; + + /* + * Careful: we are about __copy_from_user() directly into + * thread.sve_state with preemption enabled, so protection is + * needed to prevent a racing context switch from writing stale + * registers back over the new data. + */ + + fpsimd_flush_task_state(current); + barrier(); + /* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */ + + set_thread_flag(TIF_FOREIGN_FPSTATE); + barrier(); + /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ + + sve_alloc(current); + err = __copy_from_user(current->thread.sve_state, + (char __user const *)user->sve + + SVE_SIG_REGS_OFFSET, + SVE_SIG_REGS_SIZE(vq)); + if (err) + return -EFAULT; + + set_thread_flag(TIF_SVE); + +fpsimd_only: + /* copy the FP and status/control registers */ + /* restore_sigframe() already checked that user->fpsimd != NULL. */ + err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs, + sizeof(fpsimd.vregs)); + __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err); + __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err); + + /* load the hardware registers from the fpsimd_state structure */ + if (!err) + fpsimd_update_current_state(&fpsimd); + + return err ? -EFAULT : 0; +} + +#else /* ! CONFIG_ARM64_SVE */ + +/* Turn any non-optimised out attempts to use these into a link error: */ +extern int preserve_sve_context(void __user *ctx); +extern int restore_sve_fpsimd_context(struct user_ctxs *user); + +#endif /* ! 
CONFIG_ARM64_SVE */ + + static int parse_user_sigframe(struct user_ctxs *user, struct rt_sigframe __user *sf) { @@ -238,6 +346,7 @@ static int parse_user_sigframe(struct user_ctxs *user, char const __user *const sfp = (char const __user *)sf; user->fpsimd = NULL; + user->sve = NULL; if (!IS_ALIGNED((unsigned long)base, 16)) goto invalid; @@ -288,6 +397,19 @@ static int parse_user_sigframe(struct user_ctxs *user, /* ignore */ break; + case SVE_MAGIC: + if (!system_supports_sve()) + goto invalid; + + if (user->sve) + goto invalid; + + if (size < sizeof(*user->sve)) + goto invalid; + + user->sve = (struct sve_context __user *)head; + break; + case EXTRA_MAGIC: if (have_extra_context) goto invalid; @@ -364,9 +486,6 @@ static int parse_user_sigframe(struct user_ctxs *user, } done: - if (!user->fpsimd) - goto invalid; - return 0; invalid: @@ -400,8 +519,19 @@ static int restore_sigframe(struct pt_regs *regs, if (err == 0) err = parse_user_sigframe(&user, sf); - if (err == 0) - err = restore_fpsimd_context(user.fpsimd); + if (err == 0) { + if (!user.fpsimd) + return -EINVAL; + + if (user.sve) { + if (!system_supports_sve()) + return -EINVAL; + + err = restore_sve_fpsimd_context(&user); + } else { + err = restore_fpsimd_context(user.fpsimd); + } + } return err; } @@ -460,6 +590,18 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user) return err; } + if (system_supports_sve()) { + unsigned int vq = 0; + + if (test_thread_flag(TIF_SVE)) + vq = sve_vq_from_vl(current->thread.sve_vl); + + err = sigframe_alloc(user, &user->sve_offset, + SVE_SIG_CONTEXT_SIZE(vq)); + if (err) + return err; + } + return sigframe_alloc_end(user); } @@ -501,6 +643,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, __put_user_error(current->thread.fault_code, &esr_ctx->esr, err); } + /* Scalable Vector Extension state, if present */ + if (system_supports_sve() && err == 0 && user->sve_offset) { + struct sve_context __user *sve_ctx = + apply_user_offset(user, user->sve_offset); + err |= preserve_sve_context(sve_ctx); + } + if (err == 0 && user->extra_offset) { char __user *sfp = (char __user *)user->sigframe; char __user *userp = @@ -600,6 +749,8 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, struct rt_sigframe __user *frame; int err = 0; + fpsimd_signal_preserve_current_state(); + if (get_sigframe(&user, ksig, regs)) return 1; diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index e09bf5d15606..22711ee8e36c 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -239,7 +239,7 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) * Note that this also saves V16-31, which aren't visible * in AArch32. */ - fpsimd_preserve_current_state(); + fpsimd_signal_preserve_current_state(); /* Place structure header on the stack */ __put_user_error(magic, &frame->magic, err); -- cgit From 7582e22038a266444eb87bc07c372592ad647439 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:08 +0000 Subject: arm64/sve: Backend logic for setting the vector length MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch implements the core logic for changing a task's vector length on request from userspace. This will be used by the ptrace and prctl frontends that are implemented in later patches. The SVE architecture permits, but does not require, implementations to support vector lengths that are not a power of two. 
To handle this, logic is added to check a requested vector length against a possibly sparse bitmap of available vector lengths at runtime, so that the best supported value can be chosen. Signed-off-by: Dave Martin Reviewed-by: Catalin Marinas Cc: Alex Bennée Signed-off-by: Will Deacon --- arch/arm64/include/asm/fpsimd.h | 8 +++ arch/arm64/kernel/fpsimd.c | 137 +++++++++++++++++++++++++++++++++++++++- include/uapi/linux/prctl.h | 5 ++ 3 files changed, 149 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 9bbd74c0ea6b..86f550ce7b4d 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -20,6 +20,7 @@ #ifndef __ASSEMBLY__ +#include #include /* @@ -70,17 +71,24 @@ extern void fpsimd_update_current_state(struct fpsimd_state *state); extern void fpsimd_flush_task_state(struct task_struct *target); +/* Maximum VL that SVE VL-agnostic software can transparently support */ +#define SVE_VL_ARCH_MAX 0x100 + extern void sve_save_state(void *state, u32 *pfpsr); extern void sve_load_state(void const *state, u32 const *pfpsr, unsigned long vq_minus_1); extern unsigned int sve_get_vl(void); +extern int __ro_after_init sve_max_vl; + #ifdef CONFIG_ARM64_SVE extern size_t sve_state_size(struct task_struct const *task); extern void sve_alloc(struct task_struct *task); extern void fpsimd_release_task(struct task_struct *task); +extern int sve_set_vector_length(struct task_struct *task, + unsigned long vl, unsigned long flags); #else /* ! CONFIG_ARM64_SVE */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index e7733fb19388..667be3472114 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -17,8 +17,10 @@ * along with this program. If not, see . */ +#include #include #include +#include #include #include #include @@ -28,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -114,6 +117,20 @@ static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state); /* Default VL for tasks that don't set it explicitly: */ static int sve_default_vl = SVE_VL_MIN; +#ifdef CONFIG_ARM64_SVE + +/* Maximum supported vector length across all CPUs (initially poisoned) */ +int __ro_after_init sve_max_vl = -1; +/* Set of available vector lengths, as vq_to_bit(vq): */ +static DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); + +#else /* ! CONFIG_ARM64_SVE */ + +/* Dummy declaration for code that will be optimised out: */ +extern DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); + +#endif /* ! CONFIG_ARM64_SVE */ + /* * Call __sve_free() directly only if you know task can't be scheduled * or preempted. @@ -271,6 +288,50 @@ static void task_fpsimd_save(void) } } +/* + * Helpers to translate bit indices in sve_vq_map to VQ values (and + * vice versa). This allows find_next_bit() to be used to find the + * _maximum_ VQ not exceeding a certain value. + */ + +static unsigned int vq_to_bit(unsigned int vq) +{ + return SVE_VQ_MAX - vq; +} + +static unsigned int bit_to_vq(unsigned int bit) +{ + if (WARN_ON(bit >= SVE_VQ_MAX)) + bit = SVE_VQ_MAX - 1; + + return SVE_VQ_MAX - bit; +} + +/* + * All vector length selection from userspace comes through here. + * We're on a slow path, so some sanity-checks are included. + * If things go wrong there's a bug somewhere, but try to fall back to a + * safe choice. 
+ */ +static unsigned int find_supported_vector_length(unsigned int vl) +{ + int bit; + int max_vl = sve_max_vl; + + if (WARN_ON(!sve_vl_valid(vl))) + vl = SVE_VL_MIN; + + if (WARN_ON(!sve_vl_valid(max_vl))) + max_vl = SVE_VL_MIN; + + if (vl > max_vl) + vl = max_vl; + + bit = find_next_bit(sve_vq_map, SVE_VQ_MAX, + vq_to_bit(sve_vq_from_vl(vl))); + return sve_vl_from_vq(bit_to_vq(bit)); +} + #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) @@ -365,6 +426,76 @@ void sve_alloc(struct task_struct *task) BUG_ON(!task->thread.sve_state); } +int sve_set_vector_length(struct task_struct *task, + unsigned long vl, unsigned long flags) +{ + if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT | + PR_SVE_SET_VL_ONEXEC)) + return -EINVAL; + + if (!sve_vl_valid(vl)) + return -EINVAL; + + /* + * Clamp to the maximum vector length that VL-agnostic SVE code can + * work with. A flag may be assigned in the future to allow setting + * of larger vector lengths without confusing older software. + */ + if (vl > SVE_VL_ARCH_MAX) + vl = SVE_VL_ARCH_MAX; + + vl = find_supported_vector_length(vl); + + if (flags & (PR_SVE_VL_INHERIT | + PR_SVE_SET_VL_ONEXEC)) + task->thread.sve_vl_onexec = vl; + else + /* Reset VL to system default on next exec: */ + task->thread.sve_vl_onexec = 0; + + /* Only actually set the VL if not deferred: */ + if (flags & PR_SVE_SET_VL_ONEXEC) + goto out; + + if (vl == task->thread.sve_vl) + goto out; + + /* + * To ensure the FPSIMD bits of the SVE vector registers are preserved, + * write any live register state back to task_struct, and convert to a + * non-SVE thread. + */ + if (task == current) { + local_bh_disable(); + + task_fpsimd_save(); + set_thread_flag(TIF_FOREIGN_FPSTATE); + } + + fpsimd_flush_task_state(task); + if (test_and_clear_tsk_thread_flag(task, TIF_SVE)) + sve_to_fpsimd(task); + + if (task == current) + local_bh_enable(); + + /* + * Force reallocation of task SVE state to the correct size + * on next use: + */ + sve_free(task); + + task->thread.sve_vl = vl; + +out: + if (flags & PR_SVE_VL_INHERIT) + set_tsk_thread_flag(task, TIF_SVE_VL_INHERIT); + else + clear_tsk_thread_flag(task, TIF_SVE_VL_INHERIT); + + return 0; +} + /* * Called from the put_task_struct() path, which cannot get here * unless dead_task is really dead and not schedulable. 
@@ -481,7 +612,7 @@ void fpsimd_thread_switch(struct task_struct *next) void fpsimd_flush_thread(void) { - int vl; + int vl, supported_vl; if (!system_supports_fpsimd()) return; @@ -509,6 +640,10 @@ void fpsimd_flush_thread(void) if (WARN_ON(!sve_vl_valid(vl))) vl = SVE_VL_MIN; + supported_vl = find_supported_vector_length(vl); + if (WARN_ON(supported_vl != vl)) + vl = supported_vl; + current->thread.sve_vl = vl; /* diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index a8d0759a9e40..1b64901ca6b3 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -197,4 +197,9 @@ struct prctl_mm_map { # define PR_CAP_AMBIENT_LOWER 3 # define PR_CAP_AMBIENT_CLEAR_ALL 4 +/* arm64 Scalable Vector Extension controls */ +# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */ +# define PR_SVE_VL_LEN_MASK 0xffff +# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */ + #endif /* _LINUX_PRCTL_H */ -- cgit From 8f1eec57cdccd17faeaf7f593505124376340fd0 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:09 +0000 Subject: arm64: cpufeature: Move sys_caps_initialised declarations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit update_cpu_features() currently cannot tell whether it is being called during early or late secondary boot. This doesn't desperately matter for anything it currently does. However, SVE will need to know here whether the set of available vector lengths is known or still to be determined when booting a CPU, so that it can be updated appropriately. This patch simply moves the sys_caps_initialised stuff to the top of the file so that it can be used more widely. There doesn't seem to be a more obvious place to put it. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Reviewed-by: Suzuki K Poulose Acked-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/cpufeature.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 250e8cfff67d..0e192cbe4149 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -51,6 +51,21 @@ unsigned int compat_elf_hwcap2 __read_mostly; DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); EXPORT_SYMBOL(cpu_hwcaps); +/* + * Flag to indicate if we have computed the system wide + * capabilities based on the boot time active CPUs. This + * will be used to determine if a new booting CPU should + * go through the verification process to make sure that it + * supports the system capabilities, without using a hotplug + * notifier. + */ +static bool sys_caps_initialised; + +static inline void set_sys_caps_initialised(void) +{ + sys_caps_initialised = true; +} + static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p) { /* file-wide pr_fmt adds "CPU features: " prefix */ @@ -1049,21 +1064,6 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) } } -/* - * Flag to indicate if we have computed the system wide - * capabilities based on the boot time active CPUs. This - * will be used to determine if a new booting CPU should - * go through the verification process to make sure that it - * supports the system capabilities, without using a hotplug - * notifier. 
- */ -static bool sys_caps_initialised; - -static inline void set_sys_caps_initialised(void) -{ - sys_caps_initialised = true; -} - /* * Check for CPU features that are used in early boot * based on the Boot CPU value. -- cgit From 2e0f2478ea37eba945bee007884a2988b8f7d332 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:10 +0000 Subject: arm64/sve: Probe SVE capabilities and usable vector lengths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch uses the cpufeatures framework to determine common SVE capabilities and vector lengths, and configures the runtime SVE support code appropriately. ZCR_ELx is not really a feature register, but it is convenient to use it as a template for recording the maximum vector length supported by a CPU, using the LEN field. This field is similar to a feature field in that it is a contiguous bitfield for which we want to determine the minimum system-wide value. This patch adds ZCR as a pseudo-register in cpuinfo/cpufeatures, with appropriate custom code to populate it. Finding the minimum supported value of the LEN field is left to the cpufeatures framework in the usual way. The meaning of ID_AA64ZFR0_EL1 is not architecturally defined yet, so for now we just require it to be zero. Note that much of this code is dormant and SVE still won't be used yet, since system_supports_sve() remains hardwired to false. Signed-off-by: Dave Martin Reviewed-by: Suzuki K Poulose Reviewed-by: Catalin Marinas Cc: Alex Bennée Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpu.h | 4 ++ arch/arm64/include/asm/cpufeature.h | 36 ++++++++++++ arch/arm64/include/asm/fpsimd.h | 14 +++++ arch/arm64/kernel/cpufeature.c | 52 ++++++++++++++++ arch/arm64/kernel/cpuinfo.c | 6 ++ arch/arm64/kernel/fpsimd.c | 114 +++++++++++++++++++++++++++++++++++- 6 files changed, 223 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 889226b4c6e1..88392272250e 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -41,6 +41,7 @@ struct cpuinfo_arm64 { u64 reg_id_aa64mmfr2; u64 reg_id_aa64pfr0; u64 reg_id_aa64pfr1; + u64 reg_id_aa64zfr0; u32 reg_id_dfr0; u32 reg_id_isar0; @@ -59,6 +60,9 @@ struct cpuinfo_arm64 { u32 reg_mvfr0; u32 reg_mvfr1; u32 reg_mvfr2; + + /* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */ + u64 reg_zcr; }; DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data); diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 4ea3441a8fa7..9b27e8c10086 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -10,7 +10,9 @@ #define __ASM_CPUFEATURE_H #include +#include #include +#include #include /* @@ -223,6 +225,13 @@ static inline bool id_aa64pfr0_32bit_el0(u64 pfr0) return val == ID_AA64PFR0_EL0_32BIT_64BIT; } +static inline bool id_aa64pfr0_sve(u64 pfr0) +{ + u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT); + + return val > 0; +} + void __init setup_cpu_features(void); void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, @@ -267,6 +276,33 @@ static inline bool system_supports_sve(void) return false; } +/* + * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE + * vector length. + * + * Use only if SVE is present. + * This function clobbers the SVE vector length. 
+ */ +static inline u64 read_zcr_features(void) +{ + u64 zcr; + unsigned int vq_max; + + /* + * Set the maximum possible VL, and write zeroes to all other + * bits to see if they stick. + */ + sve_kernel_enable(NULL); + write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1); + + zcr = read_sysreg_s(SYS_ZCR_EL1); + zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */ + vq_max = sve_vq_from_vl(sve_get_vl()); + zcr |= vq_max - 1; /* set LEN field to maximum effective value */ + + return zcr; +} + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 86f550ce7b4d..d8e0dc9f65a1 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -78,6 +78,7 @@ extern void sve_save_state(void *state, u32 *pfpsr); extern void sve_load_state(void const *state, u32 const *pfpsr, unsigned long vq_minus_1); extern unsigned int sve_get_vl(void); +extern int sve_kernel_enable(void *); extern int __ro_after_init sve_max_vl; @@ -90,10 +91,23 @@ extern void fpsimd_release_task(struct task_struct *task); extern int sve_set_vector_length(struct task_struct *task, unsigned long vl, unsigned long flags); +/* + * Probing and setup functions. + * Calls to these functions must be serialised with one another. + */ +extern void __init sve_init_vq_map(void); +extern void sve_update_vq_map(void); +extern int sve_verify_vq_map(void); +extern void __init sve_setup(void); + #else /* ! CONFIG_ARM64_SVE */ static inline void sve_alloc(struct task_struct *task) { } static inline void fpsimd_release_task(struct task_struct *task) { } +static inline void sve_init_vq_map(void) { } +static inline void sve_update_vq_map(void) { } +static inline int sve_verify_vq_map(void) { return 0; } +static inline void sve_setup(void) { } #endif /* ! 
CONFIG_ARM64_SVE */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0e192cbe4149..036ad9d1b442 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -287,6 +288,12 @@ static const struct arm64_ftr_bits ftr_id_dfr0[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_zcr[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, + ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */ + ARM64_FTR_END, +}; + /* * Common ftr bits for a 32bit register with all hidden, strict * attributes, with 4bit feature fields and a default safe value of @@ -353,6 +360,7 @@ static const struct __ftr_reg_entry { /* Op1 = 0, CRn = 0, CRm = 4 */ ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0), ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz), + ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz), /* Op1 = 0, CRn = 0, CRm = 5 */ ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0), @@ -367,6 +375,9 @@ static const struct __ftr_reg_entry { ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1), ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2), + /* Op1 = 0, CRn = 1, CRm = 2 */ + ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr), + /* Op1 = 3, CRn = 0, CRm = 0 */ { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 }, ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid), @@ -504,6 +515,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0); init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); + init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0); if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); @@ -524,6 +536,10 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); } + if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) { + init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr); + sve_init_vq_map(); + } } static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new) @@ -627,6 +643,9 @@ void update_cpu_features(int cpu, taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu, info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1); + taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu, + info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0); + /* * If we have AArch32, we care about 32-bit features for compat. * If the system doesn't support AArch32, don't update them. @@ -674,6 +693,16 @@ void update_cpu_features(int cpu, info->reg_mvfr2, boot->reg_mvfr2); } + if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) { + taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu, + info->reg_zcr, boot->reg_zcr); + + /* Probe vector lengths, unless we already gave up on SVE */ + if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) && + !sys_caps_initialised) + sve_update_vq_map(); + } + /* * Mismatched CPU features are a recipe for disaster. Don't even * pretend to support them. 
@@ -1106,6 +1135,23 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps) } } +static void verify_sve_features(void) +{ + u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); + u64 zcr = read_zcr_features(); + + unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK; + unsigned int len = zcr & ZCR_ELx_LEN_MASK; + + if (len < safe_len || sve_verify_vq_map()) { + pr_crit("CPU%d: SVE: required vector length(s) missing\n", + smp_processor_id()); + cpu_die_early(); + } + + /* Add checks on other ZCR bits here if necessary */ +} + /* * Run through the enabled system capabilities and enable() it on this CPU. * The capabilities were decided based on the available CPUs at the boot time. @@ -1119,8 +1165,12 @@ static void verify_local_cpu_capabilities(void) verify_local_cpu_errata_workarounds(); verify_local_cpu_features(arm64_features); verify_local_elf_hwcaps(arm64_elf_hwcaps); + if (system_supports_32bit_el0()) verify_local_elf_hwcaps(compat_elf_hwcaps); + + if (system_supports_sve()) + verify_sve_features(); } void check_local_cpu_capabilities(void) @@ -1198,6 +1248,8 @@ void __init setup_cpu_features(void) if (system_supports_32bit_el0()) setup_elf_hwcaps(compat_elf_hwcaps); + sve_setup(); + /* Advertise that we have computed the system capabilities */ set_sys_caps_initialised(); diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 1ff1c5a67081..58da504be36e 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -331,6 +332,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1); info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); + info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1); /* Update the 32bit ID registers only if AArch32 is implemented */ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { @@ -353,6 +355,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_mvfr2 = read_cpuid(MVFR2_EL1); } + if (IS_ENABLED(CONFIG_ARM64_SVE) && + id_aa64pfr0_sve(info->reg_id_aa64pfr0)) + info->reg_zcr = read_zcr_features(); + cpuinfo_detect_icache_policy(info); } diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 667be3472114..715398e91d65 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -115,19 +115,19 @@ static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state); /* Default VL for tasks that don't set it explicitly: */ -static int sve_default_vl = SVE_VL_MIN; +static int sve_default_vl = -1; #ifdef CONFIG_ARM64_SVE /* Maximum supported vector length across all CPUs (initially poisoned) */ int __ro_after_init sve_max_vl = -1; /* Set of available vector lengths, as vq_to_bit(vq): */ -static DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); +static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); #else /* ! CONFIG_ARM64_SVE */ /* Dummy declaration for code that will be optimised out: */ -extern DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); +extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); #endif /* ! CONFIG_ARM64_SVE */ @@ -496,6 +496,111 @@ out: return 0; } +/* + * Bitmap for temporary storage of the per-CPU set of supported vector lengths + * during secondary boot. 
+ */ +static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX); + +static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX)) +{ + unsigned int vq, vl; + unsigned long zcr; + + bitmap_zero(map, SVE_VQ_MAX); + + zcr = ZCR_ELx_LEN_MASK; + zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr; + + for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) { + write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */ + vl = sve_get_vl(); + vq = sve_vq_from_vl(vl); /* skip intervening lengths */ + set_bit(vq_to_bit(vq), map); + } +} + +void __init sve_init_vq_map(void) +{ + sve_probe_vqs(sve_vq_map); +} + +/* + * If we haven't committed to the set of supported VQs yet, filter out + * those not supported by the current CPU. + */ +void sve_update_vq_map(void) +{ + sve_probe_vqs(sve_secondary_vq_map); + bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX); +} + +/* Check whether the current CPU supports all VQs in the committed set */ +int sve_verify_vq_map(void) +{ + int ret = 0; + + sve_probe_vqs(sve_secondary_vq_map); + bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map, + SVE_VQ_MAX); + if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) { + pr_warn("SVE: cpu%d: Required vector length(s) missing\n", + smp_processor_id()); + ret = -EINVAL; + } + + return ret; +} + +/* + * Enable SVE for EL1. + * Intended for use by the cpufeatures code during CPU boot. + */ +int sve_kernel_enable(void *__always_unused p) +{ + write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1); + isb(); + + return 0; +} + +void __init sve_setup(void) +{ + u64 zcr; + + if (!system_supports_sve()) + return; + + /* + * The SVE architecture mandates support for 128-bit vectors, + * so sve_vq_map must have at least SVE_VQ_MIN set. + * If something went wrong, at least try to patch it up: + */ + if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map))) + set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map); + + zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); + sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1); + + /* + * Sanity-check that the max VL we determined through CPU features + * corresponds properly to sve_vq_map. If not, do our best: + */ + if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl))) + sve_max_vl = find_supported_vector_length(sve_max_vl); + + /* + * For the default VL, pick the maximum supported value <= 64. + * VL == 64 is guaranteed not to grow the signal frame. + */ + sve_default_vl = find_supported_vector_length(64); + + pr_info("SVE: maximum available vector length %u bytes per vector\n", + sve_max_vl); + pr_info("SVE: default vector length %u bytes per vector\n", + sve_default_vl); +} + /* * Called from the put_task_struct() path, which cannot get here * unless dead_task is really dead and not schedulable. @@ -631,6 +736,9 @@ void fpsimd_flush_thread(void) * This is where we ensure that all user tasks have a valid * vector length configured: no kernel task can become a user * task without an exec and hence a call to this function. + * By the time the first call to this function is made, all + * early hardware probing is complete, so sve_default_vl + * should be valid. * If a bug causes this to go wrong, we make some noise and * try to fudge thread.sve_vl to a safe value here. 
*/ -- cgit From 1bd3f93641ec710015fcb3badca07dacd58c91d3 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:11 +0000 Subject: arm64/sve: Preserve SVE registers around kernel-mode NEON use MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Kernel-mode NEON will corrupt the SVE vector registers, due to the way they alias the FPSIMD vector registers in the hardware. This patch ensures that any live SVE register content for the task is saved by kernel_neon_begin(). The data will be restored in the usual way on return to userspace. Signed-off-by: Dave Martin Reviewed-by: Ard Biesheuvel Reviewed-by: Alex Bennée Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/fpsimd.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 715398e91d65..741fbbfa1265 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -886,8 +886,10 @@ void kernel_neon_begin(void) __this_cpu_write(kernel_neon_busy, true); /* Save unsaved task fpsimd state, if any: */ - if (current->mm && !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE)) - fpsimd_save_state(&current->thread.fpsimd_state); + if (current->mm) { + task_fpsimd_save(); + set_thread_flag(TIF_FOREIGN_FPSTATE); + } /* Invalidate any task state remaining in the fpsimd regs: */ __this_cpu_write(fpsimd_last_state, NULL); -- cgit From fdfa976cae5cbd46aa4b4f9e554a93c9f8b35c62 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:12 +0000 Subject: arm64/sve: Preserve SVE registers around EFI runtime service calls MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The EFI runtime services ABI allows EFI to make free use of the FPSIMD registers during EFI runtime service calls, subject to the callee-save requirements of the AArch64 procedure call standard. However, the SVE architecture allows upper bits of the SVE vector registers to be zeroed as a side-effect of FPSIMD V-register writes. This means that the SVE vector registers must be saved in their entirety in order to avoid data loss: non-SVE-aware EFI implementations cannot restore them correctly. The non-IRQ case is already handled gracefully by kernel_neon_begin(). For the IRQ case, this patch allocates a suitable per-CPU stash buffer for the full SVE register state and uses it to preserve the affected registers around EFI calls. It is currently unclear how the EFI runtime services ABI will be clarified with respect to SVE, so it is safest to assume that the predicate registers and FFR must be saved and restored too. No attempt is made to restore the vector length after a call, for now. It is deemed rather insane for EFI to change it, and contemporary EFI implementations certainly won't. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Reviewed-by: Catalin Marinas Cc: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/fpsimd.c | 67 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 61 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 741fbbfa1265..1e531156f1d7 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -123,11 +123,13 @@ static int sve_default_vl = -1; int __ro_after_init sve_max_vl = -1; /* Set of available vector lengths, as vq_to_bit(vq): */ static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); +static void __percpu *efi_sve_state; #else /* !
CONFIG_ARM64_SVE */ /* Dummy declaration for code that will be optimised out: */ extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); +extern void __percpu *efi_sve_state; #endif /* ! CONFIG_ARM64_SVE */ @@ -552,6 +554,30 @@ int sve_verify_vq_map(void) return ret; } +static void __init sve_efi_setup(void) +{ + if (!IS_ENABLED(CONFIG_EFI)) + return; + + /* + * alloc_percpu() warns and prints a backtrace if this goes wrong. + * This is evidence of a crippled system and we are returning void, + * so no attempt is made to handle this situation here. + */ + if (!sve_vl_valid(sve_max_vl)) + goto fail; + + efi_sve_state = __alloc_percpu( + SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES); + if (!efi_sve_state) + goto fail; + + return; + +fail: + panic("Cannot allocate percpu memory for EFI SVE save/restore"); +} + /* * Enable SVE for EL1. * Intended for use by the cpufeatures code during CPU boot. @@ -599,6 +625,8 @@ void __init sve_setup(void) sve_max_vl); pr_info("SVE: default vector length %u bytes per vector\n", sve_default_vl); + + sve_efi_setup(); } /* @@ -927,6 +955,7 @@ EXPORT_SYMBOL(kernel_neon_end); static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state); static DEFINE_PER_CPU(bool, efi_fpsimd_state_used); +static DEFINE_PER_CPU(bool, efi_sve_state_used); /* * EFI runtime services support functions @@ -952,10 +981,24 @@ void __efi_fpsimd_begin(void) WARN_ON(preemptible()); - if (may_use_simd()) + if (may_use_simd()) { kernel_neon_begin(); - else { - fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state)); + } else { + /* + * If !efi_sve_state, SVE can't be in use yet and doesn't need + * preserving: + */ + if (system_supports_sve() && likely(efi_sve_state)) { + char *sve_state = this_cpu_ptr(efi_sve_state); + + __this_cpu_write(efi_sve_state_used, true); + + sve_save_state(sve_state + sve_ffr_offset(sve_max_vl), + &this_cpu_ptr(&efi_fpsimd_state)->fpsr); + } else { + fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state)); + } + __this_cpu_write(efi_fpsimd_state_used, true); } } @@ -968,10 +1011,22 @@ void __efi_fpsimd_end(void) if (!system_supports_fpsimd()) return; - if (__this_cpu_xchg(efi_fpsimd_state_used, false)) - fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state)); - else + if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) { kernel_neon_end(); + } else { + if (system_supports_sve() && + likely(__this_cpu_read(efi_sve_state_used))) { + char const *sve_state = this_cpu_ptr(efi_sve_state); + + sve_load_state(sve_state + sve_ffr_offset(sve_max_vl), + &this_cpu_ptr(&efi_fpsimd_state)->fpsr, + sve_vq_from_vl(sve_get_vl()) - 1); + + __this_cpu_write(efi_sve_state_used, false); + } else { + fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state)); + } + } } #endif /* CONFIG_EFI */ -- cgit From 43d4da2c45b2f5d62f8a79ff7c6f95089bb24656 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:13 +0000 Subject: arm64/sve: ptrace and ELF coredump support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch defines and implements a new regset NT_ARM_SVE, which describes a thread's SVE register state. This allows a debugger to manipulate the SVE state, as well as being included in ELF coredumps for post-mortem debugging. Because the regset size and layout are dependent on the thread's current vector length, it is not possible to define a C struct to describe the regset contents as is done for existing regsets. 
Instead, and for the same reasons, NT_ARM_SVE is based on the freeform variable-layout approach used for the SVE signal frame. Additionally, to reduce debug overhead when debugging threads that might or might not have live SVE register state, NT_ARM_SVE may be presented in one of two different formats: the old struct user_fpsimd_state format is embedded for describing the state of a thread with no live SVE state, whereas a new variable-layout structure is embedded for describing live SVE state. This avoids a debugger needing to poll NT_PRFPREG in addition to NT_ARM_SVE, and allows existing userspace code to handle the non-SVE case without too much modification. For this to work, NT_ARM_SVE is defined with a fixed-format header of type struct user_sve_header, which the recipient can use to figure out the content, size and layout of the rest of the regset. Accessor macros are defined to allow the vector-length-dependent parts of the regset to be manipulated. Signed-off-by: Alan Hayward Signed-off-by: Dave Martin Reviewed-by: Catalin Marinas Cc: Alex Bennée Cc: Okamoto Takayuki Signed-off-by: Will Deacon --- arch/arm64/include/asm/fpsimd.h | 12 +- arch/arm64/include/uapi/asm/ptrace.h | 138 +++++++++++++++++ arch/arm64/kernel/fpsimd.c | 60 ++++++++ arch/arm64/kernel/ptrace.c | 280 ++++++++++++++++++++++++++++++++++- include/uapi/linux/elf.h | 1 + 5 files changed, 482 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index d8e0dc9f65a1..d754e5a6949c 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -38,13 +38,16 @@ struct fpsimd_state { __uint128_t vregs[32]; u32 fpsr; u32 fpcr; + /* + * For ptrace compatibility, pad to next 128-bit + * boundary here if extending this struct. + */ }; }; /* the id of the last cpu to have restored this state */ unsigned int cpu; }; - #if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* Masks for extracting the FPSR and FPCR from the FPSCR */ #define VFP_FPSCR_STAT_MASK 0xf800009f @@ -88,6 +91,10 @@ extern size_t sve_state_size(struct task_struct const *task); extern void sve_alloc(struct task_struct *task); extern void fpsimd_release_task(struct task_struct *task); +extern void fpsimd_sync_to_sve(struct task_struct *task); +extern void sve_sync_to_fpsimd(struct task_struct *task); +extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task); + extern int sve_set_vector_length(struct task_struct *task, unsigned long vl, unsigned long flags); @@ -104,6 +111,9 @@ extern void __init sve_setup(void); static inline void sve_alloc(struct task_struct *task) { } static inline void fpsimd_release_task(struct task_struct *task) { } +static inline void sve_sync_to_fpsimd(struct task_struct *task) { } +static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { } + static inline void sve_init_vq_map(void) { } static inline void sve_update_vq_map(void) { } static inline int sve_verify_vq_map(void) { return 0; } diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 3697d95ba0a1..e7085589f81c 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -22,6 +22,7 @@ #include #include +#include /* @@ -62,6 +63,8 @@ #ifndef __ASSEMBLY__ +#include + /* * User structures for general purpose, floating point and debug registers.
*/ @@ -89,6 +92,141 @@ struct user_hwdebug_state { } dbg_regs[16]; }; +/* SVE/FP/SIMD state (NT_ARM_SVE) */ + +struct user_sve_header { + __u32 size; /* total meaningful regset content in bytes */ + __u32 max_size; /* maxmium possible size for this thread */ + __u16 vl; /* current vector length */ + __u16 max_vl; /* maximum possible vector length */ + __u16 flags; + __u16 __reserved; +}; + +/* Definitions for user_sve_header.flags: */ +#define SVE_PT_REGS_MASK (1 << 0) + +#define SVE_PT_REGS_FPSIMD 0 +#define SVE_PT_REGS_SVE SVE_PT_REGS_MASK + +/* + * Common SVE_PT_* flags: + * These must be kept in sync with prctl interface in + */ +#define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16) +#define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16) + + +/* + * The remainder of the SVE state follows struct user_sve_header. The + * total size of the SVE state (including header) depends on the + * metadata in the header: SVE_PT_SIZE(vq, flags) gives the total size + * of the state in bytes, including the header. + * + * Refer to for details of how to pass the correct + * "vq" argument to these macros. + */ + +/* Offset from the start of struct user_sve_header to the register data */ +#define SVE_PT_REGS_OFFSET \ + ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \ + / SVE_VQ_BYTES * SVE_VQ_BYTES) + +/* + * The register data content and layout depends on the value of the + * flags field. + */ + +/* + * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD case: + * + * The payload starts at offset SVE_PT_FPSIMD_OFFSET, and is of type + * struct user_fpsimd_state. Additional data might be appended in the + * future: use SVE_PT_FPSIMD_SIZE(vq, flags) to compute the total size. + * SVE_PT_FPSIMD_SIZE(vq, flags) will never be less than + * sizeof(struct user_fpsimd_state). + */ + +#define SVE_PT_FPSIMD_OFFSET SVE_PT_REGS_OFFSET + +#define SVE_PT_FPSIMD_SIZE(vq, flags) (sizeof(struct user_fpsimd_state)) + +/* + * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE case: + * + * The payload starts at offset SVE_PT_SVE_OFFSET, and is of size + * SVE_PT_SVE_SIZE(vq, flags). + * + * Additional macros describe the contents and layout of the payload. + * For each, SVE_PT_SVE_x_OFFSET(args) is the start offset relative to + * the start of struct user_sve_header, and SVE_PT_SVE_x_SIZE(args) is + * the size in bytes: + * + * x type description + * - ---- ----------- + * ZREGS \ + * ZREG | + * PREGS | refer to + * PREG | + * FFR / + * + * FPSR uint32_t FPSR + * FPCR uint32_t FPCR + * + * Additional data might be appended in the future. 
+ */ + +#define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq) +#define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq) +#define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq) +#define SVE_PT_SVE_FPSR_SIZE sizeof(__u32) +#define SVE_PT_SVE_FPCR_SIZE sizeof(__u32) + +#define __SVE_SIG_TO_PT(offset) \ + ((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET) + +#define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET + +#define SVE_PT_SVE_ZREGS_OFFSET \ + __SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET) +#define SVE_PT_SVE_ZREG_OFFSET(vq, n) \ + __SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n)) +#define SVE_PT_SVE_ZREGS_SIZE(vq) \ + (SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET) + +#define SVE_PT_SVE_PREGS_OFFSET(vq) \ + __SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq)) +#define SVE_PT_SVE_PREG_OFFSET(vq, n) \ + __SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n)) +#define SVE_PT_SVE_PREGS_SIZE(vq) \ + (SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \ + SVE_PT_SVE_PREGS_OFFSET(vq)) + +#define SVE_PT_SVE_FFR_OFFSET(vq) \ + __SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq)) + +#define SVE_PT_SVE_FPSR_OFFSET(vq) \ + ((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \ + (SVE_VQ_BYTES - 1)) \ + / SVE_VQ_BYTES * SVE_VQ_BYTES) +#define SVE_PT_SVE_FPCR_OFFSET(vq) \ + (SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE) + +/* + * Any future extension appended after FPCR must be aligned to the next + * 128-bit boundary. + */ + +#define SVE_PT_SVE_SIZE(vq, flags) \ + ((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \ + - SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \ + / SVE_VQ_BYTES * SVE_VQ_BYTES) + +#define SVE_PT_SIZE(vq, flags) \ + (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \ + SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \ + : SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags)) + #endif /* __ASSEMBLY__ */ #endif /* _UAPI__ASM_PTRACE_H */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 1e531156f1d7..b82d44693b9d 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -428,6 +428,66 @@ void sve_alloc(struct task_struct *task) BUG_ON(!task->thread.sve_state); } + +/* + * Ensure that task->thread.sve_state is up to date with respect to + * the user task, irrespective of when SVE is in use or not. + * + * This should only be called by ptrace. task must be non-runnable. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + */ +void fpsimd_sync_to_sve(struct task_struct *task) +{ + if (!test_tsk_thread_flag(task, TIF_SVE)) + fpsimd_to_sve(task); +} + +/* + * Ensure that task->thread.fpsimd_state is up to date with respect to + * the user task, irrespective of whether SVE is in use or not. + * + * This should only be called by ptrace. task must be non-runnable. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + */ +void sve_sync_to_fpsimd(struct task_struct *task) +{ + if (test_tsk_thread_flag(task, TIF_SVE)) + sve_to_fpsimd(task); +} + +/* + * Ensure that task->thread.sve_state is up to date with respect to + * the task->thread.fpsimd_state. + * + * This should only be called by ptrace to merge new FPSIMD register + * values into a task for which SVE is currently active. + * task must be non-runnable. + * task->thread.sve_state must point to at least sve_state_size(task) + * bytes of allocated kernel memory. + * task->thread.fpsimd_state must already have been initialised with + * the new FPSIMD register values to be merged in. 
+ */ +void sve_sync_from_fpsimd_zeropad(struct task_struct *task) +{ + unsigned int vq; + void *sst = task->thread.sve_state; + struct fpsimd_state const *fst = &task->thread.fpsimd_state; + unsigned int i; + + if (!test_tsk_thread_flag(task, TIF_SVE)) + return; + + vq = sve_vq_from_vl(task->thread.sve_vl); + + memset(sst, 0, SVE_SIG_REGS_SIZE(vq)); + + for (i = 0; i < 32; ++i) + memcpy(ZREG(sst, vq, i), &fst->vregs[i], + sizeof(fst->vregs[i])); +} + int sve_set_vector_length(struct task_struct *task, unsigned long vl, unsigned long flags) { diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9cbb6123208f..7c44658b316d 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include +#include #include #include #include @@ -618,17 +620,56 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, /* * TODO: update fp accessors for lazy context switching (sync/flush hwstate) */ -static int fpr_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) +static int __fpr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf, unsigned int start_pos) { struct user_fpsimd_state *uregs; + + sve_sync_to_fpsimd(target); + uregs = &target->thread.fpsimd_state.user_fpsimd; + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, + start_pos, start_pos + sizeof(*uregs)); +} + +static int fpr_get(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ if (target == current) fpsimd_preserve_current_state(); - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1); + return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0); +} + +static int __fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf, + unsigned int start_pos) +{ + int ret; + struct user_fpsimd_state newstate; + + /* + * Ensure target->thread.fpsimd_state is up to date, so that a + * short copyin can't resurrect stale data. 
+ */ + sve_sync_to_fpsimd(target); + + newstate = target->thread.fpsimd_state.user_fpsimd; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, + start_pos, start_pos + sizeof(newstate)); + if (ret) + return ret; + + target->thread.fpsimd_state.user_fpsimd = newstate; + + return ret; } static int fpr_set(struct task_struct *target, const struct user_regset *regset, @@ -636,15 +677,14 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, const void *kbuf, const void __user *ubuf) { int ret; - struct user_fpsimd_state newstate = - target->thread.fpsimd_state.user_fpsimd; - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); + ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0); if (ret) return ret; - target->thread.fpsimd_state.user_fpsimd = newstate; + sve_sync_from_fpsimd_zeropad(target); fpsimd_flush_task_state(target); + return ret; } @@ -702,6 +742,215 @@ static int system_call_set(struct task_struct *target, return ret; } +#ifdef CONFIG_ARM64_SVE + +static void sve_init_header_from_task(struct user_sve_header *header, + struct task_struct *target) +{ + unsigned int vq; + + memset(header, 0, sizeof(*header)); + + header->flags = test_tsk_thread_flag(target, TIF_SVE) ? + SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD; + if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT)) + header->flags |= SVE_PT_VL_INHERIT; + + header->vl = target->thread.sve_vl; + vq = sve_vq_from_vl(header->vl); + + header->max_vl = sve_max_vl; + if (WARN_ON(!sve_vl_valid(sve_max_vl))) + header->max_vl = header->vl; + + header->size = SVE_PT_SIZE(vq, header->flags); + header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl), + SVE_PT_REGS_SVE); +} + +static unsigned int sve_size_from_header(struct user_sve_header const *header) +{ + return ALIGN(header->size, SVE_VQ_BYTES); +} + +static unsigned int sve_get_size(struct task_struct *target, + const struct user_regset *regset) +{ + struct user_sve_header header; + + if (!system_supports_sve()) + return 0; + + sve_init_header_from_task(&header, target); + return sve_size_from_header(&header); +} + +static int sve_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + struct user_sve_header header; + unsigned int vq; + unsigned long start, end; + + if (!system_supports_sve()) + return -EINVAL; + + /* Header */ + sve_init_header_from_task(&header, target); + vq = sve_vq_from_vl(header.vl); + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header, + 0, sizeof(header)); + if (ret) + return ret; + + if (target == current) + fpsimd_preserve_current_state(); + + /* Registers: FPSIMD-only case */ + + BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); + if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) + return __fpr_get(target, regset, pos, count, kbuf, ubuf, + SVE_PT_FPSIMD_OFFSET); + + /* Otherwise: full SVE case */ + + BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); + start = SVE_PT_SVE_OFFSET; + end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + target->thread.sve_state, + start, end); + if (ret) + return ret; + + start = end; + end = SVE_PT_SVE_FPSR_OFFSET(vq); + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + start, end); + if (ret) + return ret; + + /* + * Copy fpsr, and fpcr which must follow contiguously in + * struct fpsimd_state: + */ + start = end; + end = SVE_PT_SVE_FPCR_OFFSET(vq) + 
SVE_PT_SVE_FPCR_SIZE; + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.fpsimd_state.fpsr, + start, end); + if (ret) + return ret; + + start = end; + end = sve_size_from_header(&header); + return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + start, end); +} + +static int sve_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct user_sve_header header; + unsigned int vq; + unsigned long start, end; + + if (!system_supports_sve()) + return -EINVAL; + + /* Header */ + if (count < sizeof(header)) + return -EINVAL; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, + 0, sizeof(header)); + if (ret) + goto out; + + /* + * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by + * sve_set_vector_length(), which will also validate them for us: + */ + ret = sve_set_vector_length(target, header.vl, + ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); + if (ret) + goto out; + + /* Actual VL set may be less than the user asked for: */ + vq = sve_vq_from_vl(target->thread.sve_vl); + + /* Registers: FPSIMD-only case */ + + BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); + if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) { + ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, + SVE_PT_FPSIMD_OFFSET); + clear_tsk_thread_flag(target, TIF_SVE); + goto out; + } + + /* Otherwise: full SVE case */ + + /* + * If setting a different VL from the requested VL and there is + * register data, the data layout will be wrong: don't even + * try to set the registers in this case. + */ + if (count && vq != sve_vq_from_vl(header.vl)) { + ret = -EIO; + goto out; + } + + sve_alloc(target); + + /* + * Ensure target->thread.sve_state is up to date with target's + * FPSIMD regs, so that a short copyin leaves trailing registers + * unmodified. 
+ */ + fpsimd_sync_to_sve(target); + set_tsk_thread_flag(target, TIF_SVE); + + BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); + start = SVE_PT_SVE_OFFSET; + end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + target->thread.sve_state, + start, end); + if (ret) + goto out; + + start = end; + end = SVE_PT_SVE_FPSR_OFFSET(vq); + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + start, end); + if (ret) + goto out; + + /* + * Copy fpsr, and fpcr which must follow contiguously in + * struct fpsimd_state: + */ + start = end; + end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpsimd_state.fpsr, + start, end); + +out: + fpsimd_flush_task_state(target); + return ret; +} + +#endif /* CONFIG_ARM64_SVE */ + enum aarch64_regset { REGSET_GPR, REGSET_FPR, @@ -711,6 +960,9 @@ enum aarch64_regset { REGSET_HW_WATCH, #endif REGSET_SYSTEM_CALL, +#ifdef CONFIG_ARM64_SVE + REGSET_SVE, +#endif }; static const struct user_regset aarch64_regsets[] = { @@ -768,6 +1020,18 @@ static const struct user_regset aarch64_regsets[] = { .get = system_call_get, .set = system_call_set, }, +#ifdef CONFIG_ARM64_SVE + [REGSET_SVE] = { /* Scalable Vector Extension */ + .core_note_type = NT_ARM_SVE, + .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE), + SVE_VQ_BYTES), + .size = SVE_VQ_BYTES, + .align = SVE_VQ_BYTES, + .get = sve_get, + .set = sve_set, + .get_size = sve_get_size, + }, +#endif }; static const struct user_regset_view user_aarch64_view = { diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index b5280db9ef6a..735b8f4d12fc 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -416,6 +416,7 @@ typedef struct elf64_shdr { #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ #define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ +#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */ #define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */ #define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */ #define NT_METAG_TLS 0x502 /* Metag TLS pointer */ -- cgit From 2d2123bc7c7f843aa9db87720de159a049839862 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:14 +0000 Subject: arm64/sve: Add prctl controls for userspace vector length management MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds two arm64-specific prctls, to permit userspace to control its vector length: * PR_SVE_SET_VL: set the thread's SVE vector length and vector length inheritance mode. * PR_SVE_GET_VL: get the same information. Although these prctls resemble instruction set features in the SVE architecture, they provide additional control: the vector length inheritance mode is Linux-specific and nothing to do with the architecture, and the architecture does not permit EL0 to set its own vector length directly. Both can be used in portable tools without requiring the use of SVE instructions. 
Signed-off-by: Dave Martin Reviewed-by: Catalin Marinas Cc: Alex Bennée [will: Fixed up prctl constants to avoid clash with PDEATHSIG] Signed-off-by: Will Deacon --- arch/arm64/include/asm/fpsimd.h | 14 +++++++++++ arch/arm64/include/asm/processor.h | 4 +++ arch/arm64/kernel/fpsimd.c | 50 ++++++++++++++++++++++++++++++++++++++ include/uapi/linux/prctl.h | 4 +++ kernel/sys.c | 12 +++++++++ 5 files changed, 84 insertions(+) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index d754e5a6949c..b868412c815c 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -17,6 +17,7 @@ #define __ASM_FP_H #include +#include #ifndef __ASSEMBLY__ @@ -98,6 +99,9 @@ extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task); extern int sve_set_vector_length(struct task_struct *task, unsigned long vl, unsigned long flags); +extern int sve_set_current_vl(unsigned long arg); +extern int sve_get_current_vl(void); + /* * Probing and setup functions. * Calls to these functions must be serialised with one another. @@ -114,6 +118,16 @@ static inline void fpsimd_release_task(struct task_struct *task) { } static inline void sve_sync_to_fpsimd(struct task_struct *task) { } static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { } +static inline int sve_set_current_vl(unsigned long arg) +{ + return -EINVAL; +} + +static inline int sve_get_current_vl(void) +{ + return -EINVAL; +} + static inline void sve_init_vq_map(void) { } static inline void sve_update_vq_map(void) { } static inline int sve_verify_vq_map(void) { return 0; } diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index c6fddb005dc2..023cacb946c3 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -217,5 +217,9 @@ static inline void spin_lock_prefetch(const void *ptr) int cpu_enable_pan(void *__unused); int cpu_enable_cache_maint_trap(void *__unused); +/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */ +#define SVE_SET_VL(arg) sve_set_current_vl(arg) +#define SVE_GET_VL() sve_get_current_vl() + #endif /* __ASSEMBLY__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index b82d44693b9d..fd3cfdd7f9be 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -558,6 +559,55 @@ out: return 0; } +/* + * Encode the current vector length and flags for return. + * This is only required for prctl(): ptrace has separate fields + * + * flags are as for sve_set_vector_length(). + */ +static int sve_prctl_status(unsigned long flags) +{ + int ret; + + if (flags & PR_SVE_SET_VL_ONEXEC) + ret = current->thread.sve_vl_onexec; + else + ret = current->thread.sve_vl; + + if (test_thread_flag(TIF_SVE_VL_INHERIT)) + ret |= PR_SVE_VL_INHERIT; + + return ret; +} + +/* PR_SVE_SET_VL */ +int sve_set_current_vl(unsigned long arg) +{ + unsigned long vl, flags; + int ret; + + vl = arg & PR_SVE_VL_LEN_MASK; + flags = arg & ~vl; + + if (!system_supports_sve()) + return -EINVAL; + + ret = sve_set_vector_length(current, vl, flags); + if (ret) + return ret; + + return sve_prctl_status(flags); +} + +/* PR_SVE_GET_VL */ +int sve_get_current_vl(void) +{ + if (!system_supports_sve()) + return -EINVAL; + + return sve_prctl_status(0); +} + /* * Bitmap for temporary storage of the per-CPU set of supported vector lengths * during secondary boot. 
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 1b64901ca6b3..f60db5db6e8e 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -198,7 +198,11 @@ struct prctl_mm_map { # define PR_CAP_AMBIENT_CLEAR_ALL 4 /* arm64 Scalable Vector Extension controls */ +/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */ +#define PR_SVE_SET_VL 50 /* set task vector length */ # define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */ +#define PR_SVE_GET_VL 51 /* get task vector length */ +/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */ # define PR_SVE_VL_LEN_MASK 0xffff # define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */ diff --git a/kernel/sys.c b/kernel/sys.c index 9aebc2935013..c541916b38c6 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -110,6 +110,12 @@ #ifndef SET_FP_MODE # define SET_FP_MODE(a,b) (-EINVAL) #endif +#ifndef SVE_SET_VL +# define SVE_SET_VL(a) (-EINVAL) +#endif +#ifndef SVE_GET_VL +# define SVE_GET_VL() (-EINVAL) +#endif /* * this is where the system-wide overflow UID and GID are defined, for @@ -2385,6 +2391,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, case PR_GET_FP_MODE: error = GET_FP_MODE(me); break; + case PR_SVE_SET_VL: + error = SVE_SET_VL(arg2); + break; + case PR_SVE_GET_VL: + error = SVE_GET_VL(); + break; default: error = -EINVAL; break; -- cgit From 4ffa09a939ab6d95655b3aee6ff79de48df95be7 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:15 +0000 Subject: arm64/sve: Add sysctl to set the default vector length for new processes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because of the effect of SVE on the size of the signal frame, the default vector length used for new processes involves a tradeoff between performance of SVE-enabled software on the one hand, and reliability of non-SVE-aware software on the other hand. For this reason, the best choice depends on the repertoire of userspace software in use and is thus best left up to distro maintainers, sysadmins and developers. If CONFIG_SYSCTL and CONFIG_PROC_SYSCTL are enabled, this patch exposes the default vector length in /proc/sys/abi/sve_default_vector_length, where boot scripts or the adventurous can poke it. In common with other arm64 ABI sysctls, this control is currently global: setting it requires CAP_SYS_ADMIN in the root user namespace, but the value set is effective for subsequent execs in all namespaces. The control only affects _new_ processes, however: changing it does not affect the vector length of any existing process. The intended usage model is that if userspace is known to be fully SVE-tolerant (or a developer is curious to find out) then this parameter can be cranked up during system startup. 
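As an illustration only (not part of the patch): a sketch of how the control might be exercised, assuming CONFIG_SYSCTL and CONFIG_PROC_SYSCTL are enabled and the caller has CAP_SYS_ADMIN in the root user namespace.

    #include <stdio.h>

    #define SVE_DEFAULT_VL "/proc/sys/abi/sve_default_vector_length"

    /* Request a new default VL; the kernel clamps it to a supported value. */
    static int set_default_vl(int vl)
    {
            FILE *f = fopen(SVE_DEFAULT_VL, "w");

            if (!f)
                    return -1;
            fprintf(f, "%d\n", vl);
            return fclose(f);
    }

    int main(void)
    {
            int vl = -1;
            FILE *f;

            set_default_vl(-1);     /* -1 means "use the maximum supported VL" */

            /* Read back the value the kernel actually chose. */
            f = fopen(SVE_DEFAULT_VL, "r");
            if (f) {
                    if (fscanf(f, "%d", &vl) != 1)
                            vl = -1;
                    fclose(f);
            }
            printf("default VL for new processes: %d bytes\n", vl);
            return 0;
    }

As the commit message notes, this only affects processes created by subsequent execve() calls; existing processes keep their current vector length.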
Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/fpsimd.c | 62 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index fd3cfdd7f9be..88e5e6aef300 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -335,6 +336,65 @@ static unsigned int find_supported_vector_length(unsigned int vl) return sve_vl_from_vq(bit_to_vq(bit)); } +#ifdef CONFIG_SYSCTL + +static int sve_proc_do_default_vl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + int vl = sve_default_vl; + struct ctl_table tmp_table = { + .data = &vl, + .maxlen = sizeof(vl), + }; + + ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos); + if (ret || !write) + return ret; + + /* Writing -1 has the special meaning "set to max": */ + if (vl == -1) { + /* Fail safe if sve_max_vl wasn't initialised */ + if (WARN_ON(!sve_vl_valid(sve_max_vl))) + vl = SVE_VL_MIN; + else + vl = sve_max_vl; + + goto chosen; + } + + if (!sve_vl_valid(vl)) + return -EINVAL; + + vl = find_supported_vector_length(vl); +chosen: + sve_default_vl = vl; + return 0; +} + +static struct ctl_table sve_default_vl_table[] = { + { + .procname = "sve_default_vector_length", + .mode = 0644, + .proc_handler = sve_proc_do_default_vl, + }, + { } +}; + +static int __init sve_sysctl_init(void) +{ + if (system_supports_sve()) + if (!register_sysctl("abi", sve_default_vl_table)) + return -EINVAL; + + return 0; +} + +#else /* ! CONFIG_SYSCTL */ +static int __init sve_sysctl_init(void) { return 0; } +#endif /* ! CONFIG_SYSCTL */ + #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) @@ -1209,6 +1269,6 @@ static int __init fpsimd_init(void) if (!(elf_hwcap & HWCAP_ASIMD)) pr_notice("Advanced SIMD is not implemented\n"); - return 0; + return sve_sysctl_init(); } late_initcall(fpsimd_init); -- cgit From 17eed27b02da88560b4592390952b9a71042ab8b Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:16 +0000 Subject: arm64/sve: KVM: Prevent guests from using SVE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Until KVM has full SVE support, guests must not be allowed to execute SVE instructions. This patch enables the necessary traps, and also ensures that the traps are disabled again on exit from the guest so that the host can still use SVE if it wants to. On guest exit, high bits of the SVE Zn registers may have been clobbered as a side-effect the execution of FPSIMD instructions in the guest. The existing KVM host FPSIMD restore code is not sufficient to restore these bits, so this patch explicitly marks the CPU as not containing cached vector state for any task, thus forcing a reload on the next return to userspace. This is an interim measure, in advance of adding full SVE awareness to KVM. This marking of cached vector state in the CPU as invalid is done using __this_cpu_write(fpsimd_last_state, NULL) in fpsimd.c. Due to the repeated use of this rather obscure operation, it makes sense to factor it out as a separate helper with a clearer name. This patch factors it out as fpsimd_flush_cpu_state(), and ports all callers to use it. 
As a side effect of this refactoring, a this_cpu_write() in fpsimd_cpu_pm_notifier() is changed to __this_cpu_write(). This should be fine, since cpu_pm_enter() is supposed to be called only with interrupts disabled. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Reviewed-by: Christoffer Dall Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm/include/asm/kvm_host.h | 3 +++ arch/arm64/include/asm/fpsimd.h | 1 + arch/arm64/include/asm/kvm_arm.h | 4 +++- arch/arm64/include/asm/kvm_host.h | 11 +++++++++++ arch/arm64/kernel/fpsimd.c | 31 +++++++++++++++++++++++++++++-- arch/arm64/kvm/hyp/switch.c | 6 +++--- virt/kvm/arm/arm.c | 3 +++ 7 files changed, 53 insertions(+), 6 deletions(-) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 4a879f6ff13b..242151ea6908 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -293,4 +293,7 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +/* All host FP/SIMD state is restored on guest exit, so nothing to save: */ +static inline void kvm_fpsimd_flush_cpu_state(void) {} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index b868412c815c..74f34392a531 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -74,6 +74,7 @@ extern void fpsimd_restore_current_state(void); extern void fpsimd_update_current_state(struct fpsimd_state *state); extern void fpsimd_flush_task_state(struct task_struct *target); +extern void sve_flush_cpu_state(void); /* Maximum VL that SVE VL-agnostic software can transparently support */ #define SVE_VL_ARCH_MAX 0x100 diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index dbf05370169a..7f069ff37f06 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -186,7 +186,8 @@ #define CPTR_EL2_TTA (1 << 20) #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) #define CPTR_EL2_TZ (1 << 8) -#define CPTR_EL2_DEFAULT 0x000033ff +#define CPTR_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 */ +#define CPTR_EL2_DEFAULT CPTR_EL2_RES1 /* Hyp Debug Configuration Register bits */ #define MDCR_EL2_TPMS (1 << 14) @@ -237,5 +238,6 @@ #define CPACR_EL1_FPEN (3 << 20) #define CPACR_EL1_TTA (1 << 28) +#define CPACR_EL1_DEFAULT (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN) #endif /* __ARM64_KVM_ARM_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e923b58606e2..674912d7a571 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -384,4 +385,14 @@ static inline void __cpu_init_stage2(void) "PARange is %d bits, unsupported configuration!", parange); } +/* + * All host FP/SIMD state is restored on guest exit, so nothing needs + * doing here except in the SVE case: +*/ +static inline void kvm_fpsimd_flush_cpu_state(void) +{ + if (system_supports_sve()) + sve_flush_cpu_state(); +} + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 88e5e6aef300..931fd8dca91a 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1050,6 +1050,33 @@ void fpsimd_flush_task_state(struct task_struct *t) t->thread.fpsimd_state.cpu = NR_CPUS; } +static inline void 
fpsimd_flush_cpu_state(void) +{ + __this_cpu_write(fpsimd_last_state, NULL); +} + +/* + * Invalidate any task SVE state currently held in this CPU's regs. + * + * This is used to prevent the kernel from trying to reuse SVE register data + * that is detroyed by KVM guest enter/exit. This function should go away when + * KVM SVE support is implemented. Don't use it for anything else. + */ +#ifdef CONFIG_ARM64_SVE +void sve_flush_cpu_state(void) +{ + struct fpsimd_state *const fpstate = __this_cpu_read(fpsimd_last_state); + struct task_struct *tsk; + + if (!fpstate) + return; + + tsk = container_of(fpstate, struct task_struct, thread.fpsimd_state); + if (test_tsk_thread_flag(tsk, TIF_SVE)) + fpsimd_flush_cpu_state(); +} +#endif /* CONFIG_ARM64_SVE */ + #ifdef CONFIG_KERNEL_MODE_NEON DEFINE_PER_CPU(bool, kernel_neon_busy); @@ -1090,7 +1117,7 @@ void kernel_neon_begin(void) } /* Invalidate any task state remaining in the fpsimd regs: */ - __this_cpu_write(fpsimd_last_state, NULL); + fpsimd_flush_cpu_state(); preempt_disable(); @@ -1211,7 +1238,7 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self, case CPU_PM_ENTER: if (current->mm) task_fpsimd_save(); - this_cpu_write(fpsimd_last_state, NULL); + fpsimd_flush_cpu_state(); break; case CPU_PM_EXIT: if (current->mm) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 35a90b8be3da..951f3ebaff26 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -48,7 +48,7 @@ static void __hyp_text __activate_traps_vhe(void) val = read_sysreg(cpacr_el1); val |= CPACR_EL1_TTA; - val &= ~CPACR_EL1_FPEN; + val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN); write_sysreg(val, cpacr_el1); write_sysreg(__kvm_hyp_vector, vbar_el1); @@ -59,7 +59,7 @@ static void __hyp_text __activate_traps_nvhe(void) u64 val; val = CPTR_EL2_DEFAULT; - val |= CPTR_EL2_TTA | CPTR_EL2_TFP; + val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ; write_sysreg(val, cptr_el2); } @@ -117,7 +117,7 @@ static void __hyp_text __deactivate_traps_vhe(void) write_sysreg(mdcr_el2, mdcr_el2); write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); - write_sysreg(CPACR_EL1_FPEN, cpacr_el1); + write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); write_sysreg(vectors, vbar_el1); } diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index b9f68e4add71..4d3cf9c82f5b 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -652,6 +652,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) */ preempt_disable(); + /* Flush FP/SIMD state that can't survive guest entry/exit */ + kvm_fpsimd_flush_cpu_state(); + kvm_pmu_flush_hwstate(vcpu); kvm_timer_flush_hwstate(vcpu); -- cgit From aac45ffd1f8eff7b919df3deaa06d872b665eb09 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:17 +0000 Subject: arm64/sve: KVM: Treat guest SVE use as undefined instruction execution MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When trapping forbidden attempts by a guest to use SVE, we want the guest to see a trap consistent with SVE not being implemented. This patch injects an undefined instruction exception into the guest in response to such an exception. 
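As an illustration only (not part of the patch): from inside a guest, an SVE instruction issued without checking HWCAP therefore behaves exactly as it would on non-SVE hardware. A hypothetical probe such as the sketch below (built with a toolchain that accepts SVE mnemonics, e.g. -march=armv8-a+sve) would report the instruction as trapping inside a guest; well-behaved software should simply check HWCAP_SVE instead.

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    static sigjmp_buf jb;

    static void on_sigill(int sig)
    {
            siglongjmp(jb, 1);      /* skip over the faulting instruction */
    }

    int main(void)
    {
            struct sigaction sa = { .sa_handler = on_sigill };
            long vl;

            sigemptyset(&sa.sa_mask);
            sigaction(SIGILL, &sa, NULL);

            if (sigsetjmp(jb, 1)) {
                    puts("SVE trapped as undefined (e.g. inside a KVM guest)");
                    return 1;
            }

            /* RDVL reads the current vector length in bytes. */
            asm volatile("rdvl %0, #1" : "=r" (vl));
            printf("SVE executed, VL = %ld bytes\n", vl);
            return 0;
    }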
Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Acked-by: Catalin Marinas Acked-by: Christoffer Dall Signed-off-by: Will Deacon --- arch/arm64/kvm/handle_exit.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 7debb74843a0..b71247995469 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -147,6 +147,13 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } +static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + /* Until SVE is supported for guests: */ + kvm_inject_undefined(vcpu); + return 1; +} + static exit_handle_fn arm_exit_handlers[] = { [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec, [ESR_ELx_EC_WFx] = kvm_handle_wfx, @@ -160,6 +167,7 @@ static exit_handle_fn arm_exit_handlers[] = { [ESR_ELx_EC_HVC64] = handle_hvc, [ESR_ELx_EC_SMC64] = handle_smc, [ESR_ELx_EC_SYS64] = kvm_handle_sys_reg, + [ESR_ELx_EC_SVE] = handle_sve, [ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort, [ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort, [ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug, -- cgit From 07d79fe7c22364de34618e58b92a8f091bd7e03d Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:18 +0000 Subject: arm64/sve: KVM: Hide SVE from CPU features exposed to guests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit KVM guests cannot currently use SVE, because SVE is always configured to trap to EL2. However, a guest that sees SVE reported as present in ID_AA64PFR0_EL1 may legitimately expect that SVE works and try to use it. Instead of working, the guest will receive an injected undef exception, which may cause the guest to oops or go into a spin. To avoid misleading the guest into believing that SVE will work, this patch masks out the SVE field from ID_AA64PFR0_EL1 when a guest attempts to read this register. No support is explicitly added for ID_AA64ZFR0_EL1 either, so that is still emulated as reading as zero, which is consistent with SVE not being implemented. This is a temporary measure, and will be removed in a later series when full KVM support for SVE is implemented. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Acked-by: Marc Zyngier Acked-by: Catalin Marinas Acked-by: Christoffer Dall Signed-off-by: Will Deacon --- arch/arm64/kvm/sys_regs.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index b1f75522177c..a0ee9b05e3d4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -897,8 +898,17 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) { u32 id = sys_reg((u32)r->Op0, (u32)r->Op1, (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); + u64 val = raz ? 0 : read_sanitised_ftr_reg(id); - return raz ? 
0 : read_sanitised_ftr_reg(id); + if (id == SYS_ID_AA64PFR0_EL1) { + if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) + pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n", + task_pid_nr(current)); + + val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); + } + + return val; } /* cpufeature ID register access trap handlers */ -- cgit From 43994d824e8443263dc98b151e6326bf677be52e Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:19 +0000 Subject: arm64/sve: Detect SVE and activate runtime support This patch enables detection of hardware SVE support via the cpufeatures framework, and reports its presence to the kernel and userspace via the new ARM64_SVE cpucap and HWCAP_SVE hwcap respectively. Userspace can also detect SVE using ID_AA64PFR0_EL1, using the cpufeatures MRS emulation. When running on hardware that supports SVE, this enables runtime kernel support for SVE, and allows user tasks to execute SVE instructions and make of the of the SVE-specific user/kernel interface extensions implemented by this series. Signed-off-by: Dave Martin Reviewed-by: Suzuki K Poulose Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- Documentation/arm64/cpu-feature-registers.txt | 6 +++++- Documentation/arm64/elf_hwcaps.txt | 4 ++++ arch/arm64/include/asm/cpucaps.h | 3 ++- arch/arm64/include/asm/cpufeature.h | 3 ++- arch/arm64/include/uapi/asm/hwcap.h | 1 + arch/arm64/kernel/cpufeature.c | 17 +++++++++++++++++ arch/arm64/kernel/cpuinfo.c | 1 + arch/arm64/kernel/entry.S | 7 ++++--- 8 files changed, 36 insertions(+), 6 deletions(-) diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt index 011ddfc1e570..bd9b3faab2c4 100644 --- a/Documentation/arm64/cpu-feature-registers.txt +++ b/Documentation/arm64/cpu-feature-registers.txt @@ -142,7 +142,11 @@ infrastructure: x--------------------------------------------------x | Name | bits | visible | |--------------------------------------------------| - | RES0 | [63-28] | n | + | RES0 | [63-36] | n | + |--------------------------------------------------| + | SVE | [35-32] | y | + |--------------------------------------------------| + | RES0 | [31-28] | n | |--------------------------------------------------| | GIC | [27-24] | n | |--------------------------------------------------| diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt index 0ba180522e3c..89edba12a9e0 100644 --- a/Documentation/arm64/elf_hwcaps.txt +++ b/Documentation/arm64/elf_hwcaps.txt @@ -154,3 +154,7 @@ HWCAP_ASIMDDP HWCAP_SHA512 Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0002. + +HWCAP_SVE + + Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001. 
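As an illustration only (not part of the patch): the hwcap documented above can be tested from userspace roughly as follows, assuming glibc's getauxval(). The fallback definition matches the value added to the uapi header below.

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_SVE
    #define HWCAP_SVE (1 << 22)     /* arch/arm64/include/uapi/asm/hwcap.h */
    #endif

    int main(void)
    {
            if (getauxval(AT_HWCAP) & HWCAP_SVE)
                    puts("SVE: kernel support and SVE registers available");
            else
                    puts("SVE not reported; do not use SVE instructions");
            return 0;
    }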
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 8da621627d7c..2ff7c5e8efab 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -40,7 +40,8 @@ #define ARM64_WORKAROUND_858921 19 #define ARM64_WORKAROUND_CAVIUM_30115 20 #define ARM64_HAS_DCPOP 21 +#define ARM64_SVE 22 -#define ARM64_NCAPS 22 +#define ARM64_NCAPS 23 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 9b27e8c10086..ac67cfc2585a 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -273,7 +273,8 @@ static inline bool system_uses_ttbr0_pan(void) static inline bool system_supports_sve(void) { - return false; + return IS_ENABLED(CONFIG_ARM64_SVE) && + cpus_have_const_cap(ARM64_SVE); } /* diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index a4bad90a3fa3..622941098779 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -41,5 +41,6 @@ #define HWCAP_SM4 (1 << 19) #define HWCAP_ASIMDDP (1 << 20) #define HWCAP_SHA512 (1 << 21) +#define HWCAP_SVE (1 << 22) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 036ad9d1b442..4cb2782f55a1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -145,6 +145,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), @@ -948,6 +949,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .min_field_value = 1, }, #endif +#ifdef CONFIG_ARM64_SVE + { + .desc = "Scalable Vector Extension", + .capability = ARM64_SVE, + .def_scope = SCOPE_SYSTEM, + .sys_reg = SYS_ID_AA64PFR0_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64PFR0_SVE_SHIFT, + .min_field_value = ID_AA64PFR0_SVE, + .matches = has_cpuid_feature, + .enable = sve_kernel_enable, + }, +#endif /* CONFIG_ARM64_SVE */ {}, }; @@ -985,6 +999,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC), +#ifdef CONFIG_ARM64_SVE + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE), +#endif {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 58da504be36e..1e2554543506 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -75,6 +75,7 @@ static const char *const hwcap_str[] = { "sm4", "asimddp", "sha512", + "sve", NULL }; diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index a989e234dc32..6d14b8f29b5f 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -867,9 +867,10 @@ el0_svc: mov wscno, w8 // syscall number in w8 mov wsc_nr, #__NR_syscalls -#ifndef CONFIG_ARM64_SVE +#ifdef CONFIG_ARM64_SVE 
+alternative_if_not ARM64_SVE b el0_svc_naked -#else +alternative_else_nop_endif tbz x16, #TIF_SVE, el0_svc_naked // Skip unless TIF_SVE set: bic x16, x16, #_TIF_SVE // discard SVE state str x16, [tsk, #TSK_TI_FLAGS] @@ -884,7 +885,7 @@ el0_svc: mrs x9, cpacr_el1 bic x9, x9, #CPACR_EL1_ZEN_EL0EN // disable SVE for el0 msr cpacr_el1, x9 // synchronised by eret to el0 -#endif /* CONFIG_ARM64_SVE */ +#endif el0_svc_naked: // compat entry point stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number -- cgit From ce6990813f15f4cabadf325791e35bd4af8152f5 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 31 Oct 2017 15:51:20 +0000 Subject: arm64/sve: Add documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds basic documentation of the user/kernel interface provided by the for SVE. Signed-off-by: Dave Martin Reviewed-by: Catalin Marinas Cc: Alan Hayward Cc: Alex Bennée Cc: Mark Rutland Cc: Michael Kerrisk Cc: Szabolcs Nagy Cc: linux-api@vger.kernel.org Signed-off-by: Will Deacon --- Documentation/arm64/sve.txt | 508 +++++++++++++++++++++++++++++++ arch/arm64/include/uapi/asm/sigcontext.h | 3 + 2 files changed, 511 insertions(+) create mode 100644 Documentation/arm64/sve.txt diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt new file mode 100644 index 000000000000..f128f736b4a5 --- /dev/null +++ b/Documentation/arm64/sve.txt @@ -0,0 +1,508 @@ + Scalable Vector Extension support for AArch64 Linux + =================================================== + +Author: Dave Martin +Date: 4 August 2017 + +This document outlines briefly the interface provided to userspace by Linux in +order to support use of the ARM Scalable Vector Extension (SVE). + +This is an outline of the most important features and issues only and not +intended to be exhaustive. + +This document does not aim to describe the SVE architecture or programmer's +model. To aid understanding, a minimal description of relevant programmer's +model features for SVE is included in Appendix A. + + +1. General +----------- + +* SVE registers Z0..Z31, P0..P15 and FFR and the current vector length VL, are + tracked per-thread. + +* The presence of SVE is reported to userspace via HWCAP_SVE in the aux vector + AT_HWCAP entry. Presence of this flag implies the presence of the SVE + instructions and registers, and the Linux-specific system interfaces + described in this document. SVE is reported in /proc/cpuinfo as "sve". + +* Support for the execution of SVE instructions in userspace can also be + detected by reading the CPU ID register ID_AA64PFR0_EL1 using an MRS + instruction, and checking that the value of the SVE field is nonzero. [3] + + It does not guarantee the presence of the system interfaces described in the + following sections: software that needs to verify that those interfaces are + present must check for HWCAP_SVE instead. + +* Debuggers should restrict themselves to interacting with the target via the + NT_ARM_SVE regset. The recommended way of detecting support for this regset + is to connect to a target process first and then attempt a + ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov). + + +2. Vector length terminology +----------------------------- + +The size of an SVE vector (Z) register is referred to as the "vector length". 
+ +To avoid confusion about the units used to express vector length, the kernel +adopts the following conventions: + +* Vector length (VL) = size of a Z-register in bytes + +* Vector quadwords (VQ) = size of a Z-register in units of 128 bits + +(So, VL = 16 * VQ.) + +The VQ convention is used where the underlying granularity is important, such +as in data structure definitions. In most other situations, the VL convention +is used. This is consistent with the meaning of the "VL" pseudo-register in +the SVE instruction set architecture. + + +3. System call behaviour +------------------------- + +* On syscall, V0..V31 are preserved (as without SVE). Thus, bits [127:0] of + Z0..Z31 are preserved. All other bits of Z0..Z31, and all of P0..P15 and FFR + become unspecified on return from a syscall. + +* The SVE registers are not used to pass arguments to or receive results from + any syscall. + +* In practice the affected registers/bits will be preserved or will be replaced + with zeros on return from a syscall, but userspace should not make + assumptions about this. The kernel behaviour may vary on a case-by-case + basis. + +* All other SVE state of a thread, including the currently configured vector + length, the state of the PR_SVE_VL_INHERIT flag, and the deferred vector + length (if any), is preserved across all syscalls, subject to the specific + exceptions for execve() described in section 6. + + In particular, on return from a fork() or clone(), the parent and new child + process or thread share identical SVE configuration, matching that of the + parent before the call. + + +4. Signal handling +------------------- + +* A new signal frame record sve_context encodes the SVE registers on signal + delivery. [1] + +* This record is supplementary to fpsimd_context. The FPSR and FPCR registers + are only present in fpsimd_context. For convenience, the content of V0..V31 + is duplicated between sve_context and fpsimd_context. + +* The signal frame record for SVE always contains basic metadata, in particular + the thread's vector length (in sve_context.vl). + +* The SVE registers may or may not be included in the record, depending on + whether the registers are live for the thread. The registers are present if + and only if: + sve_context.head.size >= SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)). + +* If the registers are present, the remainder of the record has a vl-dependent + size and layout. Macros SVE_SIG_* are defined [1] to facilitate access to + the members. + +* If the SVE context is too big to fit in sigcontext.__reserved[], then extra + space is allocated on the stack, an extra_context record is written in + __reserved[] referencing this space. sve_context is then written in the + extra space. Refer to [1] for further details about this mechanism. + + +5. Signal return +----------------- + +When returning from a signal handler: + +* If there is no sve_context record in the signal frame, or if the record is + present but contains no register data as desribed in the previous section, + then the SVE registers/bits become non-live and take unspecified values. + +* If sve_context is present in the signal frame and contains full register + data, the SVE registers become live and are populated with the specified + data. However, for backward compatibility reasons, bits [127:0] of Z0..Z31 + are always restored from the corresponding members of fpsimd_context.vregs[] + and not from sve_context. The remaining bits are restored from sve_context. 
+ +* Inclusion of fpsimd_context in the signal frame remains mandatory, + irrespective of whether sve_context is present or not. + +* The vector length cannot be changed via signal return. If sve_context.vl in + the signal frame does not match the current vector length, the signal return + attempt is treated as illegal, resulting in a forced SIGSEGV. + + +6. prctl extensions +-------------------- + +Some new prctl() calls are added to allow programs to manage the SVE vector +length: + +prctl(PR_SVE_SET_VL, unsigned long arg) + + Sets the vector length of the calling thread and related flags, where + arg == vl | flags. Other threads of the calling process are unaffected. + + vl is the desired vector length, where sve_vl_valid(vl) must be true. + + flags: + + PR_SVE_SET_VL_INHERIT + + Inherit the current vector length across execve(). Otherwise, the + vector length is reset to the system default at execve(). (See + Section 9.) + + PR_SVE_SET_VL_ONEXEC + + Defer the requested vector length change until the next execve() + performed by this thread. + + The effect is equivalent to implicit exceution of the following + call immediately after the next execve() (if any) by the thread: + + prctl(PR_SVE_SET_VL, arg & ~PR_SVE_SET_VL_ONEXEC) + + This allows launching of a new program with a different vector + length, while avoiding runtime side effects in the caller. + + + Without PR_SVE_SET_VL_ONEXEC, the requested change takes effect + immediately. + + + Return value: a nonnegative on success, or a negative value on error: + EINVAL: SVE not supported, invalid vector length requested, or + invalid flags. + + + On success: + + * Either the calling thread's vector length or the deferred vector length + to be applied at the next execve() by the thread (dependent on whether + PR_SVE_SET_VL_ONEXEC is present in arg), is set to the largest value + supported by the system that is less than or equal to vl. If vl == + SVE_VL_MAX, the value set will be the largest value supported by the + system. + + * Any previously outstanding deferred vector length change in the calling + thread is cancelled. + + * The returned value describes the resulting configuration, encoded as for + PR_SVE_GET_VL. The vector length reported in this value is the new + current vector length for this thread if PR_SVE_SET_VL_ONEXEC was not + present in arg; otherwise, the reported vector length is the deferred + vector length that will be applied at the next execve() by the calling + thread. + + * Changing the vector length causes all of P0..P15, FFR and all bits of + Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become + unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current + vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC + flag, does not constitute a change to the vector length for this purpose. + + +prctl(PR_SVE_GET_VL) + + Gets the vector length of the calling thread. + + The following flag may be OR-ed into the result: + + PR_SVE_SET_VL_INHERIT + + Vector length will be inherited across execve(). + + There is no way to determine whether there is an outstanding deferred + vector length change (which would only normally be the case between a + fork() or vfork() and the corresponding execve() in typical use). + + To extract the vector length from the result, and it with + PR_SVE_VL_LEN_MASK. + + Return value: a nonnegative value on success, or a negative value on error: + EINVAL: SVE not supported. + + +7. 
ptrace extensions +--------------------- + +* A new regset NT_ARM_SVE is defined for use with PTRACE_GETREGSET and + PTRACE_SETREGSET. + + Refer to [2] for definitions. + +The regset data starts with struct user_sve_header, containing: + + size + + Size of the complete regset, in bytes. + This depends on vl and possibly on other things in the future. + + If a call to PTRACE_GETREGSET requests less data than the value of + size, the caller can allocate a larger buffer and retry in order to + read the complete regset. + + max_size + + Maximum size in bytes that the regset can grow to for the target + thread. The regset won't grow bigger than this even if the target + thread changes its vector length etc. + + vl + + Target thread's current vector length, in bytes. + + max_vl + + Maximum possible vector length for the target thread. + + flags + + either + + SVE_PT_REGS_FPSIMD + + SVE registers are not live (GETREGSET) or are to be made + non-live (SETREGSET). + + The payload is of type struct user_fpsimd_state, with the same + meaning as for NT_PRFPREG, starting at offset + SVE_PT_FPSIMD_OFFSET from the start of user_sve_header. + + Extra data might be appended in the future: the size of the + payload should be obtained using SVE_PT_FPSIMD_SIZE(vq, flags). + + vq should be obtained using sve_vq_from_vl(vl). + + or + + SVE_PT_REGS_SVE + + SVE registers are live (GETREGSET) or are to be made live + (SETREGSET). + + The payload contains the SVE register data, starting at offset + SVE_PT_SVE_OFFSET from the start of user_sve_header, and with + size SVE_PT_SVE_SIZE(vq, flags); + + ... OR-ed with zero or more of the following flags, which have the same + meaning and behaviour as the corresponding PR_SET_VL_* flags: + + SVE_PT_VL_INHERIT + + SVE_PT_VL_ONEXEC (SETREGSET only). + +* The effects of changing the vector length and/or flags are equivalent to + those documented for PR_SVE_SET_VL. + + The caller must make a further GETREGSET call if it needs to know what VL is + actually set by SETREGSET, unless is it known in advance that the requested + VL is supported. + +* In the SVE_PT_REGS_SVE case, the size and layout of the payload depends on + the header fields. The SVE_PT_SVE_*() macros are provided to facilitate + access to the members. + +* In either case, for SETREGSET it is permissible to omit the payload, in which + case only the vector length and flags are changed (along with any + consequences of those changes). + +* For SETREGSET, if an SVE_PT_REGS_SVE payload is present and the + requested VL is not supported, the effect will be the same as if the + payload were omitted, except that an EIO error is reported. No + attempt is made to translate the payload data to the correct layout + for the vector length actually set. The thread's FPSIMD state is + preserved, but the remaining bits of the SVE registers become + unspecified. It is up to the caller to translate the payload layout + for the actual VL and retry. + +* The effect of writing a partial, incomplete payload is unspecified. + + +8. ELF coredump extensions +--------------------------- + +* A NT_ARM_SVE note will be added to each coredump for each thread of the + dumped process. The contents will be equivalent to the data that would have + been read if a PTRACE_GETREGSET of NT_ARM_SVE were executed for each thread + when the coredump was generated. + + +9. 
System runtime configuration +-------------------------------- + +* To mitigate the ABI impact of expansion of the signal frame, a policy + mechanism is provided for administrators, distro maintainers and developers + to set the default vector length for userspace processes: + +/proc/sys/abi/sve_default_vector_length + + Writing the text representation of an integer to this file sets the system + default vector length to the specified value, unless the value is greater + than the maximum vector length supported by the system in which case the + default vector length is set to that maximum. + + The result can be determined by reopening the file and reading its + contents. + + At boot, the default vector length is initially set to 64 or the maximum + supported vector length, whichever is smaller. This determines the initial + vector length of the init process (PID 1). + + Reading this file returns the current system default vector length. + +* At every execve() call, the new vector length of the new process is set to + the system default vector length, unless + + * PR_SVE_SET_VL_INHERIT (or equivalently SVE_PT_VL_INHERIT) is set for the + calling thread, or + + * a deferred vector length change is pending, established via the + PR_SVE_SET_VL_ONEXEC flag (or SVE_PT_VL_ONEXEC). + +* Modifying the system default vector length does not affect the vector length + of any existing process or thread that does not make an execve() call. + + +Appendix A. SVE programmer's model (informative) +================================================= + +This section provides a minimal description of the additions made by SVE to the +ARMv8-A programmer's model that are relevant to this document. + +Note: This section is for information only and not intended to be complete or +to replace any architectural specification. + +A.1. Registers +--------------- + +In A64 state, SVE adds the following: + +* 32 8VL-bit vector registers Z0..Z31 + For each Zn, Zn bits [127:0] alias the ARMv8-A vector register Vn. + + A register write using a Vn register name zeros all bits of the corresponding + Zn except for bits [127:0]. + +* 16 VL-bit predicate registers P0..P15 + +* 1 VL-bit special-purpose predicate register FFR (the "first-fault register") + +* a VL "pseudo-register" that determines the size of each vector register + + The SVE instruction set architecture provides no way to write VL directly. + Instead, it can be modified only by EL1 and above, by writing appropriate + system registers. + +* The value of VL can be configured at runtime by EL1 and above: + 16 <= VL <= VLmax, where VL must be a multiple of 16. + +* The maximum vector length is determined by the hardware: + 16 <= VLmax <= 256. + + (The SVE architecture specifies 256, but permits future architecture + revisions to raise this limit.) + +* FPSR and FPCR are retained from ARMv8-A, and interact with SVE floating-point + operations in a similar way to the way in which they interact with ARMv8 + floating-point operations. + + 8VL-1 128 0 bit index + +---- //// -----------------+ + Z0 | : V0 | + : : + Z7 | : V7 | + Z8 | : * V8 | + : : : + Z15 | : *V15 | + Z16 | : V16 | + : : + Z31 | : V31 | + +---- //// -----------------+ + 31 0 + VL-1 0 +-------+ + +---- //// --+ FPSR | | + P0 | | +-------+ + : | | *FPCR | | + P15 | | +-------+ + +---- //// --+ + FFR | | +-----+ + +---- //// --+ VL | | + +-----+ + +(*) callee-save: + This only applies to bits [63:0] of Z-/V-registers. + FPCR contains callee-save and caller-save bits. See [4] for details. + + +A.2. 
Procedure call standard +----------------------------- + +The ARMv8-A base procedure call standard is extended as follows with respect to +the additional SVE register state: + +* All SVE register bits that are not shared with FP/SIMD are caller-save. + +* Z8 bits [63:0] .. Z15 bits [63:0] are callee-save. + + This follows from the way these bits are mapped to V8..V15, which are caller- + save in the base procedure call standard. + + +Appendix B. ARMv8-A FP/SIMD programmer's model +=============================================== + +Note: This section is for information only and not intended to be complete or +to replace any architectural specification. + +Refer to [4] for for more information. + +ARMv8-A defines the following floating-point / SIMD register state: + +* 32 128-bit vector registers V0..V31 +* 2 32-bit status/control registers FPSR, FPCR + + 127 0 bit index + +---------------+ + V0 | | + : : : + V7 | | + * V8 | | + : : : : + *V15 | | + V16 | | + : : : + V31 | | + +---------------+ + + 31 0 + +-------+ + FPSR | | + +-------+ + *FPCR | | + +-------+ + +(*) callee-save: + This only applies to bits [63:0] of V-registers. + FPCR contains a mixture of callee-save and caller-save bits. + + +References +========== + +[1] arch/arm64/include/uapi/asm/sigcontext.h + AArch64 Linux signal ABI definitions + +[2] arch/arm64/include/uapi/asm/ptrace.h + AArch64 Linux ptrace ABI definitions + +[3] linux/Documentation/arm64/cpu-feature-registers.txt + +[4] ARM IHI0055C + http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf + http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html + Procedure Call Standard for the ARM 64-bit Architecture (AArch64) diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index 7654a81bc577..3c0b48498db9 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -133,6 +133,9 @@ struct sve_context { * The SVE architecture leaves space for future expansion of the * vector length beyond its initial architectural limit of 2048 bits * (16 quadwords). + * + * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ + * terminology. */ #define SVE_VQ_BYTES 16 /* number of bytes per quadword */ -- cgit From fb8722735f50cd51204bfbeefa2e5e7e9ff5b2be Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Fri, 3 Nov 2017 15:18:58 +0100 Subject: arm64: support __int128 on gcc 5+ Versions of gcc prior to gcc 5 emitted a __multi3 function call when dealing with TI types, resulting in failures when trying to link to libgcc, and more generally, bad performance. However, since gcc 5, the compiler supports actually emitting fast instructions, which means we can at long last enable this option and receive the speedups. The gcc commit that added proper Aarch64 support is: https://gcc.gnu.org/git/?p=gcc.git;a=commitdiff;h=d1ae7bb994f49316f6f63e6173f2931e837a351d This commit appears to be part of the gcc 5 release. There are still a few instructions, __ashlti3 and __ashrti3, which require libgcc, which is fine. Rather than linking to libgcc, we simply provide them ourselves, since they're not that complicated. Signed-off-by: Jason A. 
Donenfeld Signed-off-by: Will Deacon --- arch/arm64/Makefile | 2 ++ arch/arm64/lib/Makefile | 2 +- arch/arm64/lib/tishift.S | 59 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/lib/tishift.S diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 953e43dd0417..b35788c909f1 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -57,6 +57,8 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) +KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128) + ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index a0abc142c92b..55bdb01f1ea6 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -2,7 +2,7 @@ lib-y := bitops.o clear_user.o delay.o copy_from_user.o \ copy_to_user.o copy_in_user.o copy_page.o \ clear_page.o memchr.o memcpy.o memmove.o memset.o \ memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \ - strchr.o strrchr.o + strchr.o strrchr.o tishift.o # Tell the compiler to treat all general purpose registers (with the # exception of the IP registers, which are already handled by the caller diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S new file mode 100644 index 000000000000..7236776655ed --- /dev/null +++ b/arch/arm64/lib/tishift.S @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2017 Jason A. Donenfeld . All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include + +ENTRY(__ashlti3) + cbz x2, 1f + mov x3, #64 + sub x3, x3, x2 + cmp x3, #0 + b.le 2f + lsl x1, x1, x2 + lsr x3, x0, x3 + lsl x2, x0, x2 + orr x1, x1, x3 + mov x0, x2 +1: + ret +2: + neg w1, w3 + mov x2, #0 + lsl x1, x0, x1 + mov x0, x2 + ret +ENDPROC(__ashlti3) + +ENTRY(__ashrti3) + cbz x2, 3f + mov x3, #64 + sub x3, x3, x2 + cmp x3, #0 + b.le 4f + lsr x0, x0, x2 + lsl x3, x1, x3 + asr x2, x1, x2 + orr x0, x0, x3 + mov x1, x2 +3: + ret +4: + neg w0, w3 + asr x2, x1, #63 + asr x0, x1, x0 + mov x1, x2 + ret +ENDPROC(__ashrti3) -- cgit From 9bfe7553fadb269e45a6e10f68b727957dff5676 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Tue, 7 Nov 2017 01:49:54 +0000 Subject: arm64: Implement __lshrti3 library function Commit fb8722735f50 ("arm64: support __int128 on gcc 5+") added support for the __int128 data type, but this breaks the build in some configurations where GCC ends up emitting calls to the __lshrti3 helper in libgcc, which results in a link error: kernel/sched/fair.o: In function `__calc_delta': fair.c:(.text+0xca0): undefined reference to `__lshrti3' kernel/time/timekeeping.o: In function `timekeeping_resume': timekeeping.c:(.text+0x3f60): undefined reference to `__lshrti3' make: *** [vmlinux] Error 1 Fix the build by providing an implementation of __lshrti3, like we do already for __ashlti3 and __ashrti3. 
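As an illustration only (not part of the patch): the kind of code that pulls in the helper is any variable-width logical right shift of an unsigned __int128, for example:

    /*
     * With __int128 support enabled, GCC may lower this to a call to
     * __lshrti3 rather than inline instructions in some configurations,
     * which is why the kernel now provides the routine itself.
     */
    static inline unsigned long long shr128(unsigned __int128 x, unsigned int n)
    {
            return (unsigned long long)(x >> n);
    }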
Reported-by: Arnd Bergmann Signed-off-by: Jason A. Donenfeld Signed-off-by: Will Deacon --- arch/arm64/lib/tishift.S | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S index 7236776655ed..0179a43cc045 100644 --- a/arch/arm64/lib/tishift.S +++ b/arch/arm64/lib/tishift.S @@ -57,3 +57,24 @@ ENTRY(__ashrti3) mov x1, x2 ret ENDPROC(__ashrti3) + +ENTRY(__lshrti3) + cbz x2, 1f + mov x3, #64 + sub x3, x3, x2 + cmp x3, #0 + b.le 2f + lsr x0, x0, x2 + lsl x3, x1, x3 + lsr x2, x1, x2 + orr x0, x0, x3 + mov x1, x2 +1: + ret +2: + neg w0, w3 + mov x2, #0 + lsr x0, x1, x0 + mov x1, x2 + ret +ENDPROC(__lshrti3) -- cgit From 6cfa7cc46b1a7a15d81d5389c99cfca633c12b8e Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Mon, 6 Nov 2017 18:07:11 +0000 Subject: arm64: Make ARMV8_DEPRECATED depend on SYSCTL If CONFIG_SYSCTL=n and CONFIG_ARMV8_DEPRECATED=y, the deprecated instruction emulation code currently leaks some memory at boot time, and won't have any runtime control interface. This does not feel like useful or intended behaviour... This patch adds a dependency on CONFIG_SYSCTL, so that such a kernel can't be built in the first place. It's probably not worth adding the error-handling / cleanup code that would be needed to deal with this otherwise: people who desperately need the emulation can still enable SYSCTL. Acked-by: Arnd Bergmann Signed-off-by: Dave Martin Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1046074e0c24..df296b79ea15 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -807,6 +807,7 @@ config FORCE_MAX_ZONEORDER menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT + depends on SYSCTL help Legacy software support may require certain instructions that have been deprecated or obsoleted in the architecture. -- cgit