From 3567fa63cb5680d3e1e8375c547a0e305c8a0ff5 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 13 Dec 2023 09:40:32 +0100 Subject: arm64: kaslr: Adjust randomization range dynamically Currently, we base the KASLR randomization range on a rough estimate of the available space in the upper VA region: the lower 1/4th has the module region and the upper 1/4th has the fixmap, vmemmap and PCI I/O ranges, and so we pick a random location in the remaining space in the middle. Once we enable support for 5-level paging with 4k pages, this no longer works: the vmemmap region, being dimensioned to cover a 52-bit linear region, takes up so much space in the upper VA region (the size of which is based on a 48-bit VA space for compatibility with non-LVA hardware) that the region above the vmalloc region takes up more than a quarter of the available space. So instead of a heuristic, let's derive the randomization range from the actual boundaries of the vmalloc region. Signed-off-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20231213084024.2367360-16-ardb@google.com Signed-off-by: Catalin Marinas Acked-by: Mark Rutland --- arch/arm64/kernel/image-vars.h | 2 ++ arch/arm64/kernel/pi/kaslr_early.c | 11 ++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 5e4dc72ab1bd..e931ce078a00 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -36,6 +36,8 @@ PROVIDE(__pi___memcpy = __pi_memcpy); PROVIDE(__pi___memmove = __pi_memmove); PROVIDE(__pi___memset = __pi_memset); +PROVIDE(__pi_vabits_actual = vabits_actual); + #ifdef CONFIG_KVM /* diff --git a/arch/arm64/kernel/pi/kaslr_early.c b/arch/arm64/kernel/pi/kaslr_early.c index 17bff6e399e4..b9e0bb4bc6a9 100644 --- a/arch/arm64/kernel/pi/kaslr_early.c +++ b/arch/arm64/kernel/pi/kaslr_early.c @@ -14,6 +14,7 @@ #include #include +#include /* taken from lib/string.c */ static char *__strstr(const char *s1, const char *s2) @@ -87,7 +88,7 @@ static u64 get_kaslr_seed(void *fdt) asmlinkage u64 kaslr_early_init(void *fdt) { - u64 seed; + u64 seed, range; if (is_kaslr_disabled_cmdline(fdt)) return 0; @@ -102,9 +103,9 @@ asmlinkage u64 kaslr_early_init(void *fdt) /* * OK, so we are proceeding with KASLR enabled. Calculate a suitable * kernel image offset from the seed. Let's place the kernel in the - * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of - * the lower and upper quarters to avoid colliding with other - * allocations. + * 'middle' half of the VMALLOC area, and stay clear of the lower and + * upper quarters to avoid colliding with other allocations. */ - return BIT(VA_BITS_MIN - 3) + (seed & GENMASK(VA_BITS_MIN - 3, 0)); + range = (VMALLOC_END - KIMAGE_VADDR) / 2; + return range / 2 + (((__uint128_t)range * seed) >> 64); } -- cgit From 270de609ae2af441d15289406340ff209e5dc864 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 6 Feb 2024 12:38:46 +0000 Subject: arm64: Simplify do_notify_resume() DAIF masking In do_notify_resume, we handle _TIF_NEED_RESCHED differently from all other flags, leaving IRQ+FIQ masked when calling into schedule(). This masking is a historical artifact, and it is not currently necessary to mask IRQ+FIQ when calling into schedule (as evidenced by the generic exit_to_user_mode_loop(), which unmasks IRQs before checking _TIF_NEED_RESCHED and calling schedule()). 
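For comparison, the generic loop mentioned above has roughly the following shape (a paraphrased sketch of exit_to_user_mode_loop() from kernel/entry/common.c, with the IRQ enable/disable helpers simplified; not the verbatim source):

    static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                                                unsigned long ti_work)
    {
            while (ti_work & EXIT_TO_USER_MODE_WORK) {
                    /* IRQs are unmasked before each pass over the work bits... */
                    local_irq_enable();

                    /* ...so schedule() is entered with IRQs unmasked */
                    if (ti_work & _TIF_NEED_RESCHED)
                            schedule();

                    /* (uprobes, signals, notify-resume, etc. handled here) */

                    local_irq_disable();
                    ti_work = read_thread_flags();
            }
            return ti_work;
    }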
This patch removes the special case for _TIF_NEED_RESCHED, moving this check into the main loop such that schedule() will be called from a regular process context with IRQ+FIQ unmasked. This is a minor simplification to do_notify_resume() and brings it into line with the generic exit_to_user_mode_loop() logic. This will also aid subsequent rework of DAIF management. Signed-off-by: Mark Rutland Cc: James Morse Cc: Mark Brown Cc: Will Deacon Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20240206123848.1696480-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas Tested-by: Itaru Kitayama --- arch/arm64/kernel/signal.c | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 0e8beb3349ea..50e108741599 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -1281,32 +1281,28 @@ static void do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) { do { - if (thread_flags & _TIF_NEED_RESCHED) { - /* Unmask Debug and SError for the next task */ - local_daif_restore(DAIF_PROCCTX_NOIRQ); + local_daif_restore(DAIF_PROCCTX); + if (thread_flags & _TIF_NEED_RESCHED) schedule(); - } else { - local_daif_restore(DAIF_PROCCTX); - if (thread_flags & _TIF_UPROBE) - uprobe_notify_resume(regs); + if (thread_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); - if (thread_flags & _TIF_MTE_ASYNC_FAULT) { - clear_thread_flag(TIF_MTE_ASYNC_FAULT); - send_sig_fault(SIGSEGV, SEGV_MTEAERR, - (void __user *)NULL, current); - } + if (thread_flags & _TIF_MTE_ASYNC_FAULT) { + clear_thread_flag(TIF_MTE_ASYNC_FAULT); + send_sig_fault(SIGSEGV, SEGV_MTEAERR, + (void __user *)NULL, current); + } - if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) - do_signal(regs); + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs); - if (thread_flags & _TIF_NOTIFY_RESUME) - resume_user_mode_work(regs); + if (thread_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); - if (thread_flags & _TIF_FOREIGN_FPSTATE) - fpsimd_restore_current_state(); - } + if (thread_flags & _TIF_FOREIGN_FPSTATE) + fpsimd_restore_current_state(); local_daif_mask(); thread_flags = read_thread_flags(); -- cgit From 997d79eb938e981ab0d3714d39ed148bce131d9e Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 6 Feb 2024 12:38:47 +0000 Subject: arm64: Move do_notify_resume() to entry-common.c Currently do_notify_resume() lives in arch/arm64/kernel/signal.c, but it would make more sense for it to live in entry-common.c as it handles more than signals, and is coupled with the rest of the return-to-userspace sequence (e.g. with unusual DAIF masking that matches the exception return requirements). Move do_notify_resume() to entry-common.c. There should be no functional change as a result of this patch. 
Signed-off-by: Mark Rutland Cc: James Morse Cc: Mark Brown Cc: Will Deacon Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20240206123848.1696480-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas Tested-by: Itaru Kitayama --- arch/arm64/kernel/entry-common.c | 32 ++++++++++++++++++++++++++++++++ arch/arm64/kernel/signal.c | 35 ++--------------------------------- 2 files changed, 34 insertions(+), 33 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 0fc94207e69a..3c849ad03bf8 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -126,6 +127,37 @@ static __always_inline void __exit_to_user_mode(void) lockdep_hardirqs_on(CALLER_ADDR0); } +static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) +{ + do { + local_daif_restore(DAIF_PROCCTX); + + if (thread_flags & _TIF_NEED_RESCHED) + schedule(); + + if (thread_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + + if (thread_flags & _TIF_MTE_ASYNC_FAULT) { + clear_thread_flag(TIF_MTE_ASYNC_FAULT); + send_sig_fault(SIGSEGV, SEGV_MTEAERR, + (void __user *)NULL, current); + } + + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + + if (thread_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); + + if (thread_flags & _TIF_FOREIGN_FPSTATE) + fpsimd_restore_current_state(); + + local_daif_mask(); + thread_flags = read_thread_flags(); + } while (thread_flags & _TIF_WORK_MASK); +} + static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs) { unsigned long flags; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 50e108741599..c08e6465e0f4 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -16,8 +16,8 @@ #include #include #include -#include #include +#include #include #include @@ -1207,7 +1207,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. 
*/ -static void do_signal(struct pt_regs *regs) +void do_signal(struct pt_regs *regs) { unsigned long continue_addr = 0, restart_addr = 0; int retval = 0; @@ -1278,37 +1278,6 @@ static void do_signal(struct pt_regs *regs) restore_saved_sigmask(); } -void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) -{ - do { - local_daif_restore(DAIF_PROCCTX); - - if (thread_flags & _TIF_NEED_RESCHED) - schedule(); - - if (thread_flags & _TIF_UPROBE) - uprobe_notify_resume(regs); - - if (thread_flags & _TIF_MTE_ASYNC_FAULT) { - clear_thread_flag(TIF_MTE_ASYNC_FAULT); - send_sig_fault(SIGSEGV, SEGV_MTEAERR, - (void __user *)NULL, current); - } - - if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) - do_signal(regs); - - if (thread_flags & _TIF_NOTIFY_RESUME) - resume_user_mode_work(regs); - - if (thread_flags & _TIF_FOREIGN_FPSTATE) - fpsimd_restore_current_state(); - - local_daif_mask(); - thread_flags = read_thread_flags(); - } while (thread_flags & _TIF_WORK_MASK); -} - unsigned long __ro_after_init signal_minsigstksz; /* -- cgit From 97d935faacde478501eea6f75c86beea71f29ba3 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 6 Feb 2024 12:38:48 +0000 Subject: arm64: Unmask Debug + SError in do_notify_resume() When returning to a user context, the arm64 entry code masks all DAIF exceptions before handling pending work in exit_to_user_mode_prepare() and do_notify_resume(), where it will transiently unmask all DAIF exceptions. This is a holdover from the old entry assembly, which conservatively masked all DAIF exceptions, and it's only necessary to mask interrupts at this point during the exception return path, so long as we subsequently mask all DAIF exceptions before the actual exception return. While most DAIF manipulation follows a save...restore sequence, the manipulation in do_notify_resume() is the other way around, unmasking all DAIF exceptions before masking them again. This is unfortunate as we unnecessarily mask Debug and SError exceptions, and it would be nice to remove this special case to make DAIF manipulation simpler and more consistent. This patch changes exit_to_user_mode_prepare() and do_notify_resume() to only mask interrupts while handling pending work, masking other DAIF exceptions after this has completed. This removes the unusual DAIF manipulation and allows Debug and SError exceptions to be taken for a slightly longer window during the exception return path.
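For reference, the two masking levels involved differ only in the IRQ/FIQ bits; paraphrasing the definitions in arch/arm64/include/asm/daifflags.h (and ignoring the pseudo-NMI/PMR details):

    #define DAIF_PROCCTX        0                        /* D, A, I, F all unmasked */
    #define DAIF_PROCCTX_NOIRQ  (PSR_I_BIT | PSR_F_BIT)  /* only IRQ and FIQ masked */

With this change the work loop toggles only the I/F bits via local_irq_enable()/local_irq_disable(), so Debug and SError remain unmasked until the final local_daif_mask() immediately before the exception return.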
Signed-off-by: Mark Rutland Cc: James Morse Cc: Mark Brown Cc: Will Deacon Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20240206123848.1696480-4-mark.rutland@arm.com Signed-off-by: Catalin Marinas Tested-by: Itaru Kitayama --- arch/arm64/kernel/entry-common.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 3c849ad03bf8..b77a15955f28 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -130,7 +130,7 @@ static __always_inline void __exit_to_user_mode(void) static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) { do { - local_daif_restore(DAIF_PROCCTX); + local_irq_enable(); if (thread_flags & _TIF_NEED_RESCHED) schedule(); @@ -153,7 +153,7 @@ static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) if (thread_flags & _TIF_FOREIGN_FPSTATE) fpsimd_restore_current_state(); - local_daif_mask(); + local_irq_disable(); thread_flags = read_thread_flags(); } while (thread_flags & _TIF_WORK_MASK); } @@ -162,12 +162,14 @@ static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs) { unsigned long flags; - local_daif_mask(); + local_irq_disable(); flags = read_thread_flags(); if (unlikely(flags & _TIF_WORK_MASK)) do_notify_resume(regs, flags); + local_daif_mask(); + lockdep_sys_exit(); } -- cgit From 253751233b19b58b1d361388c8d2b40c940e729c Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 8 Feb 2024 14:59:16 +0000 Subject: arm64: kretprobes: acquire the regs via a BRK exception On arm64, kprobes always take an exception and so create a struct pt_regs through the usual exception entry logic. Similarly, kretprobes takes an exception for function entry, but for function returns it uses a trampoline which attempts to create a struct pt_regs without taking an exception. This is problematic for a few reasons, including: 1) The kretprobes trampoline neither saves nor restores all of the portions of PSTATE. Before invoking the handler it saves a number of portions of PSTATE, and after returning from the handler it restores NZCV before returning to the original return address provided by the handler. 2) The kretprobe trampoline constructs the PSTATE value piecemeal from special purpose registers as it cannot read all of PSTATE atomically without taking an exception. This is somewhat fragile, and it's not possible to reliably recover PSTATE information which only exists on some physical CPUs (e.g. when SSBS support is mismatched). Today the kretprobes trampoline does not record: - BTYPE - SSBS - ALLINT - SS - PAN - UAO - DIT - TCO ... and this will only get worse with future architecture extensions which add more PSTATE bits. 3) The kretprobes trampoline doesn't store portions of struct pt_regs (e.g. the PMR value when using pseudo-NMIs). Due to this, helpers which operate on a struct pt_regs, such as interrupts_enabled(), may not work correctly. 4) The function entry and function exit handlers run in different contexts. The entry handler will always be run in a debug exception context (which is currently treated as an NMI), but the return will be treated as whatever context the instrumented function was executed in. The differences between these contexts are liable to cause problems (e.g. as the two can be differently interruptible or preemptible, adversely affecting synchronization between the handlers).
5) As the kretprobes trampoline runs in the same context as the code being probed, it is subject to the same single-stepping context, which may not be desirable if this is being driven by the kprobes handlers. Overall, this is fragile, painful to maintain, and gets in the way of supporting other things (e.g. RELIABLE_STACKTRACE, FEAT_NMI). This patch addresses these issues by replacing the kretprobes trampoline with a `BRK` instruction, and using an exception boundary to acquire and restore the regs, in the same way as the regular kprobes trampoline. I've tested this atop v6.8-rc3: | KTAP version 1 | 1..1 | KTAP version 1 | # Subtest: kprobes_test | # module: test_kprobes | 1..7 | ok 1 test_kprobe | ok 2 test_kprobes | ok 3 test_kprobe_missed | ok 4 test_kretprobe | ok 5 test_kretprobes | ok 6 test_stacktrace_on_kretprobe | ok 7 test_stacktrace_on_nested_kretprobe | # kprobes_test: pass:7 fail:0 skip:0 total:7 | # Totals: pass:7 fail:0 skip:0 total:7 | ok 1 kprobes_test Signed-off-by: Mark Rutland Cc: Will Deacon Cc: Florent Revest Cc: Masami Hiramatsu Cc: Steven Rostedt Acked-by: Masami Hiramatsu (Google) Link: https://lore.kernel.org/r/20240208145916.2004154-1-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/probes/kprobes.c | 21 ++++++-- arch/arm64/kernel/probes/kprobes_trampoline.S | 78 +++------------------------ 2 files changed, 22 insertions(+), 77 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 70b91a8c6bb3..327855a11df2 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -371,6 +371,21 @@ static struct break_hook kprobes_break_ss_hook = { .fn = kprobe_breakpoint_ss_handler, }; +static int __kprobes +kretprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr) +{ + if (regs->pc != (unsigned long)__kretprobe_trampoline) + return DBG_HOOK_ERROR; + + regs->pc = kretprobe_trampoline_handler(regs, (void *)regs->regs[29]); + return DBG_HOOK_HANDLED; +} + +static struct break_hook kretprobes_break_hook = { + .imm = KRETPROBES_BRK_IMM, + .fn = kretprobe_breakpoint_handler, +}; + /* * Provide a blacklist of symbols identifying ranges which cannot be kprobed. * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
@@ -396,11 +411,6 @@ int __init arch_populate_kprobe_blacklist(void) return ret; } -void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) -{ - return (void *)kretprobe_trampoline_handler(regs, (void *)regs->regs[29]); -} - void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -420,6 +430,7 @@ int __init arch_init_kprobes(void) { register_kernel_break_hook(&kprobes_break_hook); register_kernel_break_hook(&kprobes_break_ss_hook); + register_kernel_break_hook(&kretprobes_break_hook); return 0; } diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S index 9a6499bed58b..a362f3dbb3d1 100644 --- a/arch/arm64/kernel/probes/kprobes_trampoline.S +++ b/arch/arm64/kernel/probes/kprobes_trampoline.S @@ -4,83 +4,17 @@ */ #include -#include +#include #include .text - .macro save_all_base_regs - stp x0, x1, [sp, #S_X0] - stp x2, x3, [sp, #S_X2] - stp x4, x5, [sp, #S_X4] - stp x6, x7, [sp, #S_X6] - stp x8, x9, [sp, #S_X8] - stp x10, x11, [sp, #S_X10] - stp x12, x13, [sp, #S_X12] - stp x14, x15, [sp, #S_X14] - stp x16, x17, [sp, #S_X16] - stp x18, x19, [sp, #S_X18] - stp x20, x21, [sp, #S_X20] - stp x22, x23, [sp, #S_X22] - stp x24, x25, [sp, #S_X24] - stp x26, x27, [sp, #S_X26] - stp x28, x29, [sp, #S_X28] - add x0, sp, #PT_REGS_SIZE - stp lr, x0, [sp, #S_LR] - /* - * Construct a useful saved PSTATE - */ - mrs x0, nzcv - mrs x1, daif - orr x0, x0, x1 - mrs x1, CurrentEL - orr x0, x0, x1 - mrs x1, SPSel - orr x0, x0, x1 - stp xzr, x0, [sp, #S_PC] - .endm - - .macro restore_all_base_regs - ldr x0, [sp, #S_PSTATE] - and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT) - msr nzcv, x0 - ldp x0, x1, [sp, #S_X0] - ldp x2, x3, [sp, #S_X2] - ldp x4, x5, [sp, #S_X4] - ldp x6, x7, [sp, #S_X6] - ldp x8, x9, [sp, #S_X8] - ldp x10, x11, [sp, #S_X10] - ldp x12, x13, [sp, #S_X12] - ldp x14, x15, [sp, #S_X14] - ldp x16, x17, [sp, #S_X16] - ldp x18, x19, [sp, #S_X18] - ldp x20, x21, [sp, #S_X20] - ldp x22, x23, [sp, #S_X22] - ldp x24, x25, [sp, #S_X24] - ldp x26, x27, [sp, #S_X26] - ldp x28, x29, [sp, #S_X28] - .endm - SYM_CODE_START(__kretprobe_trampoline) - sub sp, sp, #PT_REGS_SIZE - - save_all_base_regs - - /* Setup a frame pointer. */ - add x29, sp, #S_FP - - mov x0, sp - bl trampoline_probe_handler /* - * Replace trampoline address in lr with actual orig_ret_addr return - * address. + * Trigger a breakpoint exception. The PC will be adjusted by + * kretprobe_breakpoint_handler(), and no subsequent instructions will + * be executed from the trampoline. */ - mov lr, x0 - - /* The frame pointer (x29) is restored with other registers. */ - restore_all_base_regs - - add sp, sp, #PT_REGS_SIZE - ret - + brk #KRETPROBES_BRK_IMM + ASM_BUG() SYM_CODE_END(__kretprobe_trampoline) -- cgit From bce79b0c8097ae8b9ad38d7cb8522b80a9b8ee00 Mon Sep 17 00:00:00 2001 From: Dawei Li Date: Fri, 2 Feb 2024 12:02:11 +0800 Subject: arm64: remove unneeded BUILD_BUG_ON assertion Since commit c02433dd6de3 ("arm64: split thread_info from task stack"), CONFIG_THREAD_INFO_IN_TASK is enabled unconditionally for arm64. So remove this always-true assertion from arch_dup_task_struct. 
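To see why the assertion was always true: IS_ENABLED() expands to the constant 1 for any built-in option, and arm64 selects THREAD_INFO_IN_TASK unconditionally, so the check reduces to a compile-time no-op:

    /* IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK) is the constant 1 on arm64, so: */
    BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
    /* ...is equivalent to: */
    BUILD_BUG_ON(0);    /* can never fire */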
Signed-off-by: Dawei Li Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20240202040211.3118918-1-dawei.li@shingroup.cn Signed-off-by: Catalin Marinas --- arch/arm64/kernel/process.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 7387b68c745b..4ae31b7af6c3 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -290,9 +290,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) fpsimd_preserve_current_state(); *dst = *src; - /* We rely on the above assignment to initialize dst's thread_flags: */ - BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK)); - /* * Detach src's sve_state (if any) from dst so that it does not * get erroneously used or freed prematurely. dst's copies -- cgit From 58a0484eaf5ec1e94e530c1074abf852354eca8c Mon Sep 17 00:00:00 2001 From: Kemeng Shi Date: Wed, 31 Jan 2024 01:55:04 +0800 Subject: arm64: make member of struct pt_regs and its offset macro in the same order In struct pt_regs, member pstate is after member pc. Move the offset macro of pstate after the offset macro of pc to improve readability a little. Signed-off-by: Kemeng Shi Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20240130175504.106364-1-shikemeng@huaweicloud.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/asm-offsets.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 5a7dbbe0ce63..81496083c041 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -75,8 +75,8 @@ int main(void) DEFINE(S_FP, offsetof(struct pt_regs, regs[29])); DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); DEFINE(S_SP, offsetof(struct pt_regs, sp)); - DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); DEFINE(S_PC, offsetof(struct pt_regs, pc)); + DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1)); DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save)); -- cgit From 2f0090549b649cc9fd61c0193189c25f8fc03119 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 13 Feb 2024 15:32:45 +0000 Subject: arm64/sve: Ensure that all fields in ZCR_EL1 are set to known values At present nothing in our CPU initialisation code ever sets unknown fields in ZCR_EL1 to known values; all updates to ZCR_EL1 are read/modify/write sequences for LEN. All the unknown fields are RES0, so explicitly initialise them as such to avoid future surprises.
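To illustrate the difference: a typical vector-length update only touches LEN, so every other field keeps whatever value it happened to have (a sketch using the usual sysreg helpers and ZCR_ELx_LEN_MASK; not taken verbatim from the kernel):

    /* read/modify/write: only LEN changes, unknown fields are left as-is */
    u64 zcr = read_sysreg_s(SYS_ZCR_EL1);
    zcr &= ~ZCR_ELx_LEN_MASK;
    zcr |= vq - 1;                  /* vector length in 128-bit quadwords, minus 1 */
    write_sysreg_s(zcr, SYS_ZCR_EL1);

    /* the patch adds a one-off explicit init so the RES0 fields start from zero */
    write_sysreg_s(0, SYS_ZCR_EL1);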
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20240213-arm64-fp-init-vec-cr-v1-1-7e7c2d584f26@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index a5dc6f764195..cc3c9ad877a8 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1134,6 +1134,8 @@ void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p) { write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1); isb(); + + write_sysreg_s(0, SYS_ZCR_EL1); } void __init sve_setup(void) -- cgit From 93576e34988757ed431e305ba9d3597e46dffe6b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 13 Feb 2024 15:32:46 +0000 Subject: arm64/sme: Ensure that all fields in SMCR_EL1 are set to known values At present nothing in our CPU initialisation code ever sets unknown fields in SMCR_EL1 to known values; all updates to SMCR_EL1 are read/modify/write sequences. All the unknown fields are RES0, so explicitly initialise them as such to avoid future surprises. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20240213-arm64-fp-init-vec-cr-v1-2-7e7c2d584f26@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index cc3c9ad877a8..f96907b813fa 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1247,6 +1247,9 @@ void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p) write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1); isb(); + /* Ensure all bits in SMCR are set to known values */ + write_sysreg_s(0, SYS_SMCR_EL1); + /* Allow EL0 to access TPIDR2 */ write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1); isb(); -- cgit From 1984c805461f7fc4e96855eb4d94043ffb8f873d Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Tue, 9 Jan 2024 00:46:50 -0300 Subject: arm64: remove unnecessary ifdefs around is_compat_task() Currently some parts of the codebase will test for CONFIG_COMPAT before testing is_compat_task(). is_compat_task() is an inlined function only present on CONFIG_COMPAT. On the other hand, for !CONFIG_COMPAT, we have in linux/compat.h: #define is_compat_task() (0) Since we have this define available in every usage of is_compat_task() for !CONFIG_COMPAT, it's unnecessary to keep the ifdefs: the compiler is smart enough to optimize out those snippets when CONFIG_COMPAT=n. This requires some regset code as well as a few other defines to be made available on !CONFIG_COMPAT, so some symbols can get resolved before getting optimized out.
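A short sketch of why the ifdefs are redundant: with CONFIG_COMPAT=n the condition is the constant 0, so the compiler discards the whole branch; the only requirement is that the symbols used inside it remain declared, which is what the regset/define changes provide:

    /* linux/compat.h with CONFIG_COMPAT=n: */
    #define is_compat_task() (0)

    /*
     * So in do_ni_syscall() this block is statically dead and compiled
     * out, provided compat_arm_syscall() is still declared somewhere:
     */
    if (is_compat_task()) {
            long ret = compat_arm_syscall(regs, scno);
            if (ret != -ENOSYS)
                    return ret;
    }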
Signed-off-by: Leonardo Bras Reviewed-by: Arnd Bergmann Link: https://lore.kernel.org/r/20240109034651.478462-2-leobras@redhat.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ptrace.c | 8 +++----- arch/arm64/kernel/syscall.c | 5 +---- 2 files changed, 4 insertions(+), 9 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index dc6cf0e37194..366fa248b968 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -174,7 +174,6 @@ static void ptrace_hbptriggered(struct perf_event *bp, struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); const char *desc = "Hardware breakpoint trap (ptrace)"; -#ifdef CONFIG_COMPAT if (is_compat_task()) { int si_errno = 0; int i; @@ -196,7 +195,7 @@ static void ptrace_hbptriggered(struct perf_event *bp, desc); return; } -#endif + arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc); } @@ -1596,7 +1595,6 @@ static const struct user_regset_view user_aarch64_view = { .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets) }; -#ifdef CONFIG_COMPAT enum compat_regset { REGSET_COMPAT_GPR, REGSET_COMPAT_VFP, @@ -1853,6 +1851,7 @@ static const struct user_regset_view user_aarch32_ptrace_view = { .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets) }; +#ifdef CONFIG_COMPAT static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off, compat_ulong_t __user *ret) { @@ -2114,7 +2113,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, const struct user_regset_view *task_user_regset_view(struct task_struct *task) { -#ifdef CONFIG_COMPAT /* * Core dumping of 32-bit tasks or compat ptrace requests must use the * user_aarch32_view compatible with arm32. Native ptrace requests on @@ -2125,7 +2123,7 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) return &user_aarch32_view; else if (is_compat_thread(task_thread_info(task))) return &user_aarch32_ptrace_view; -#endif + return &user_aarch64_view; } diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 9a70d9746b66..ad198262b981 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -20,14 +20,11 @@ long sys_ni_syscall(void); static long do_ni_syscall(struct pt_regs *regs, int scno) { -#ifdef CONFIG_COMPAT - long ret; if (is_compat_task()) { - ret = compat_arm_syscall(regs, scno); + long ret = compat_arm_syscall(regs, scno); if (ret != -ENOSYS) return ret; } -#endif return sys_ni_syscall(); } -- cgit From 6d1ce806e17fcabe91a912363cc1a5f108734627 Mon Sep 17 00:00:00 2001 From: Ryo Takakura Date: Wed, 28 Feb 2024 11:28:36 +0900 Subject: arm64: Update setup_arch() comment on interrupt masking DAIF_PROCCTX_NOIRQ contains the FIQ bit. Update the comment as only asynchronous aborts are unmasked and FIQ is still masked. Signed-off-by: Ryo Takakura Link: https://lore.kernel.org/r/20240228022836.1756-1-takakura@valinux.co.jp Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/kernel/setup.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 42c690bb2d60..ab43bfa85368 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -320,9 +320,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) dynamic_scs_init(); /* - * Unmask asynchronous aborts and fiq after bringing up possible - * earlycon. 
(Report possible System Errors once we can report this occurred). + * Unmask SError as soon as possible after initializing earlycon so + * that we can report any SErrors immediately. */ local_daif_restore(DAIF_PROCCTX_NOIRQ); -- cgit From 622442666dcca0f273fd8b1adf80cd1893ed88cf Mon Sep 17 00:00:00 2001 From: Liao Chang Date: Thu, 29 Feb 2024 10:52:08 +0000 Subject: arm64: cpufeatures: Clean up temporary variable to simplify code Clean up one temporary variable to simplify code in capability detection. Signed-off-by: Liao Chang Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20240229105208.456704-1-liaochang1@huawei.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 8d1a634a403e..0e900b23f7ab 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -3052,13 +3052,9 @@ static void __init enable_cpu_capabilities(u16 scope_mask) boot_scope = !!(scope_mask & SCOPE_BOOT_CPU); for (i = 0; i < ARM64_NCAPS; i++) { - unsigned int num; - caps = cpucap_ptrs[i]; - if (!caps || !(caps->type & scope_mask)) - continue; - num = caps->capability; - if (!cpus_have_cap(num)) + if (!caps || !(caps->type & scope_mask) || + !cpus_have_cap(caps->capability)) continue; if (boot_scope && caps->cpu_enable) -- cgit From 9d6b6789c8787fb1183d176a00569fb9b192243d Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 29 Feb 2024 14:04:31 +0530 Subject: arm64/hw_breakpoint: Directly use ESR_ELx_WNR for a watchpoint exception Let's use the existing ISS encoding for a watchpoint exception, i.e. ESR_ELx_WNR. This represents an instruction either writing to or reading from a memory location during a watchpoint exception. While here, drop the non-standard macro AARCH64_ESR_ACCESS_MASK. Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Mark Brown Signed-off-by: Anshuman Khandual Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20240229083431.356578-1-anshuman.khandual@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/hw_breakpoint.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 35225632d70a..2f5755192c2b 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -779,7 +780,7 @@ static int watchpoint_handler(unsigned long addr, unsigned long esr, * Check that the access type matches. * 0 => load, otherwise => store */ - access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W : + access = (esr & ESR_ELx_WNR) ? HW_BREAKPOINT_W : HW_BREAKPOINT_R; if (!(access & hw_breakpoint_type(wp))) continue; -- cgit From cc9f69a3dad3b64b299dc2d5f95935fe16cb8b79 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 6 Mar 2024 23:14:46 +0000 Subject: arm64/cpufeature: Hook new identification registers up to cpufeature The 2023 architecture extensions have defined several new ID registers; hook them up to the cpufeature code so we can add feature checks and hwcaps based on their contents.
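Once hooked up like this, the sanitised value of a new ID register also becomes readable from userspace via the kernel's MRS emulation. A hypothetical sketch (the S3_0_C0_C6_3 encoding for ID_AA64ISAR3_EL1 follows the architectural numbering; older assemblers won't accept the symbolic name):

    static inline unsigned long read_id_aa64isar3(void)
    {
            unsigned long val;

            /* trapped by the kernel and emulated with the sanitised value */
            asm volatile("mrs %0, S3_0_C0_C6_3" : "=r" (val));
            return val;
    }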
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20240306-arm64-2023-dpisa-v5-1-c568edc8ed7f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 28 ++++++++++++++++++++++++++++ arch/arm64/kernel/cpuinfo.c | 3 +++ 2 files changed, 31 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 8d1a634a403e..eae59ec0f4b0 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -234,6 +234,10 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_id_aa64isar3[] = { + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0), @@ -267,6 +271,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_id_aa64pfr2[] = { + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, 0), @@ -319,6 +327,10 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_id_aa64fpfr0[] = { + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_FGT_SHIFT, 4, 0), @@ -702,10 +714,12 @@ static const struct __ftr_reg_entry { &id_aa64pfr0_override), ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1, &id_aa64pfr1_override), + ARM64_FTR_REG(SYS_ID_AA64PFR2_EL1, ftr_id_aa64pfr2), ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0, &id_aa64zfr0_override), ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0, &id_aa64smfr0_override), + ARM64_FTR_REG(SYS_ID_AA64FPFR0_EL1, ftr_id_aa64fpfr0), /* Op1 = 0, CRn = 0, CRm = 5 */ ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0), @@ -717,6 +731,7 @@ static const struct __ftr_reg_entry { &id_aa64isar1_override), ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2, &id_aa64isar2_override), + ARM64_FTR_REG(SYS_ID_AA64ISAR3_EL1, ftr_id_aa64isar3), /* Op1 = 0, CRn = 0, CRm = 7 */ ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0), @@ -1043,14 +1058,17 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0); init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2); + init_cpu_ftr_reg(SYS_ID_AA64ISAR3_EL1, info->reg_id_aa64isar3); init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); init_cpu_ftr_reg(SYS_ID_AA64MMFR3_EL1, info->reg_id_aa64mmfr3); init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0); init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); + init_cpu_ftr_reg(SYS_ID_AA64PFR2_EL1, info->reg_id_aa64pfr2); init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0); init_cpu_ftr_reg(SYS_ID_AA64SMFR0_EL1, info->reg_id_aa64smfr0); + init_cpu_ftr_reg(SYS_ID_AA64FPFR0_EL1, info->reg_id_aa64fpfr0); if 
(id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) init_32bit_cpu_features(&info->aarch32); @@ -1272,6 +1290,8 @@ void update_cpu_features(int cpu, info->reg_id_aa64isar1, boot->reg_id_aa64isar1); taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu, info->reg_id_aa64isar2, boot->reg_id_aa64isar2); + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR3_EL1, cpu, + info->reg_id_aa64isar3, boot->reg_id_aa64isar3); /* * Differing PARange support is fine as long as all peripherals and @@ -1291,6 +1311,8 @@ void update_cpu_features(int cpu, info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu, info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1); + taint |= check_update_ftr_reg(SYS_ID_AA64PFR2_EL1, cpu, + info->reg_id_aa64pfr2, boot->reg_id_aa64pfr2); taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu, info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0); @@ -1298,6 +1320,9 @@ void update_cpu_features(int cpu, taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu, info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0); + taint |= check_update_ftr_reg(SYS_ID_AA64FPFR0_EL1, cpu, + info->reg_id_aa64fpfr0, boot->reg_id_aa64fpfr0); + /* Probe vector lengths */ if (IS_ENABLED(CONFIG_ARM64_SVE) && id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) { @@ -1410,8 +1435,10 @@ u64 __read_sysreg_by_encoding(u32 sys_id) read_sysreg_case(SYS_ID_AA64PFR0_EL1); read_sysreg_case(SYS_ID_AA64PFR1_EL1); + read_sysreg_case(SYS_ID_AA64PFR2_EL1); read_sysreg_case(SYS_ID_AA64ZFR0_EL1); read_sysreg_case(SYS_ID_AA64SMFR0_EL1); + read_sysreg_case(SYS_ID_AA64FPFR0_EL1); read_sysreg_case(SYS_ID_AA64DFR0_EL1); read_sysreg_case(SYS_ID_AA64DFR1_EL1); read_sysreg_case(SYS_ID_AA64MMFR0_EL1); @@ -1421,6 +1448,7 @@ u64 __read_sysreg_by_encoding(u32 sys_id) read_sysreg_case(SYS_ID_AA64ISAR0_EL1); read_sysreg_case(SYS_ID_AA64ISAR1_EL1); read_sysreg_case(SYS_ID_AA64ISAR2_EL1); + read_sysreg_case(SYS_ID_AA64ISAR3_EL1); read_sysreg_case(SYS_CNTFRQ_EL0); read_sysreg_case(SYS_CTR_EL0); diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 47043c0d95ec..12b192060156 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -443,14 +443,17 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1); + info->reg_id_aa64isar3 = read_cpuid(ID_AA64ISAR3_EL1); info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1); info->reg_id_aa64mmfr3 = read_cpuid(ID_AA64MMFR3_EL1); info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); + info->reg_id_aa64pfr2 = read_cpuid(ID_AA64PFR2_EL1); info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1); info->reg_id_aa64smfr0 = read_cpuid(ID_AA64SMFR0_EL1); + info->reg_id_aa64fpfr0 = read_cpuid(ID_AA64FPFR0_EL1); if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) info->reg_gmid = read_cpuid(GMID_EL1); -- cgit From 203f2b95a882dc46dd9873562167db69a1f61711 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 6 Mar 2024 23:14:48 +0000 Subject: arm64/fpsimd: Support FEAT_FPMR FEAT_FPMR defines a new EL0 accessible register FPMR used to configure the FP8 related features added to the architecture at the same time. Detect support for this register and context switch it for EL0 when present.
Due to the sharing of responsibility for saving floating point state between the host kernel and KVM, FP8 support is not yet implemented in KVM, and a stub similar to that used for SVCR is provided for FPMR in order to avoid bisection issues. To make it easier to share host state with the hypervisor, we store FPMR as a hardened usercopy field in uw (along with some padding). Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20240306-arm64-2023-dpisa-v5-3-c568edc8ed7f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 9 +++++++++ arch/arm64/kernel/fpsimd.c | 13 +++++++++++++ 2 files changed, 22 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index eae59ec0f4b0..0263565f617a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -272,6 +272,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64pfr2[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_FPMR_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2767,6 +2768,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_lpa2, }, + { + .desc = "FPMR", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_HAS_FPMR, + .matches = has_cpuid_feature, + .cpu_enable = cpu_enable_fpmr, + ARM64_CPUID_FIELDS(ID_AA64PFR2_EL1, FPMR, IMP) + }, {}, }; diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index a5dc6f764195..8e24b5e5e192 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -359,6 +359,9 @@ static void task_fpsimd_load(void) WARN_ON(preemptible()); WARN_ON(test_thread_flag(TIF_KERNEL_FPSTATE)); + if (system_supports_fpmr()) + write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR); + if (system_supports_sve() || system_supports_sme()) { switch (current->thread.fp_type) { case FP_STATE_FPSIMD: @@ -446,6 +449,9 @@ static void fpsimd_save_user_state(void) if (test_thread_flag(TIF_FOREIGN_FPSTATE)) return; + if (system_supports_fpmr()) + *(last->fpmr) = read_sysreg_s(SYS_FPMR); + /* * If a task is in a syscall the ABI allows us to only * preserve the state shared with FPSIMD so don't bother @@ -688,6 +694,12 @@ static void sve_to_fpsimd(struct task_struct *task) } } +void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__always_unused p) +{ + write_sysreg_s(read_sysreg_s(SYS_SCTLR_EL1) | SCTLR_EL1_EnFPM_MASK, + SYS_SCTLR_EL1); +} + #ifdef CONFIG_ARM64_SVE /* * Call __sve_free() directly only if you know task can't be scheduled @@ -1680,6 +1692,7 @@ static void fpsimd_bind_task_to_cpu(void) last->sve_vl = task_get_sve_vl(current); last->sme_vl = task_get_sme_vl(current); last->svcr = &current->thread.svcr; + last->fpmr = &current->thread.uw.fpmr; last->fp_type = &current->thread.fp_type; last->to_save = FP_STATE_CURRENT; current->thread.fpsimd_cpu = smp_processor_id(); -- cgit From 8c46def44409fc914278630b7ba5ac142ab7c4f4 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 6 Mar 2024 23:14:49 +0000 Subject: arm64/signal: Add FPMR signal handling Expose FPMR in the signal context on systems where it is supported. The kernel validates the exact size of the FPSIMD registers, so we can't readily add it to fpsimd_context without disruption.
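For illustration, a signal handler can locate the new record by walking the __reserved area, assuming the FPMR_MAGIC and struct fpmr_context uapi definitions added elsewhere in this series (an untested sketch):

    #include <signal.h>
    #include <stdio.h>
    #include <ucontext.h>
    #include <asm/sigcontext.h>

    static void handler(int sig, siginfo_t *info, void *uc_)
    {
            ucontext_t *uc = uc_;
            struct _aarch64_ctx *head =
                    (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

            for (; head->magic; head = (void *)((char *)head + head->size)) {
                    if (head->magic == FPMR_MAGIC) {
                            struct fpmr_context *ctx = (void *)head;
                            /* printf() is not async-signal-safe; fine for a demo */
                            printf("FPMR: 0x%llx\n", (unsigned long long)ctx->fpmr);
                            break;
                    }
            }
    }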
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20240306-arm64-2023-dpisa-v5-4-c568edc8ed7f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/signal.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 0e8beb3349ea..460823baa603 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -60,6 +60,7 @@ struct rt_sigframe_user_layout { unsigned long tpidr2_offset; unsigned long za_offset; unsigned long zt_offset; + unsigned long fpmr_offset; unsigned long extra_offset; unsigned long end_offset; }; @@ -182,6 +183,8 @@ struct user_ctxs { u32 za_size; struct zt_context __user *zt; u32 zt_size; + struct fpmr_context __user *fpmr; + u32 fpmr_size; }; static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) @@ -227,6 +230,33 @@ static int restore_fpsimd_context(struct user_ctxs *user) return err ? -EFAULT : 0; } +static int preserve_fpmr_context(struct fpmr_context __user *ctx) +{ + int err = 0; + + current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR); + + __put_user_error(FPMR_MAGIC, &ctx->head.magic, err); + __put_user_error(sizeof(*ctx), &ctx->head.size, err); + __put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err); + + return err; +} + +static int restore_fpmr_context(struct user_ctxs *user) +{ + u64 fpmr; + int err = 0; + + if (user->fpmr_size != sizeof(*user->fpmr)) + return -EINVAL; + + __get_user_error(fpmr, &user->fpmr->fpmr, err); + if (!err) + write_sysreg_s(fpmr, SYS_FPMR); + + return err; +} #ifdef CONFIG_ARM64_SVE @@ -590,6 +620,7 @@ static int parse_user_sigframe(struct user_ctxs *user, user->tpidr2 = NULL; user->za = NULL; user->zt = NULL; + user->fpmr = NULL; if (!IS_ALIGNED((unsigned long)base, 16)) goto invalid; @@ -684,6 +715,17 @@ static int parse_user_sigframe(struct user_ctxs *user, user->zt_size = size; break; + case FPMR_MAGIC: + if (!system_supports_fpmr()) + goto invalid; + + if (user->fpmr) + goto invalid; + + user->fpmr = (struct fpmr_context __user *)head; + user->fpmr_size = size; + break; + case EXTRA_MAGIC: if (have_extra_context) goto invalid; @@ -806,6 +848,9 @@ static int restore_sigframe(struct pt_regs *regs, if (err == 0 && system_supports_tpidr2() && user.tpidr2) err = restore_tpidr2_context(&user); + if (err == 0 && system_supports_fpmr() && user.fpmr) + err = restore_fpmr_context(&user); + if (err == 0 && system_supports_sme() && user.za) err = restore_za_context(&user); @@ -928,6 +973,13 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, } } + if (system_supports_fpmr()) { + err = sigframe_alloc(user, &user->fpmr_offset, + sizeof(struct fpmr_context)); + if (err) + return err; + } + return sigframe_alloc_end(user); } @@ -983,6 +1035,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, err |= preserve_tpidr2_context(tpidr2_ctx); } + /* FPMR if supported */ + if (system_supports_fpmr() && err == 0) { + struct fpmr_context __user *fpmr_ctx = + apply_user_offset(user, user->fpmr_offset); + err |= preserve_fpmr_context(fpmr_ctx); + } + /* ZA state if present */ if (system_supports_sme() && err == 0 && user->za_offset) { struct za_context __user *za_ctx = -- cgit From 4035c22ef7d43a6c00d6a6584c60e902b95b46af Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 6 Mar 2024 23:14:50 +0000 Subject: arm64/ptrace: Expose FPMR via ptrace Add a new regset to expose FPMR via ptrace. 
It is not added to the FPSIMD regset: that structure is exposed elsewhere without any allowance for extension, so we don't add FPMR there. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20240306-arm64-2023-dpisa-v5-5-c568edc8ed7f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ptrace.c | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index dc6cf0e37194..aacb45bd36e6 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -698,6 +698,39 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, return ret; } +static int fpmr_get(struct task_struct *target, const struct user_regset *regset, + struct membuf to) +{ + if (!system_supports_fpmr()) + return -EINVAL; + + if (target == current) + fpsimd_preserve_current_state(); + + return membuf_store(&to, target->thread.uw.fpmr); +} + +static int fpmr_set(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + unsigned long fpmr; + + if (!system_supports_fpmr()) + return -EINVAL; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count); + if (ret) + return ret; + + target->thread.uw.fpmr = fpmr; + + fpsimd_flush_task_state(target); + + return 0; +} + static int system_call_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) @@ -1419,6 +1452,7 @@ enum aarch64_regset { REGSET_HW_BREAK, REGSET_HW_WATCH, #endif + REGSET_FPMR, REGSET_SYSTEM_CALL, #ifdef CONFIG_ARM64_SVE REGSET_SVE, @@ -1497,6 +1531,14 @@ static const struct user_regset aarch64_regsets[] = { .regset_get = system_call_get, .set = system_call_set, }, + [REGSET_FPMR] = { + .core_note_type = NT_ARM_FPMR, + .n = 1, + .size = sizeof(u64), + .align = sizeof(u64), + .regset_get = fpmr_get, + .set = fpmr_set, + }, #ifdef CONFIG_ARM64_SVE [REGSET_SVE] = { /* Scalable Vector Extension */ .core_note_type = NT_ARM_SVE, -- cgit From c1932cac7902a8b0f7355515917dedc5412eb15d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 6 Mar 2024 23:14:51 +0000 Subject: arm64/hwcap: Define hwcaps for 2023 DPISA features The 2023 architecture extensions include a large number of floating point features, most of which simply add new instructions. Add hwcaps so that userspace can enumerate these features.
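Userspace can then probe for the new features in the usual way; for example (a sketch; the HWCAP2_* bit definitions come from this series' update to the uapi hwcap.h header):

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>

    int main(void)
    {
            unsigned long hwcap2 = getauxval(AT_HWCAP2);

            if (hwcap2 & HWCAP2_FPMR)
                    puts("FPMR present");
            if (hwcap2 & HWCAP2_FAMINMAX)
                    puts("FAMINMAX instructions present");
            return 0;
    }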
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20240306-arm64-2023-dpisa-v5-6-c568edc8ed7f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 35 +++++++++++++++++++++++++++++++++++ arch/arm64/kernel/cpuinfo.c | 15 +++++++++++++++ 2 files changed, 50 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0263565f617a..aefda789f510 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -220,6 +220,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_LUT_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0), @@ -235,6 +236,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { }; static const struct arm64_ftr_bits ftr_id_aa64isar3[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FAMINMAX_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -303,6 +305,8 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_LUTv2_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), @@ -315,6 +319,10 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F8F16_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F8F32_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), @@ -325,10 +333,22 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8FMA_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8DP4_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8DP2_SHIFT, 1, 0), ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_id_aa64fpfr0[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8CVT_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8FMA_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8DP4_SHIFT, 1, 0), + 
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8DP2_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8E4M3_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8E5M2_SHIFT, 1, 0), ARM64_FTR_END, }; @@ -2859,6 +2879,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMD), HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, FP16, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), HWCAP_CAP(ID_AA64PFR0_EL1, DIT, IMP, CAP_HWCAP, KERNEL_HWCAP_DIT), + HWCAP_CAP(ID_AA64PFR2_EL1, FPMR, IMP, CAP_HWCAP, KERNEL_HWCAP_FPMR), HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, IMP, CAP_HWCAP, KERNEL_HWCAP_DCPOP), HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, DPB2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), HWCAP_CAP(ID_AA64ISAR1_EL1, JSCVT, IMP, CAP_HWCAP, KERNEL_HWCAP_JSCVT), @@ -2872,6 +2893,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16), HWCAP_CAP(ID_AA64ISAR1_EL1, DGH, IMP, CAP_HWCAP, KERNEL_HWCAP_DGH), HWCAP_CAP(ID_AA64ISAR1_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM), + HWCAP_CAP(ID_AA64ISAR2_EL1, LUT, IMP, CAP_HWCAP, KERNEL_HWCAP_LUT), + HWCAP_CAP(ID_AA64ISAR3_EL1, FAMINMAX, IMP, CAP_HWCAP, KERNEL_HWCAP_FAMINMAX), HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT), #ifdef CONFIG_ARM64_SVE HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE), @@ -2912,6 +2935,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { #ifdef CONFIG_ARM64_SME HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME), HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), + HWCAP_CAP(ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2), HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1), HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2), HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), @@ -2919,12 +2943,23 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32), HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16), HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16), + HWCAP_CAP(ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16), + HWCAP_CAP(ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32), HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32), HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), + HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA), + HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4), + HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2), #endif /* CONFIG_ARM64_SME */ + HWCAP_CAP(ID_AA64FPFR0_EL1, F8CVT, IMP, CAP_HWCAP, KERNEL_HWCAP_F8CVT), + HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA), + HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP4), + HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP2), + HWCAP_CAP(ID_AA64FPFR0_EL1, F8E4M3, IMP, CAP_HWCAP, 
KERNEL_HWCAP_F8E4M3), + HWCAP_CAP(ID_AA64FPFR0_EL1, F8E5M2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E5M2), {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 12b192060156..f0abb150f73e 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -128,6 +128,21 @@ static const char *const hwcap_str[] = { [KERNEL_HWCAP_SVE_B16B16] = "sveb16b16", [KERNEL_HWCAP_LRCPC3] = "lrcpc3", [KERNEL_HWCAP_LSE128] = "lse128", + [KERNEL_HWCAP_FPMR] = "fpmr", + [KERNEL_HWCAP_LUT] = "lut", + [KERNEL_HWCAP_FAMINMAX] = "faminmax", + [KERNEL_HWCAP_F8CVT] = "f8cvt", + [KERNEL_HWCAP_F8FMA] = "f8fma", + [KERNEL_HWCAP_F8DP4] = "f8dp4", + [KERNEL_HWCAP_F8DP2] = "f8dp2", + [KERNEL_HWCAP_F8E4M3] = "f8e4m3", + [KERNEL_HWCAP_F8E5M2] = "f8e5m2", + [KERNEL_HWCAP_SME_LUTV2] = "smelutv2", + [KERNEL_HWCAP_SME_F8F16] = "smef8f16", + [KERNEL_HWCAP_SME_F8F32] = "smef8f32", + [KERNEL_HWCAP_SME_SF8FMA] = "smesf8fma", + [KERNEL_HWCAP_SME_SF8DP4] = "smesf8dp4", + [KERNEL_HWCAP_SME_SF8DP2] = "smesf8dp2", }; #ifdef CONFIG_COMPAT -- cgit
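Tying the above together, a debugger could read a stopped tracee's FPMR through the new regset along these lines (a sketch, assuming the NT_ARM_FPMR note type added to the uapi elf.h by this series):

    #include <elf.h>
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    static long read_fpmr(pid_t pid, uint64_t *fpmr)
    {
            struct iovec iov = {
                    .iov_base = fpmr,
                    .iov_len  = sizeof(*fpmr),
            };

            /* pid must refer to a traced, stopped task */
            return ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_FPMR, &iov);
    }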