Diffstat (limited to 'arch/x86/kernel/fpu/core.c')
| -rw-r--r-- | arch/x86/kernel/fpu/core.c | 1001 |
1 file changed, 761 insertions, 240 deletions
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 2e5003fef51a..da233f20ae6f 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1994 Linus Torvalds * @@ -5,327 +6,725 @@ * General FPU state handling cleanups * Gareth Hughes <gareth@valinux.com>, May 2000 */ -#include <asm/fpu/internal.h> +#include <asm/fpu/api.h> #include <asm/fpu/regset.h> +#include <asm/fpu/sched.h> #include <asm/fpu/signal.h> #include <asm/fpu/types.h> +#include <asm/msr.h> #include <asm/traps.h> #include <asm/irq_regs.h> +#include <uapi/asm/kvm.h> + #include <linux/hardirq.h> +#include <linux/kvm_types.h> #include <linux/pkeys.h> +#include <linux/vmalloc.h> + +#include "context.h" +#include "internal.h" +#include "legacy.h" +#include "xstate.h" #define CREATE_TRACE_POINTS #include <asm/trace/fpu.h> +#ifdef CONFIG_X86_64 +DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic); +DEFINE_PER_CPU(u64, xfd_state); +#endif + +/* The FPU state configuration data for kernel and user space */ +struct fpu_state_config fpu_kernel_cfg __ro_after_init; +struct fpu_state_config fpu_user_cfg __ro_after_init; +struct vcpu_fpu_config guest_default_cfg __ro_after_init; + /* * Represents the initial FPU state. It's mostly (but not completely) zeroes, * depending on the FPU hardware format: */ -union fpregs_state init_fpstate __read_mostly; +struct fpstate init_fpstate __ro_after_init; /* - * Track whether the kernel is using the FPU state - * currently. - * - * This flag is used: - * - * - by IRQ context code to potentially use the FPU - * if it's unused. - * - * - to debug kernel_fpu_begin()/end() correctness + * Track FPU initialization and kernel-mode usage. 'true' means the FPU is + * initialized and is not currently being used by the kernel: */ -static DEFINE_PER_CPU(bool, in_kernel_fpu); +DEFINE_PER_CPU(bool, kernel_fpu_allowed); /* * Track which context is using the FPU on the CPU: */ DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx); -static void kernel_fpu_disable(void) +#ifdef CONFIG_X86_DEBUG_FPU +struct fpu *x86_task_fpu(struct task_struct *task) { - WARN_ON_FPU(this_cpu_read(in_kernel_fpu)); - this_cpu_write(in_kernel_fpu, true); -} + if (WARN_ON_ONCE(task->flags & PF_KTHREAD)) + return NULL; -static void kernel_fpu_enable(void) -{ - WARN_ON_FPU(!this_cpu_read(in_kernel_fpu)); - this_cpu_write(in_kernel_fpu, false); + return (void *)task + sizeof(*task); } +#endif -static bool kernel_fpu_disabled(void) +/* + * Can we use the FPU in kernel mode with the + * whole "kernel_fpu_begin/end()" sequence? + */ +bool irq_fpu_usable(void) { - return this_cpu_read(in_kernel_fpu); + if (WARN_ON_ONCE(in_nmi())) + return false; + + /* + * Return false in the following cases: + * + * - FPU is not yet initialized. This can happen only when the call is + * coming from CPU onlining, for example for microcode checksumming. + * - The kernel is already using the FPU, either because of explicit + * nesting (which should never be done), or because of implicit + * nesting when a hardirq interrupted a kernel-mode FPU section. + * + * The single boolean check below handles both cases: + */ + if (!this_cpu_read(kernel_fpu_allowed)) + return false; + + /* + * When not in NMI or hard interrupt context, FPU can be used in: + * + * - Task context except from within fpregs_lock()'ed critical + * regions. + * + * - Soft interrupt processing context which cannot happen + * while in a fpregs_lock()'ed critical region. 
+ */ + if (!in_hardirq()) + return true; + + /* + * In hard interrupt context it's safe when soft interrupts + * are enabled, which means the interrupt did not hit in + * a fpregs_lock()'ed critical region. + */ + return !softirq_count(); } +EXPORT_SYMBOL(irq_fpu_usable); -static bool interrupted_kernel_fpu_idle(void) +/* + * Track AVX512 state use because it is known to slow the max clock + * speed of the core. + */ +static void update_avx_timestamp(struct fpu *fpu) { - return !kernel_fpu_disabled(); + +#define AVX512_TRACKING_MASK (XFEATURE_MASK_ZMM_Hi256 | XFEATURE_MASK_Hi16_ZMM) + + if (fpu->fpstate->regs.xsave.header.xfeatures & AVX512_TRACKING_MASK) + fpu->avx512_timestamp = jiffies; } /* - * Were we in user mode (or vm86 mode) when we were - * interrupted? + * Save the FPU register state in fpu->fpstate->regs. The register state is + * preserved. * - * Doing kernel_fpu_begin/end() is ok if we are running - * in an interrupt context from user mode - we'll just - * save the FPU state as required. + * Must be called with fpregs_lock() held. + * + * The legacy FNSAVE instruction clears all FPU state unconditionally, so + * register state has to be reloaded. That might be a pointless exercise + * when the FPU is going to be used by another task right after that. But + * this only affects 20+ years old 32bit systems and avoids conditionals all + * over the place. + * + * FXSAVE and all XSAVE variants preserve the FPU register state. */ -static bool interrupted_user_mode(void) +void save_fpregs_to_fpstate(struct fpu *fpu) +{ + if (likely(use_xsave())) { + os_xsave(fpu->fpstate); + update_avx_timestamp(fpu); + return; + } + + if (likely(use_fxsr())) { + fxsave(&fpu->fpstate->regs.fxsave); + return; + } + + /* + * Legacy FPU register saving, FNSAVE always clears FPU registers, + * so we have to reload them from the memory state. + */ + asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->fpstate->regs.fsave)); + frstor(&fpu->fpstate->regs.fsave); +} + +void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask) +{ + /* + * AMD K7/K8 and later CPUs up to Zen don't save/restore + * FDP/FIP/FOP unless an exception is pending. Clear the x87 state + * here by setting it to fixed values. "m" is a random variable + * that should be in L1. + */ + if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) { + asm volatile( + "fnclex\n\t" + "emms\n\t" + "fildl %[addr]" /* set F?P to defined value */ + : : [addr] "m" (*fpstate)); + } + + if (use_xsave()) { + /* + * Dynamically enabled features are enabled in XCR0, but + * usage requires also that the corresponding bits in XFD + * are cleared. If the bits are set then using a related + * instruction will raise #NM. This allows to do the + * allocation of the larger FPU buffer lazy from #NM or if + * the task has no permission to kill it which would happen + * via #UD if the feature is disabled in XCR0. + * + * XFD state is following the same life time rules as + * XSTATE and to restore state correctly XFD has to be + * updated before XRSTORS otherwise the component would + * stay in or go into init state even if the bits are set + * in fpstate::regs::xsave::xfeatures. + */ + xfd_update_state(fpstate); + + /* + * Restoring state always needs to modify all features + * which are in @mask even if the current task cannot use + * extended features. + * + * So fpstate->xfeatures cannot be used here, because then + * a feature for which the task has no permission but was + * used by the previous task would not go into init state. 
+ */ + mask = fpu_kernel_cfg.max_features & mask; + + os_xrstor(fpstate, mask); + } else { + if (use_fxsr()) + fxrstor(&fpstate->regs.fxsave); + else + frstor(&fpstate->regs.fsave); + } +} + +void fpu_reset_from_exception_fixup(void) +{ + restore_fpregs_from_fpstate(&init_fpstate, XFEATURE_MASK_FPSTATE); +} + +#if IS_ENABLED(CONFIG_KVM) +static void __fpstate_reset(struct fpstate *fpstate); + +static void fpu_lock_guest_permissions(void) +{ + struct fpu_state_perm *fpuperm; + u64 perm; + + if (!IS_ENABLED(CONFIG_X86_64)) + return; + + spin_lock_irq(¤t->sighand->siglock); + fpuperm = &x86_task_fpu(current->group_leader)->guest_perm; + perm = fpuperm->__state_perm; + + /* First fpstate allocation locks down permissions. */ + WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED); + + spin_unlock_irq(¤t->sighand->siglock); +} + +bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) +{ + struct fpstate *fpstate; + unsigned int size; + + size = guest_default_cfg.size + ALIGN(offsetof(struct fpstate, regs), 64); + + fpstate = vzalloc(size); + if (!fpstate) + return false; + + /* Initialize indicators to reflect properties of the fpstate */ + fpstate->is_valloc = true; + fpstate->is_guest = true; + + __fpstate_reset(fpstate); + fpstate_init_user(fpstate); + + gfpu->fpstate = fpstate; + gfpu->xfeatures = guest_default_cfg.features; + + /* + * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state + * to userspace, even when XSAVE is unsupported, so that restoring FPU + * state on a different CPU that does support XSAVE can cleanly load + * the incoming state using its natural XSAVE. In other words, KVM's + * uABI size may be larger than this host's default size. Conversely, + * the default size should never be larger than KVM's base uABI size; + * all features that can expand the uABI size must be opt-in. + */ + gfpu->uabi_size = sizeof(struct kvm_xsave); + if (WARN_ON_ONCE(fpu_user_cfg.default_size > gfpu->uabi_size)) + gfpu->uabi_size = fpu_user_cfg.default_size; + + fpu_lock_guest_permissions(); + + return true; +} +EXPORT_SYMBOL_FOR_KVM(fpu_alloc_guest_fpstate); + +void fpu_free_guest_fpstate(struct fpu_guest *gfpu) { - struct pt_regs *regs = get_irq_regs(); - return regs && user_mode(regs); + struct fpstate *fpstate = gfpu->fpstate; + + if (!fpstate) + return; + + if (WARN_ON_ONCE(!fpstate->is_valloc || !fpstate->is_guest || fpstate->in_use)) + return; + + gfpu->fpstate = NULL; + vfree(fpstate); } +EXPORT_SYMBOL_FOR_KVM(fpu_free_guest_fpstate); /* - * Can we use the FPU in kernel mode with the - * whole "kernel_fpu_begin/end()" sequence? + * fpu_enable_guest_xfd_features - Check xfeatures against guest perm and enable + * @guest_fpu: Pointer to the guest FPU container + * @xfeatures: Features requested by guest CPUID + * + * Enable all dynamic xfeatures according to guest perm and requested CPUID. + * + * Return: 0 on success, error code otherwise + */ +int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures) +{ + lockdep_assert_preemption_enabled(); + + /* Nothing to do if all requested features are already enabled. 
*/ + xfeatures &= ~guest_fpu->xfeatures; + if (!xfeatures) + return 0; + + return __xfd_enable_feature(xfeatures, guest_fpu); +} +EXPORT_SYMBOL_FOR_KVM(fpu_enable_guest_xfd_features); + +#ifdef CONFIG_X86_64 +void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) +{ + fpregs_lock(); + guest_fpu->fpstate->xfd = xfd; + if (guest_fpu->fpstate->in_use) + xfd_update_state(guest_fpu->fpstate); + fpregs_unlock(); +} +EXPORT_SYMBOL_FOR_KVM(fpu_update_guest_xfd); + +/** + * fpu_sync_guest_vmexit_xfd_state - Synchronize XFD MSR and software state * - * It's always ok in process context (ie "not interrupt") - * but it is sometimes ok even from an irq. + * Must be invoked from KVM after a VMEXIT before enabling interrupts when + * XFD write emulation is disabled. This is required because the guest can + * freely modify XFD and the state at VMEXIT is not guaranteed to be the + * same as the state on VMENTER. So software state has to be updated before + * any operation which depends on it can take place. + * + * Note: It can be invoked unconditionally even when write emulation is + * enabled for the price of a then pointless MSR read. */ -bool irq_fpu_usable(void) +void fpu_sync_guest_vmexit_xfd_state(void) { - return !in_interrupt() || - interrupted_user_mode() || - interrupted_kernel_fpu_idle(); + struct fpstate *fpstate = x86_task_fpu(current)->fpstate; + + lockdep_assert_irqs_disabled(); + if (fpu_state_size_dynamic()) { + rdmsrq(MSR_IA32_XFD, fpstate->xfd); + __this_cpu_write(xfd_state, fpstate->xfd); + } } -EXPORT_SYMBOL(irq_fpu_usable); +EXPORT_SYMBOL_FOR_KVM(fpu_sync_guest_vmexit_xfd_state); +#endif /* CONFIG_X86_64 */ -static void __kernel_fpu_begin(void) +int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest) { - struct fpu *fpu = ¤t->thread.fpu; - - WARN_ON_FPU(!irq_fpu_usable()); + struct fpstate *guest_fps = guest_fpu->fpstate; + struct fpu *fpu = x86_task_fpu(current); + struct fpstate *cur_fps = fpu->fpstate; + + fpregs_lock(); + if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD)) + save_fpregs_to_fpstate(fpu); + + /* Swap fpstate */ + if (enter_guest) { + fpu->__task_fpstate = cur_fps; + fpu->fpstate = guest_fps; + guest_fps->in_use = true; + } else { + guest_fps->in_use = false; + fpu->fpstate = fpu->__task_fpstate; + fpu->__task_fpstate = NULL; + } - kernel_fpu_disable(); + cur_fps = fpu->fpstate; - if (fpu->initialized) { + if (!cur_fps->is_confidential) { + /* Includes XFD update */ + restore_fpregs_from_fpstate(cur_fps, XFEATURE_MASK_FPSTATE); + } else { /* - * Ignore return value -- we don't care if reg state - * is clobbered. + * XSTATE is restored by firmware from encrypted + * memory. 
Make sure XFD state is correct while + * running with guest fpstate */ - copy_fpregs_to_fpstate(fpu); + xfd_update_state(cur_fps); + } + + fpregs_mark_activate(); + fpregs_unlock(); + return 0; +} +EXPORT_SYMBOL_FOR_KVM(fpu_swap_kvm_fpstate); + +void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, + unsigned int size, u64 xfeatures, u32 pkru) +{ + struct fpstate *kstate = gfpu->fpstate; + union fpregs_state *ustate = buf; + struct membuf mb = { .p = buf, .left = size }; + + if (cpu_feature_enabled(X86_FEATURE_XSAVE)) { + __copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru, + XSTATE_COPY_XSAVE); } else { - __cpu_invalidate_fpregs_state(); + memcpy(&ustate->fxsave, &kstate->regs.fxsave, + sizeof(ustate->fxsave)); + /* Make it restorable on a XSAVE enabled host */ + ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE; } } +EXPORT_SYMBOL_FOR_KVM(fpu_copy_guest_fpstate_to_uabi); -static void __kernel_fpu_end(void) +int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, + u64 xcr0, u32 *vpkru) { - struct fpu *fpu = ¤t->thread.fpu; + struct fpstate *kstate = gfpu->fpstate; + const union fpregs_state *ustate = buf; + + if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) { + if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE) + return -EINVAL; + if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask) + return -EINVAL; + memcpy(&kstate->regs.fxsave, &ustate->fxsave, sizeof(ustate->fxsave)); + return 0; + } - if (fpu->initialized) - copy_kernel_to_fpregs(&fpu->state); + if (ustate->xsave.header.xfeatures & ~xcr0) + return -EINVAL; - kernel_fpu_enable(); + /* + * Nullify @vpkru to preserve its current value if PKRU's bit isn't set + * in the header. KVM's odd ABI is to leave PKRU untouched in this + * case (all other components are eventually re-initialized). + */ + if (!(ustate->xsave.header.xfeatures & XFEATURE_MASK_PKRU)) + vpkru = NULL; + + return copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru); } +EXPORT_SYMBOL_FOR_KVM(fpu_copy_uabi_to_guest_fpstate); +#endif /* CONFIG_KVM */ -void kernel_fpu_begin(void) +void kernel_fpu_begin_mask(unsigned int kfpu_mask) { - preempt_disable(); - __kernel_fpu_begin(); + if (!irqs_disabled()) + fpregs_lock(); + + WARN_ON_FPU(!irq_fpu_usable()); + + /* Toggle kernel_fpu_allowed to false: */ + WARN_ON_FPU(!this_cpu_read(kernel_fpu_allowed)); + this_cpu_write(kernel_fpu_allowed, false); + + if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) && + !test_thread_flag(TIF_NEED_FPU_LOAD)) { + set_thread_flag(TIF_NEED_FPU_LOAD); + save_fpregs_to_fpstate(x86_task_fpu(current)); + } + __cpu_invalidate_fpregs_state(); + + /* Put sane initial values into the control registers. */ + if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM)) + ldmxcsr(MXCSR_DEFAULT); + + if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU)) + asm volatile ("fninit"); } -EXPORT_SYMBOL_GPL(kernel_fpu_begin); +EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask); void kernel_fpu_end(void) { - __kernel_fpu_end(); - preempt_enable(); + /* Toggle kernel_fpu_allowed back to true: */ + WARN_ON_FPU(this_cpu_read(kernel_fpu_allowed)); + this_cpu_write(kernel_fpu_allowed, true); + + if (!irqs_disabled()) + fpregs_unlock(); } EXPORT_SYMBOL_GPL(kernel_fpu_end); /* - * Save the FPU state (mark it for reload if necessary): - * - * This only ever gets called for the current task. + * Sync the FPU register state to current's memory register state when the + * current task owns the FPU. The hardware register state is preserved. 
*/ -void fpu__save(struct fpu *fpu) +void fpu_sync_fpstate(struct fpu *fpu) { - WARN_ON_FPU(fpu != ¤t->thread.fpu); + WARN_ON_FPU(fpu != x86_task_fpu(current)); - preempt_disable(); + fpregs_lock(); trace_x86_fpu_before_save(fpu); - if (fpu->initialized) { - if (!copy_fpregs_to_fpstate(fpu)) { - copy_kernel_to_fpregs(&fpu->state); - } - } + + if (!test_thread_flag(TIF_NEED_FPU_LOAD)) + save_fpregs_to_fpstate(fpu); + trace_x86_fpu_after_save(fpu); - preempt_enable(); + fpregs_unlock(); +} + +static inline unsigned int init_fpstate_copy_size(void) +{ + if (!use_xsave()) + return fpu_kernel_cfg.default_size; + + /* XSAVE(S) just needs the legacy and the xstate header part */ + return sizeof(init_fpstate.regs.xsave); +} + +static inline void fpstate_init_fxstate(struct fpstate *fpstate) +{ + fpstate->regs.fxsave.cwd = 0x37f; + fpstate->regs.fxsave.mxcsr = MXCSR_DEFAULT; } -EXPORT_SYMBOL_GPL(fpu__save); /* * Legacy x87 fpstate state init: */ -static inline void fpstate_init_fstate(struct fregs_state *fp) +static inline void fpstate_init_fstate(struct fpstate *fpstate) { - fp->cwd = 0xffff037fu; - fp->swd = 0xffff0000u; - fp->twd = 0xffffffffu; - fp->fos = 0xffff0000u; + fpstate->regs.fsave.cwd = 0xffff037fu; + fpstate->regs.fsave.swd = 0xffff0000u; + fpstate->regs.fsave.twd = 0xffffffffu; + fpstate->regs.fsave.fos = 0xffff0000u; } -void fpstate_init(union fpregs_state *state) +/* + * Used in two places: + * 1) Early boot to setup init_fpstate for non XSAVE systems + * 2) fpu_alloc_guest_fpstate() which is invoked from KVM + */ +void fpstate_init_user(struct fpstate *fpstate) { - if (!static_cpu_has(X86_FEATURE_FPU)) { - fpstate_init_soft(&state->soft); + if (!cpu_feature_enabled(X86_FEATURE_FPU)) { + fpstate_init_soft(&fpstate->regs.soft); return; } - memset(state, 0, fpu_kernel_xstate_size); + xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures); - if (static_cpu_has(X86_FEATURE_XSAVES)) - fpstate_init_xstate(&state->xsave); - if (static_cpu_has(X86_FEATURE_FXSR)) - fpstate_init_fxstate(&state->fxsave); + if (cpu_feature_enabled(X86_FEATURE_FXSR)) + fpstate_init_fxstate(fpstate); else - fpstate_init_fstate(&state->fsave); + fpstate_init_fstate(fpstate); } -EXPORT_SYMBOL_GPL(fpstate_init); -int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) +static void __fpstate_reset(struct fpstate *fpstate) { - dst_fpu->last_cpu = -1; - - if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU)) - return 0; - - WARN_ON_FPU(src_fpu != ¤t->thread.fpu); - /* - * Don't let 'init optimized' areas of the XSAVE area - * leak into the child task: - */ - memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size); - - /* - * Save current FPU registers directly into the child - * FPU context, without any memory-to-memory copying. + * Supervisor features (and thus sizes) may diverge between guest + * FPUs and host FPUs, as some supervisor features are supported + * for guests despite not being utilized by the host. User + * features and sizes are always identical, which allows for + * common guest and userspace ABI. * - * ( The function 'fails' in the FNSAVE case, which destroys - * register contents so we have to copy them back. ) + * For the host, set XFD to the kernel's desired initialization + * value. For guests, set XFD to its architectural RESET value. 
*/ - if (!copy_fpregs_to_fpstate(dst_fpu)) { - memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size); - copy_kernel_to_fpregs(&src_fpu->state); + if (fpstate->is_guest) { + fpstate->size = guest_default_cfg.size; + fpstate->xfeatures = guest_default_cfg.features; + fpstate->xfd = 0; + } else { + fpstate->size = fpu_kernel_cfg.default_size; + fpstate->xfeatures = fpu_kernel_cfg.default_features; + fpstate->xfd = init_fpstate.xfd; } - trace_x86_fpu_copy_src(src_fpu); - trace_x86_fpu_copy_dst(dst_fpu); - - return 0; + fpstate->user_size = fpu_user_cfg.default_size; + fpstate->user_xfeatures = fpu_user_cfg.default_features; } -/* - * Activate the current task's in-memory FPU context, - * if it has not been used before: - */ -void fpu__initialize(struct fpu *fpu) +void fpstate_reset(struct fpu *fpu) { - WARN_ON_FPU(fpu != ¤t->thread.fpu); + /* Set the fpstate pointer to the default fpstate */ + fpu->fpstate = &fpu->__fpstate; + __fpstate_reset(fpu->fpstate); - if (!fpu->initialized) { - fpstate_init(&fpu->state); - trace_x86_fpu_init_state(fpu); + /* Initialize the permission related info in fpu */ + fpu->perm.__state_perm = fpu_kernel_cfg.default_features; + fpu->perm.__state_size = fpu_kernel_cfg.default_size; + fpu->perm.__user_state_size = fpu_user_cfg.default_size; - trace_x86_fpu_activate_state(fpu); - /* Safe to do for the current task: */ - fpu->initialized = 1; + fpu->guest_perm.__state_perm = guest_default_cfg.features; + fpu->guest_perm.__state_size = guest_default_cfg.size; + /* + * User features and sizes are always identical between host and + * guest FPUs, which allows for common guest and userspace ABI. + */ + fpu->guest_perm.__user_state_size = fpu_user_cfg.default_size; +} + +static inline void fpu_inherit_perms(struct fpu *dst_fpu) +{ + if (fpu_state_size_dynamic()) { + struct fpu *src_fpu = x86_task_fpu(current->group_leader); + + spin_lock_irq(¤t->sighand->siglock); + /* Fork also inherits the permissions of the parent */ + dst_fpu->perm = src_fpu->perm; + dst_fpu->guest_perm = src_fpu->guest_perm; + spin_unlock_irq(¤t->sighand->siglock); } } -EXPORT_SYMBOL_GPL(fpu__initialize); -/* - * This function must be called before we read a task's fpstate. - * - * There's two cases where this gets called: - * - * - for the current task (when coredumping), in which case we have - * to save the latest FPU registers into the fpstate, - * - * - or it's called for stopped tasks (ptrace), in which case the - * registers were already saved by the context-switch code when - * the task scheduled out - we only have to initialize the registers - * if they've never been initialized. - * - * If the task has used the FPU before then save it. - */ -void fpu__prepare_read(struct fpu *fpu) +/* A passed ssp of zero will not cause any update */ +static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp) { - if (fpu == ¤t->thread.fpu) { - fpu__save(fpu); - } else { - if (!fpu->initialized) { - fpstate_init(&fpu->state); - trace_x86_fpu_init_state(fpu); +#ifdef CONFIG_X86_USER_SHADOW_STACK + struct cet_user_state *xstate; - trace_x86_fpu_activate_state(fpu); - /* Safe to do for current and for stopped child tasks: */ - fpu->initialized = 1; - } - } + /* If ssp update is not needed. 
*/ + if (!ssp) + return 0; + + xstate = get_xsave_addr(&x86_task_fpu(dst)->fpstate->regs.xsave, + XFEATURE_CET_USER); + + /* + * If there is a non-zero ssp, then 'dst' must be configured with a shadow + * stack and the fpu state should be up to date since it was just copied + * from the parent in fpu_clone(). So there must be a valid non-init CET + * state location in the buffer. + */ + if (WARN_ON_ONCE(!xstate)) + return 1; + + xstate->user_ssp = (u64)ssp; +#endif + return 0; } -/* - * This function must be called before we write a task's fpstate. - * - * If the task has used the FPU before then invalidate any cached FPU registers. - * If the task has not used the FPU before then initialize its fpstate. - * - * After this function call, after registers in the fpstate are - * modified and the child task has woken up, the child task will - * restore the modified FPU state from the modified context. If we - * didn't clear its cached status here then the cached in-registers - * state pending on its former CPU could be restored, corrupting - * the modifications. - */ -void fpu__prepare_write(struct fpu *fpu) +/* Clone current's FPU state on fork */ +int fpu_clone(struct task_struct *dst, u64 clone_flags, bool minimal, + unsigned long ssp) { /* - * Only stopped child tasks can be used to modify the FPU - * state in the fpstate buffer: + * We allocate the new FPU structure right after the end of the task struct. + * task allocation size already took this into account. + * + * This is safe because task_struct size is a multiple of cacheline size, + * thus x86_task_fpu() will always be cacheline aligned as well. */ - WARN_ON_FPU(fpu == ¤t->thread.fpu); + struct fpu *dst_fpu = (void *)dst + sizeof(*dst); - if (fpu->initialized) { - /* Invalidate any cached state: */ - __fpu_invalidate_fpregs_state(fpu); - } else { - fpstate_init(&fpu->state); - trace_x86_fpu_init_state(fpu); + BUILD_BUG_ON(sizeof(*dst) % SMP_CACHE_BYTES != 0); + + /* The new task's FPU state cannot be valid in the hardware. */ + dst_fpu->last_cpu = -1; - trace_x86_fpu_activate_state(fpu); - /* Safe to do for stopped child tasks: */ - fpu->initialized = 1; + fpstate_reset(dst_fpu); + + if (!cpu_feature_enabled(X86_FEATURE_FPU)) + return 0; + + /* + * Enforce reload for user space tasks and prevent kernel threads + * from trying to save the FPU registers on context switch. + */ + set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD); + + /* + * No FPU state inheritance for kernel threads and IO + * worker threads. + */ + if (minimal) { + /* Clear out the minimal state */ + memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs, + init_fpstate_copy_size()); + return 0; } + + /* + * If a new feature is added, ensure all dynamic features are + * caller-saved from here! + */ + BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA); + + /* + * Save the default portion of the current FPU state into the + * clone. Assume all dynamic features to be defined as caller- + * saved, which enables skipping both the expansion of fpstate + * and the copying of any dynamic state. + * + * Do not use memcpy() when TIF_NEED_FPU_LOAD is set because + * copying is not valid when current uses non-default states. + */ + fpregs_lock(); + if (test_thread_flag(TIF_NEED_FPU_LOAD)) + fpregs_restore_userregs(); + save_fpregs_to_fpstate(dst_fpu); + fpregs_unlock(); + if (!(clone_flags & CLONE_THREAD)) + fpu_inherit_perms(dst_fpu); + + /* + * Children never inherit PASID state. 
+ * Force it to have its init value: + */ + if (use_xsave()) + dst_fpu->fpstate->regs.xsave.header.xfeatures &= ~XFEATURE_MASK_PASID; + + /* + * Update shadow stack pointer, in case it changed during clone. + */ + if (update_fpu_shstk(dst, ssp)) + return 1; + + trace_x86_fpu_copy_dst(dst_fpu); + + return 0; } /* - * 'fpu__restore()' is called to copy FPU registers from - * the FPU fpstate to the live hw registers and to activate - * access to the hardware registers, so that FPU instructions - * can be used afterwards. - * - * Must be called with kernel preemption disabled (for example - * with local interrupts disabled, as it is in the case of - * do_device_not_available()). + * While struct fpu is no longer part of struct thread_struct, it is still + * allocated after struct task_struct in the "task_struct" kmem cache. But + * since FPU is expected to be part of struct thread_struct, we have to + * adjust for it here. */ -void fpu__restore(struct fpu *fpu) +void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size) { - fpu__initialize(fpu); - - /* Avoid __kernel_fpu_begin() right after fpregs_activate() */ - kernel_fpu_disable(); - trace_x86_fpu_before_restore(fpu); - fpregs_activate(fpu); - copy_kernel_to_fpregs(&fpu->state); - trace_x86_fpu_after_restore(fpu); - kernel_fpu_enable(); + /* The allocation follows struct task_struct. */ + *offset = sizeof(struct task_struct) - offsetof(struct task_struct, thread); + *offset += offsetof(struct fpu, __fpstate.regs); + *size = fpu_kernel_cfg.default_size; } -EXPORT_SYMBOL_GPL(fpu__restore); /* * Drops current FPU state: deactivates the fpregs and @@ -336,66 +735,174 @@ EXPORT_SYMBOL_GPL(fpu__restore); * a state-restore is coming: either an explicit one, * or a reschedule. */ -void fpu__drop(struct fpu *fpu) +void fpu__drop(struct task_struct *tsk) { + struct fpu *fpu; + + if (test_tsk_thread_flag(tsk, TIF_NEED_FPU_LOAD)) + return; + + fpu = x86_task_fpu(tsk); + preempt_disable(); - if (fpu == ¤t->thread.fpu) { - if (fpu->initialized) { - /* Ignore delayed exceptions from user space */ - asm volatile("1: fwait\n" - "2:\n" - _ASM_EXTABLE(1b, 2b)); - fpregs_deactivate(fpu); - } + if (fpu == x86_task_fpu(current)) { + /* Ignore delayed exceptions from user space */ + asm volatile("1: fwait\n" + "2:\n" + _ASM_EXTABLE(1b, 2b)); + fpregs_deactivate(fpu); } - fpu->initialized = 0; - trace_x86_fpu_dropped(fpu); preempt_enable(); } /* - * Clear FPU registers by setting them up from - * the init fpstate: + * Clear FPU registers by setting them up from the init fpstate. + * Caller must do fpregs_[un]lock() around it. */ -static inline void copy_init_fpstate_to_fpregs(void) +static inline void restore_fpregs_from_init_fpstate(u64 features_mask) { if (use_xsave()) - copy_kernel_to_xregs(&init_fpstate.xsave, -1); - else if (static_cpu_has(X86_FEATURE_FXSR)) - copy_kernel_to_fxregs(&init_fpstate.fxsave); + os_xrstor(&init_fpstate, features_mask); + else if (use_fxsr()) + fxrstor(&init_fpstate.regs.fxsave); else - copy_kernel_to_fregs(&init_fpstate.fsave); + frstor(&init_fpstate.regs.fsave); - if (boot_cpu_has(X86_FEATURE_OSPKE)) - copy_init_pkru_to_fpregs(); + pkru_write_default(); } /* - * Clear the FPU state back to init state. - * - * Called by sys_execve(), by the signal handler code and by various - * error paths. + * Reset current->fpu memory state to the init values. 
*/ -void fpu__clear(struct fpu *fpu) +static void fpu_reset_fpstate_regs(void) { - WARN_ON_FPU(fpu != ¤t->thread.fpu); /* Almost certainly an anomaly */ - - fpu__drop(fpu); + struct fpu *fpu = x86_task_fpu(current); + fpregs_lock(); + __fpu_invalidate_fpregs_state(fpu); /* - * Make sure fpstate is cleared and initialized. + * This does not change the actual hardware registers. It just + * resets the memory image and sets TIF_NEED_FPU_LOAD so a + * subsequent return to usermode will reload the registers from the + * task's memory image. + * + * Do not use fpstate_init() here. Just copy init_fpstate which has + * the correct content already except for PKRU. + * + * PKRU handling does not rely on the xstate when restoring for + * user space as PKRU is eagerly written in switch_to() and + * flush_thread(). */ - if (static_cpu_has(X86_FEATURE_FPU)) { - preempt_disable(); - fpu__initialize(fpu); - user_fpu_begin(); - copy_init_fpstate_to_fpregs(); - preempt_enable(); + memcpy(&fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size()); + set_thread_flag(TIF_NEED_FPU_LOAD); + fpregs_unlock(); +} + +/* + * Reset current's user FPU states to the init states. current's + * supervisor states, if any, are not modified by this function. The + * caller guarantees that the XSTATE header in memory is intact. + */ +void fpu__clear_user_states(struct fpu *fpu) +{ + WARN_ON_FPU(fpu != x86_task_fpu(current)); + + fpregs_lock(); + if (!cpu_feature_enabled(X86_FEATURE_FPU)) { + fpu_reset_fpstate_regs(); + fpregs_unlock(); + return; } + + /* + * Ensure that current's supervisor states are loaded into their + * corresponding registers. + */ + if (xfeatures_mask_supervisor() && + !fpregs_state_valid(fpu, smp_processor_id())) + os_xrstor_supervisor(fpu->fpstate); + + /* Ensure XFD state is in sync before reloading XSTATE */ + xfd_update_state(fpu->fpstate); + + /* Reset user states in registers. */ + restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE); + + /* + * Now all FPU registers have their desired values. Inform the FPU + * state machine that current's FPU registers are in the hardware + * registers. The memory image does not need to be updated because + * any operation relying on it has to save the registers first when + * current's FPU is marked active. + */ + fpregs_mark_activate(); + fpregs_unlock(); +} + +void fpu_flush_thread(void) +{ + fpstate_reset(x86_task_fpu(current)); + fpu_reset_fpstate_regs(); +} +/* + * Load FPU context before returning to userspace. + */ +void switch_fpu_return(void) +{ + if (!static_cpu_has(X86_FEATURE_FPU)) + return; + + fpregs_restore_userregs(); +} +EXPORT_SYMBOL_FOR_KVM(switch_fpu_return); + +void fpregs_lock_and_load(void) +{ + /* + * fpregs_lock() only disables preemption (mostly). So modifying state + * in an interrupt could screw up some in progress fpregs operation. + * Warn about it. + */ + WARN_ON_ONCE(!irq_fpu_usable()); + WARN_ON_ONCE(current->flags & PF_KTHREAD); + + fpregs_lock(); + + fpregs_assert_state_consistent(); + + if (test_thread_flag(TIF_NEED_FPU_LOAD)) + fpregs_restore_userregs(); +} + +#ifdef CONFIG_X86_DEBUG_FPU +/* + * If current FPU state according to its tracking (loaded FPU context on this + * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is + * loaded on return to userland. 
+ */ +void fpregs_assert_state_consistent(void) +{ + struct fpu *fpu = x86_task_fpu(current); + + if (test_thread_flag(TIF_NEED_FPU_LOAD)) + return; + + WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id())); +} +EXPORT_SYMBOL_FOR_KVM(fpregs_assert_state_consistent); +#endif + +void fpregs_mark_activate(void) +{ + struct fpu *fpu = x86_task_fpu(current); + + fpregs_activate(fpu); + fpu->last_cpu = smp_processor_id(); + clear_thread_flag(TIF_NEED_FPU_LOAD); } /* @@ -419,11 +926,11 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr) * fully reproduce the context of the exception. */ if (boot_cpu_has(X86_FEATURE_FXSR)) { - cwd = fpu->state.fxsave.cwd; - swd = fpu->state.fxsave.swd; + cwd = fpu->fpstate->regs.fxsave.cwd; + swd = fpu->fpstate->regs.fxsave.swd; } else { - cwd = (unsigned short)fpu->state.fsave.cwd; - swd = (unsigned short)fpu->state.fsave.swd; + cwd = (unsigned short)fpu->fpstate->regs.fsave.cwd; + swd = (unsigned short)fpu->fpstate->regs.fsave.swd; } err = swd & ~cwd; @@ -437,7 +944,7 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr) unsigned short mxcsr = MXCSR_DEFAULT; if (boot_cpu_has(X86_FEATURE_XMM)) - mxcsr = fpu->state.fxsave.mxcsr; + mxcsr = fpu->fpstate->regs.fxsave.mxcsr; err = ~(mxcsr >> 7) & mxcsr; } @@ -466,3 +973,17 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr) */ return 0; } + +/* + * Initialize register state that may prevent from entering low-power idle. + * This function will be invoked from the cpuidle driver only when needed. + */ +noinstr void fpu_idle_fpregs(void) +{ + /* Note: AMX_TILE being enabled implies XGETBV1 support */ + if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) && + (xfeatures_in_use() & XFEATURE_MASK_XTILE)) { + tile_release(); + __this_cpu_write(fpu_fpregs_owner_ctx, NULL); + } +} |
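Usage note (editorial, not part of the commit): irq_fpu_usable(), kernel_fpu_begin() and kernel_fpu_end(), whose implementation is reworked above around the per-CPU kernel_fpu_allowed flag, are the interface the rest of the kernel uses to run SIMD instructions in kernel mode. A minimal sketch of the expected calling pattern follows; the checksum helpers are hypothetical stand-ins for real SIMD and scalar code paths.

/*
 * Sketch only -- not part of this commit.  Illustrates the calling
 * convention expected by irq_fpu_usable()/kernel_fpu_begin()/
 * kernel_fpu_end() as implemented above.  my_checksum() and the two
 * helpers are made-up names.
 */
#include <linux/types.h>
#include <asm/fpu/api.h>

static u32 my_scalar_checksum(const void *data, unsigned int len) { return 0; }
static u32 my_simd_checksum(const void *data, unsigned int len)   { return 0; }

static u32 my_checksum(const void *data, unsigned int len)
{
	u32 csum;

	/*
	 * The FPU may be unusable here: in NMI context, before the FPU is
	 * initialized during CPU onlining, or when a hardirq interrupted a
	 * fpregs_lock()'ed / kernel-FPU region.  Fall back to scalar code.
	 */
	if (!irq_fpu_usable())
		return my_scalar_checksum(data, len);

	kernel_fpu_begin();	/* saves current's FPU state if needed and
				 * flips kernel_fpu_allowed to false */
	csum = my_simd_checksum(data, len);	/* SSE/AVX is safe here */
	kernel_fpu_end();	/* flips kernel_fpu_allowed back to true; user
				 * registers are reloaded lazily on return to
				 * user space via TIF_NEED_FPU_LOAD */

	return csum;
}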
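Layout note (editorial, not part of the commit): with this change struct fpu is no longer embedded in struct thread_struct; x86_task_fpu() and fpu_clone() above locate it directly behind struct task_struct in the same allocation. A rough picture of that layout, using the same pointer arithmetic as x86_task_fpu() in the diff:

/*
 * Sketch only -- mirrors x86_task_fpu() above.
 *
 *   task_struct allocation ("task_struct" kmem cache):
 *
 *   +---------------------+----------------------------------------+
 *   | struct task_struct  | struct fpu (incl. __fpstate.regs[...]) |
 *   +---------------------+----------------------------------------+
 *   ^ task                ^ (void *)task + sizeof(*task)
 *
 * fpu_thread_struct_whitelist() above exposes only the __fpstate.regs
 * part of this trailing area to usercopy.
 */
#include <linux/sched.h>
#include <asm/fpu/types.h>

static inline struct fpu *sketch_task_fpu(struct task_struct *task)
{
	return (void *)task + sizeof(*task);
}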
