Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/Makefile      |    1
-rw-r--r--  arch/x86/kernel/cpu/common.c  |   37
-rw-r--r--  arch/x86/kernel/dumpstack.c   |    2
-rw-r--r--  arch/x86/kernel/fpu/core.c    |  282
-rw-r--r--  arch/x86/kernel/fpu/init.c    |   15
-rw-r--r--  arch/x86/kernel/fpu/regset.c  |  223
-rw-r--r--  arch/x86/kernel/fpu/signal.c  |  438
-rw-r--r--  arch/x86/kernel/fpu/xstate.c  |  644
-rw-r--r--  arch/x86/kernel/process.c     |   22
-rw-r--r--  arch/x86/kernel/process_64.c  |   28
-rw-r--r--  arch/x86/kernel/setup.c       |    5
-rw-r--r--  arch/x86/kernel/signal.c      |   88
-rw-r--r--  arch/x86/kernel/trace.c       |  234
-rw-r--r--  arch/x86/kernel/traps.c       |    5
14 files changed, 1125 insertions(+), 899 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0f66682ac02a..3e625c61f008 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace_$(BITS).o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_X86_TSC) += trace_clock.o
+obj-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_CRASH_CORE) += crash_core_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a99d00393206..64b805bd6a54 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -58,6 +58,7 @@
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/uv/uv.h>
+#include <asm/sigframe.h>
#include "cpu.h"
@@ -465,27 +466,22 @@ static bool pku_disabled;
static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
- struct pkru_state *pk;
+ if (c == &boot_cpu_data) {
+ if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
+ return;
+ /*
+ * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
+ * bit to be set. Enforce it.
+ */
+ setup_force_cpu_cap(X86_FEATURE_OSPKE);
- /* check the boot processor, plus compile options for PKU: */
- if (!cpu_feature_enabled(X86_FEATURE_PKU))
- return;
- /* checks the actual processor's cpuid bits: */
- if (!cpu_has(c, X86_FEATURE_PKU))
- return;
- if (pku_disabled)
+ } else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
return;
+ }
cr4_set_bits(X86_CR4_PKE);
- pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
- if (pk)
- pk->pkru = init_pkru_value;
- /*
- * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
- * cpuid bit to be set. We need to ensure that we
- * update that bit in this CPU's "cpu_info".
- */
- set_cpu_cap(c, X86_FEATURE_OSPKE);
+ /* Load the default PKRU value */
+ pkru_write_default();
}
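For illustration, the OSPKE capability set up above is what gates the userspace pkey interface. A minimal sketch, not part of this patch, assuming the glibc 2.27+ pkey wrappers and PKU-capable hardware:

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <stdio.h>

	int main(void)
	{
		/* fails with ENOSPC/EINVAL if OSPKE is not available */
		int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);

		if (pkey < 0) {
			perror("pkey_alloc");
			return 1;
		}

		int *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);

		printf("read ok: %d\n", *(volatile int *)p);
		/* *p = 1; would now raise SIGSEGV with si_code SEGV_PKUERR */
		pkey_free(pkey);
		return 0;
	}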
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -1332,6 +1328,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
fpu__init_system(c);
+ init_sigframe_size();
+
#ifdef CONFIG_X86_32
/*
* Regardless of whether PCID is enumerated, the SDM says
@@ -1717,9 +1715,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
}
/*
- * clearcpuid= was already parsed in fpu__init_parse_early_param.
- * But we need to keep a dummy __setup around otherwise it would
- * show up as an environment variable for init.
+ * clearcpuid= was already parsed in cpu_parse_early_param(). This dummy
+ * function prevents it from becoming an environment variable for init.
*/
static __init int setup_clearcpuid(char *arg)
{
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 299c20f0a38b..ea4fe192189d 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -69,7 +69,7 @@ static void printk_stack_address(unsigned long address, int reliable,
const char *log_lvl)
{
touch_nmi_watchdog();
- printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
+ printk("%s %s%pBb\n", log_lvl, reliable ? "" : "? ", (void *)address);
}
static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 571220ac8bea..7ada7bd03a32 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -23,7 +23,7 @@
* Represents the initial FPU state. It's mostly (but not completely) zeroes,
* depending on the FPU hardware format:
*/
-union fpregs_state init_fpstate __read_mostly;
+union fpregs_state init_fpstate __ro_after_init;
/*
* Track whether the kernel is using the FPU state
@@ -83,19 +83,23 @@ bool irq_fpu_usable(void)
EXPORT_SYMBOL(irq_fpu_usable);
/*
- * These must be called with preempt disabled. Returns
- * 'true' if the FPU state is still intact and we can
- * keep registers active.
+ * Save the FPU register state in fpu->state. The register state is
+ * preserved.
*
- * The legacy FNSAVE instruction cleared all FPU state
- * unconditionally, so registers are essentially destroyed.
- * Modern FPU state can be kept in registers, if there are
- * no pending FP exceptions.
+ * Must be called with fpregs_lock() held.
+ *
+ * The legacy FNSAVE instruction clears all FPU state unconditionally, so
+ * register state has to be reloaded. That might be a pointless exercise
+ * when the FPU is going to be used by another task right after that. But
* this only affects 20+ year old 32-bit systems and avoids conditionals all
+ * over the place.
+ *
+ * FXSAVE and all XSAVE variants preserve the FPU register state.
*/
-int copy_fpregs_to_fpstate(struct fpu *fpu)
+void save_fpregs_to_fpstate(struct fpu *fpu)
{
if (likely(use_xsave())) {
- copy_xregs_to_kernel(&fpu->state.xsave);
+ os_xsave(&fpu->state.xsave);
/*
* AVX512 state is tracked here because its use is
@@ -103,23 +107,49 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
*/
if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
fpu->avx512_timestamp = jiffies;
- return 1;
+ return;
}
if (likely(use_fxsr())) {
- copy_fxregs_to_kernel(fpu);
- return 1;
+ fxsave(&fpu->state.fxsave);
+ return;
}
/*
* Legacy FPU register saving, FNSAVE always clears FPU registers,
- * so we have to mark them inactive:
+ * so we have to reload them from the memory state.
*/
asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
+ frstor(&fpu->state.fsave);
+}
+EXPORT_SYMBOL(save_fpregs_to_fpstate);
- return 0;
+void __restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask)
+{
+ /*
+ * AMD K7/K8 and later CPUs up to Zen don't save/restore
+ * FDP/FIP/FOP unless an exception is pending. Clear the x87 state
+ * here by setting it to fixed values. "m" is a random variable
+ * that should be in L1.
+ */
+ if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
+ asm volatile(
+ "fnclex\n\t"
+ "emms\n\t"
+ "fildl %P[addr]" /* set F?P to defined value */
+ : : [addr] "m" (fpstate));
+ }
+
+ if (use_xsave()) {
+ os_xrstor(&fpstate->xsave, mask);
+ } else {
+ if (use_fxsr())
+ fxrstor(&fpstate->fxsave);
+ else
+ frstor(&fpstate->fsave);
+ }
}
-EXPORT_SYMBOL(copy_fpregs_to_fpstate);
+EXPORT_SYMBOL_GPL(__restore_fpregs_from_fpstate);
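The FNSAVE behaviour described in the comment above can be reproduced from userspace. A hedged sketch (x86, GCC inline asm, illustrative only): FNSAVE re-initializes the x87 unit, so the register contents survive only in the 108-byte memory image until FRSTOR brings them back:

	#include <stdio.h>

	int main(void)
	{
		unsigned char env[108];	/* legacy FNSAVE image */
		double in = 3.5, out;

		asm volatile("fldl %0" : : "m"(in));		/* st(0) = 3.5 */
		asm volatile("fnsave %0; fwait" : "=m"(env));	/* save + reset FPU */
		asm volatile("frstor %0" : : "m"(env));		/* reload st(0) */
		asm volatile("fstpl %0" : "=m"(out));

		printf("restored st(0) = %g\n", out);	/* prints 3.5 */
		return 0;
	}

Without the FRSTOR step, the final FSTPL would pop an empty register stack, which is exactly why save_fpregs_to_fpstate() reloads after FNSAVE.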
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
@@ -133,11 +163,7 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask)
if (!(current->flags & PF_KTHREAD) &&
!test_thread_flag(TIF_NEED_FPU_LOAD)) {
set_thread_flag(TIF_NEED_FPU_LOAD);
- /*
- * Ignore return value -- we don't care if reg state
- * is clobbered.
- */
- copy_fpregs_to_fpstate(&current->thread.fpu);
+ save_fpregs_to_fpstate(&current->thread.fpu);
}
__cpu_invalidate_fpregs_state();
@@ -160,27 +186,38 @@ void kernel_fpu_end(void)
EXPORT_SYMBOL_GPL(kernel_fpu_end);
/*
- * Save the FPU state (mark it for reload if necessary):
- *
- * This only ever gets called for the current task.
+ * Sync the FPU register state to current's memory register state when the
+ * current task owns the FPU. The hardware register state is preserved.
*/
-void fpu__save(struct fpu *fpu)
+void fpu_sync_fpstate(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu);
fpregs_lock();
trace_x86_fpu_before_save(fpu);
- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
- if (!copy_fpregs_to_fpstate(fpu)) {
- copy_kernel_to_fpregs(&fpu->state);
- }
- }
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+ save_fpregs_to_fpstate(fpu);
trace_x86_fpu_after_save(fpu);
fpregs_unlock();
}
+static inline void fpstate_init_xstate(struct xregs_state *xsave)
+{
+ /*
+ * XRSTORS requires these bits set in xcomp_bv, or it will
+ * trigger #GP:
+ */
+ xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
+}
+
+static inline void fpstate_init_fxstate(struct fxregs_state *fx)
+{
+ fx->cwd = 0x37f;
+ fx->mxcsr = MXCSR_DEFAULT;
+}
+
/*
* Legacy x87 fpstate state init:
*/
@@ -210,18 +247,18 @@ void fpstate_init(union fpregs_state *state)
}
EXPORT_SYMBOL_GPL(fpstate_init);
-int fpu__copy(struct task_struct *dst, struct task_struct *src)
+/* Clone current's FPU state on fork */
+int fpu_clone(struct task_struct *dst)
{
+ struct fpu *src_fpu = &current->thread.fpu;
struct fpu *dst_fpu = &dst->thread.fpu;
- struct fpu *src_fpu = &src->thread.fpu;
+ /* The new task's FPU state cannot be valid in the hardware. */
dst_fpu->last_cpu = -1;
- if (!static_cpu_has(X86_FEATURE_FPU))
+ if (!cpu_feature_enabled(X86_FEATURE_FPU))
return 0;
- WARN_ON_FPU(src_fpu != &current->thread.fpu);
-
/*
* Don't let 'init optimized' areas of the XSAVE area
* leak into the child task:
@@ -229,20 +266,16 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
/*
- * If the FPU registers are not current just memcpy() the state.
- * Otherwise save current FPU registers directly into the child's FPU
- * context, without any memory-to-memory copying.
- *
- * ( The function 'fails' in the FNSAVE case, which destroys
- * register contents so we have to load them back. )
+ * If the FPU registers are not owned by current just memcpy() the
+ * state. Otherwise save the FPU registers directly into the
+ * child's FPU context, without any memory-to-memory copying.
*/
fpregs_lock();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
- else if (!copy_fpregs_to_fpstate(dst_fpu))
- copy_kernel_to_fpregs(&dst_fpu->state);
-
+ else
+ save_fpregs_to_fpstate(dst_fpu);
fpregs_unlock();
set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
@@ -254,63 +287,6 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
}
/*
- * Activate the current task's in-memory FPU context,
- * if it has not been used before:
- */
-static void fpu__initialize(struct fpu *fpu)
-{
- WARN_ON_FPU(fpu != &current->thread.fpu);
-
- set_thread_flag(TIF_NEED_FPU_LOAD);
- fpstate_init(&fpu->state);
- trace_x86_fpu_init_state(fpu);
-}
-
-/*
- * This function must be called before we read a task's fpstate.
- *
- * There's two cases where this gets called:
- *
- * - for the current task (when coredumping), in which case we have
- * to save the latest FPU registers into the fpstate,
- *
- * - or it's called for stopped tasks (ptrace), in which case the
- * registers were already saved by the context-switch code when
- * the task scheduled out.
- *
- * If the task has used the FPU before then save it.
- */
-void fpu__prepare_read(struct fpu *fpu)
-{
- if (fpu == &current->thread.fpu)
- fpu__save(fpu);
-}
-
-/*
- * This function must be called before we write a task's fpstate.
- *
- * Invalidate any cached FPU registers.
- *
- * After this function call, after registers in the fpstate are
- * modified and the child task has woken up, the child task will
- * restore the modified FPU state from the modified context. If we
- * didn't clear its cached status here then the cached in-registers
- * state pending on its former CPU could be restored, corrupting
- * the modifications.
- */
-void fpu__prepare_write(struct fpu *fpu)
-{
- /*
- * Only stopped child tasks can be used to modify the FPU
- * state in the fpstate buffer:
- */
- WARN_ON_FPU(fpu == &current->thread.fpu);
-
- /* Invalidate any cached state: */
- __fpu_invalidate_fpregs_state(fpu);
-}
-
-/*
* Drops current FPU state: deactivates the fpregs and
* the fpstate. NOTE: it still leaves previous contents
* in the fpregs in the eager-FPU case.
@@ -340,61 +316,97 @@ void fpu__drop(struct fpu *fpu)
* Clear FPU registers by setting them up from the init fpstate.
* Caller must do fpregs_[un]lock() around it.
*/
-static inline void copy_init_fpstate_to_fpregs(u64 features_mask)
+static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
{
if (use_xsave())
- copy_kernel_to_xregs(&init_fpstate.xsave, features_mask);
- else if (static_cpu_has(X86_FEATURE_FXSR))
- copy_kernel_to_fxregs(&init_fpstate.fxsave);
+ os_xrstor(&init_fpstate.xsave, features_mask);
+ else if (use_fxsr())
+ fxrstor(&init_fpstate.fxsave);
else
- copy_kernel_to_fregs(&init_fpstate.fsave);
+ frstor(&init_fpstate.fsave);
- if (boot_cpu_has(X86_FEATURE_OSPKE))
- copy_init_pkru_to_fpregs();
+ pkru_write_default();
+}
+
+static inline unsigned int init_fpstate_copy_size(void)
+{
+ if (!use_xsave())
+ return fpu_kernel_xstate_size;
+
+ /* XSAVE(S) just needs the legacy and the xstate header part */
+ return sizeof(init_fpstate.xsave);
}
/*
- * Clear the FPU state back to init state.
- *
- * Called by sys_execve(), by the signal handler code and by various
- * error paths.
+ * Reset current->fpu memory state to the init values.
+ */
+static void fpu_reset_fpstate(void)
+{
+ struct fpu *fpu = &current->thread.fpu;
+
+ fpregs_lock();
+ fpu__drop(fpu);
+ /*
+ * This does not change the actual hardware registers. It just
+ * resets the memory image and sets TIF_NEED_FPU_LOAD so a
+ * subsequent return to usermode will reload the registers from the
+ * task's memory image.
+ *
+ * Do not use fpstate_init() here. Just copy init_fpstate which has
+ * the correct content already except for PKRU.
+ *
+ * PKRU handling does not rely on the xstate when restoring for
+ * user space as PKRU is eagerly written in switch_to() and
+ * flush_thread().
+ */
+ memcpy(&fpu->state, &init_fpstate, init_fpstate_copy_size());
+ set_thread_flag(TIF_NEED_FPU_LOAD);
+ fpregs_unlock();
+}
+
+/*
+ * Reset current's user FPU states to the init states. current's
+ * supervisor states, if any, are not modified by this function. The
+ * caller guarantees that the XSTATE header in memory is intact.
*/
-static void fpu__clear(struct fpu *fpu, bool user_only)
+void fpu__clear_user_states(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu);
- if (!static_cpu_has(X86_FEATURE_FPU)) {
- fpu__drop(fpu);
- fpu__initialize(fpu);
+ fpregs_lock();
+ if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
+ fpu_reset_fpstate();
+ fpregs_unlock();
return;
}
- fpregs_lock();
-
- if (user_only) {
- if (!fpregs_state_valid(fpu, smp_processor_id()) &&
- xfeatures_mask_supervisor())
- copy_kernel_to_xregs(&fpu->state.xsave,
- xfeatures_mask_supervisor());
- copy_init_fpstate_to_fpregs(xfeatures_mask_user());
- } else {
- copy_init_fpstate_to_fpregs(xfeatures_mask_all);
+ /*
+ * Ensure that current's supervisor states are loaded into their
+ * corresponding registers.
+ */
+ if (xfeatures_mask_supervisor() &&
+ !fpregs_state_valid(fpu, smp_processor_id())) {
+ os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
}
+ /* Reset user states in registers. */
+ restore_fpregs_from_init_fpstate(xfeatures_mask_restore_user());
+
+ /*
+ * Now all FPU registers have their desired values. Inform the FPU
+ * state machine that current's FPU registers are in the hardware
+ * registers. The memory image does not need to be updated because
+ * any operation relying on it has to save the registers first when
+ * current's FPU is marked active.
+ */
fpregs_mark_activate();
fpregs_unlock();
}
-void fpu__clear_user_states(struct fpu *fpu)
+void fpu_flush_thread(void)
{
- fpu__clear(fpu, true);
+ fpu_reset_fpstate();
}
-
-void fpu__clear_all(struct fpu *fpu)
-{
- fpu__clear(fpu, false);
-}
-
/*
* Load FPU context before returning to userspace.
*/
@@ -403,7 +415,7 @@ void switch_fpu_return(void)
if (!static_cpu_has(X86_FEATURE_FPU))
return;
- __fpregs_load_activate();
+ fpregs_restore_userregs();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 701f196d7c68..64e29927cc32 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -89,7 +89,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
/*
* Boot time FPU feature detection code:
*/
-unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+unsigned int mxcsr_feature_mask __ro_after_init = 0xffffffffu;
EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
static void __init fpu__init_system_mxcsr(void)
@@ -135,7 +135,7 @@ static void __init fpu__init_system_generic(void)
* This is inherent to the XSAVE architecture which puts all state
* components into a single, continuous memory block:
*/
-unsigned int fpu_kernel_xstate_size;
+unsigned int fpu_kernel_xstate_size __ro_after_init;
EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
/* Get alignment of the TYPE. */
@@ -216,17 +216,6 @@ static void __init fpu__init_system_xstate_size_legacy(void)
fpu_user_xstate_size = fpu_kernel_xstate_size;
}
-/*
- * Find supported xfeatures based on cpu features and command-line input.
- * This must be called after fpu__init_parse_early_param() is called and
- * xfeatures_mask is enumerated.
- */
-u64 __init fpu__get_supported_xfeatures_mask(void)
-{
- return XFEATURE_MASK_USER_SUPPORTED |
- XFEATURE_MASK_SUPERVISOR_SUPPORTED;
-}
-
/* Legacy code to initialize eager fpu mode. */
static void __init fpu__init_system_ctx_switch(void)
{
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index c413756ba89f..66ed317ebc0d 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -2,11 +2,13 @@
/*
* FPU register's regset abstraction, for ptrace, core dumps, etc.
*/
+#include <linux/sched/task_stack.h>
+#include <linux/vmalloc.h>
+
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>
-#include <linux/sched/task_stack.h>
/*
* The xstateregs_active() routine is the same as the regset_fpregs_active() routine,
@@ -26,18 +28,58 @@ int regset_xregset_fpregs_active(struct task_struct *target, const struct user_r
return 0;
}
+/*
+ * The regset get() functions are invoked from:
+ *
+ * - coredump to dump the current task's fpstate. If the current task
+ * owns the FPU then the memory state has to be synchronized and the
+ * FPU register state preserved. Otherwise fpstate is already in sync.
+ *
+ * - ptrace to dump fpstate of a stopped task, in which case the registers
+ * have already been saved to fpstate on context switch.
+ */
+static void sync_fpstate(struct fpu *fpu)
+{
+ if (fpu == &current->thread.fpu)
+ fpu_sync_fpstate(fpu);
+}
+
+/*
+ * Invalidate cached FPU registers before modifying the stopped target
+ * task's fpstate.
+ *
+ * This forces the target task on resume to restore the FPU registers from
+ * modified fpstate. Otherwise the task might skip the restore and operate
+ * with the cached FPU registers which discards the modifications.
+ */
+static void fpu_force_restore(struct fpu *fpu)
+{
+ /*
+ * Only stopped child tasks can be used to modify the FPU
+ * state in the fpstate buffer:
+ */
+ WARN_ON_FPU(fpu == &current->thread.fpu);
+
+ __fpu_invalidate_fpregs_state(fpu);
+}
+
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
struct fpu *fpu = &target->thread.fpu;
- if (!boot_cpu_has(X86_FEATURE_FXSR))
+ if (!cpu_feature_enabled(X86_FEATURE_FXSR))
return -ENODEV;
- fpu__prepare_read(fpu);
- fpstate_sanitize_xstate(fpu);
+ sync_fpstate(fpu);
+
+ if (!use_xsave()) {
+ return membuf_write(&to, &fpu->state.fxsave,
+ sizeof(fpu->state.fxsave));
+ }
- return membuf_write(&to, &fpu->state.fxsave, sizeof(struct fxregs_state));
+ copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_FX);
+ return 0;
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -45,62 +87,52 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
+ struct user32_fxsr_struct newstate;
int ret;
- if (!boot_cpu_has(X86_FEATURE_FXSR))
+ BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state));
+
+ if (!cpu_feature_enabled(X86_FEATURE_FXSR))
return -ENODEV;
- fpu__prepare_write(fpu);
- fpstate_sanitize_xstate(fpu);
+ /* No funny business with partial or oversized writes is permitted. */
+ if (pos != 0 || count != sizeof(newstate))
+ return -EINVAL;
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &fpu->state.fxsave, 0, -1);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
+ if (ret)
+ return ret;
- /*
- * mxcsr reserved bits must be masked to zero for security reasons.
- */
- fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
+ /* Do not allow an invalid MXCSR value. */
+ if (newstate.mxcsr & ~mxcsr_feature_mask)
+ return -EINVAL;
- /*
- * update the header bits in the xsave header, indicating the
- * presence of FP and SSE state.
- */
- if (boot_cpu_has(X86_FEATURE_XSAVE))
+ fpu_force_restore(fpu);
+
+ /* Copy the state */
+ memcpy(&fpu->state.fxsave, &newstate, sizeof(newstate));
+
+ /* Clear xmm8..15 */
+ BUILD_BUG_ON(sizeof(fpu->state.fxsave.xmm_space) != 16 * 16);
+ memset(&fpu->state.fxsave.xmm_space[8], 0, 8 * 16);
+
+ /* Mark FP and SSE as in use when XSAVE is enabled */
+ if (use_xsave())
fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
- return ret;
+ return 0;
}
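The mxcsr_feature_mask validated above is the MXCSR_MASK that FXSAVE reports. A userspace sketch of obtaining the same mask (assumes an FXSR-capable x86 CPU; a stored mask of zero means the architectural default 0xffbf applies):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* FXSAVE needs a 16-byte aligned, 512-byte area */
		static uint8_t buf[512] __attribute__((aligned(16)));
		uint32_t mask;

		asm volatile("fxsave (%0)" : : "r"(buf) : "memory");
		mask = *(uint32_t *)(buf + 28);	/* mxcsr_mask at byte 28 */
		if (!mask)
			mask = 0xffbf;
		printf("mxcsr_feature_mask = %#x\n", mask);
		return 0;
	}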
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
- struct fpu *fpu = &target->thread.fpu;
- struct xregs_state *xsave;
-
- if (!boot_cpu_has(X86_FEATURE_XSAVE))
+ if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
return -ENODEV;
- xsave = &fpu->state.xsave;
-
- fpu__prepare_read(fpu);
+ sync_fpstate(&target->thread.fpu);
- if (using_compacted_format()) {
- copy_xstate_to_kernel(to, xsave);
- return 0;
- } else {
- fpstate_sanitize_xstate(fpu);
- /*
- * Copy the 48 bytes defined by the software into the xsave
- * area in the thread struct, so that we can copy the whole
- * area to user using one user_regset_copyout().
- */
- memcpy(&xsave->i387.sw_reserved, xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
-
- /*
- * Copy the xstate memory layout.
- */
- return membuf_write(&to, xsave, fpu_user_xstate_size);
- }
+ copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_XSAVE);
+ return 0;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -108,44 +140,34 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
- struct xregs_state *xsave;
+ struct xregs_state *tmpbuf = NULL;
int ret;
- if (!boot_cpu_has(X86_FEATURE_XSAVE))
+ if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
return -ENODEV;
/*
* A whole standard-format XSAVE buffer is needed:
*/
- if ((pos != 0) || (count < fpu_user_xstate_size))
+ if (pos != 0 || count != fpu_user_xstate_size)
return -EFAULT;
- xsave = &fpu->state.xsave;
-
- fpu__prepare_write(fpu);
+ if (!kbuf) {
+ tmpbuf = vmalloc(count);
+ if (!tmpbuf)
+ return -ENOMEM;
- if (using_compacted_format()) {
- if (kbuf)
- ret = copy_kernel_to_xstate(xsave, kbuf);
- else
- ret = copy_user_to_xstate(xsave, ubuf);
- } else {
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
- if (!ret)
- ret = validate_user_xstate_header(&xsave->header);
+ if (copy_from_user(tmpbuf, ubuf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
}
- /*
- * mxcsr reserved bits must be masked to zero for security reasons.
- */
- xsave->i387.mxcsr &= mxcsr_feature_mask;
-
- /*
- * In case of failure, mark all states as init:
- */
- if (ret)
- fpstate_init(&fpu->state);
+ fpu_force_restore(fpu);
+ ret = copy_uabi_from_kernel_to_xstate(&fpu->state.xsave, kbuf ?: tmpbuf);
+out:
+ vfree(tmpbuf);
return ret;
}
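On the consumer side, a debugger reads and writes this regset as one whole standard-format buffer, matching the pos == 0 / count == fpu_user_xstate_size checks above. A hypothetical sketch with error handling trimmed; it assumes the target is already ptrace-stopped and that the buffer size was obtained from CPUID leaf 0xD:

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>
	#include <stdio.h>
	#include <stdlib.h>

	#ifndef NT_X86_XSTATE
	#define NT_X86_XSTATE 0x202
	#endif

	int dump_xstate(pid_t pid, size_t xstate_size)
	{
		void *buf = malloc(xstate_size);
		struct iovec iov = { .iov_base = buf, .iov_len = xstate_size };

		if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_X86_XSTATE, &iov) < 0) {
			perror("PTRACE_GETREGSET");
			free(buf);
			return -1;
		}
		/* the xstate header starts at byte 512 of the buffer */
		printf("xfeatures = %#llx\n",
		       *(unsigned long long *)((char *)buf + 512));
		free(buf);
		return 0;
	}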
@@ -221,10 +243,10 @@ static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
* FXSR floating point environment conversions.
*/
-void
-convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
+static void __convert_from_fxsr(struct user_i387_ia32_struct *env,
+ struct task_struct *tsk,
+ struct fxregs_state *fxsave)
{
- struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
int i;
@@ -258,6 +280,12 @@ convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
memcpy(&to[i], &from[i], sizeof(to[0]));
}
+void
+convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
+{
+ __convert_from_fxsr(env, tsk, &tsk->thread.fpu.state.fxsave);
+}
+
void convert_to_fxsr(struct fxregs_state *fxsave,
const struct user_i387_ia32_struct *env)
@@ -290,25 +318,29 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
{
struct fpu *fpu = &target->thread.fpu;
struct user_i387_ia32_struct env;
+ struct fxregs_state fxsave, *fx;
- fpu__prepare_read(fpu);
+ sync_fpstate(fpu);
- if (!boot_cpu_has(X86_FEATURE_FPU))
+ if (!cpu_feature_enabled(X86_FEATURE_FPU))
return fpregs_soft_get(target, regset, to);
- if (!boot_cpu_has(X86_FEATURE_FXSR)) {
+ if (!cpu_feature_enabled(X86_FEATURE_FXSR)) {
return membuf_write(&to, &fpu->state.fsave,
sizeof(struct fregs_state));
}
- fpstate_sanitize_xstate(fpu);
+ if (use_xsave()) {
+ struct membuf mb = { .p = &fxsave, .left = sizeof(fxsave) };
- if (to.left == sizeof(env)) {
- convert_from_fxsr(to.p, target);
- return 0;
+ /* Handle init state optimized xstate correctly */
+ copy_xstate_to_uabi_buf(mb, target, XSTATE_COPY_FP);
+ fx = &fxsave;
+ } else {
+ fx = &fpu->state.fxsave;
}
- convert_from_fxsr(&env, target);
+ __convert_from_fxsr(&env, target, fx);
return membuf_write(&to, &env, sizeof(env));
}
@@ -320,31 +352,32 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
struct user_i387_ia32_struct env;
int ret;
- fpu__prepare_write(fpu);
- fpstate_sanitize_xstate(fpu);
+ /* No funny business with partial or oversized writes is permitted. */
+ if (pos != 0 || count != sizeof(struct user_i387_ia32_struct))
+ return -EINVAL;
- if (!boot_cpu_has(X86_FEATURE_FPU))
+ if (!cpu_feature_enabled(X86_FEATURE_FPU))
return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
- if (!boot_cpu_has(X86_FEATURE_FXSR))
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &fpu->state.fsave, 0,
- -1);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
+ if (ret)
+ return ret;
- if (pos > 0 || count < sizeof(env))
- convert_from_fxsr(&env, target);
+ fpu_force_restore(fpu);
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
- if (!ret)
- convert_to_fxsr(&target->thread.fpu.state.fxsave, &env);
+ if (cpu_feature_enabled(X86_FEATURE_FXSR))
+ convert_to_fxsr(&fpu->state.fxsave, &env);
+ else
+ memcpy(&fpu->state.fsave, &env, sizeof(env));
/*
- * update the header bit in the xsave header, indicating the
+ * Update the header bit in the xsave header, indicating the
* presence of FP.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVE))
+ if (cpu_feature_enabled(X86_FEATURE_XSAVE))
fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP;
- return ret;
+
+ return 0;
}
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index b7b92cdf3add..445c57c9c539 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -15,29 +15,30 @@
#include <asm/sigframe.h>
#include <asm/trace/fpu.h>
-static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
+static struct _fpx_sw_bytes fx_sw_reserved __ro_after_init;
+static struct _fpx_sw_bytes fx_sw_reserved_ia32 __ro_after_init;
/*
* Check for the presence of extended state information in the
* user fpstate pointer in the sigcontext.
*/
-static inline int check_for_xstate(struct fxregs_state __user *buf,
- void __user *fpstate,
- struct _fpx_sw_bytes *fx_sw)
+static inline int check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
+ struct _fpx_sw_bytes *fx_sw)
{
int min_xstate_size = sizeof(struct fxregs_state) +
sizeof(struct xstate_header);
+ void __user *fpstate = fxbuf;
unsigned int magic2;
- if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
- return -1;
+ if (__copy_from_user(fx_sw, &fxbuf->sw_reserved[0], sizeof(*fx_sw)))
+ return -EFAULT;
/* Check for the first magic field and other error scenarios. */
if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
fx_sw->xstate_size < min_xstate_size ||
fx_sw->xstate_size > fpu_user_xstate_size ||
fx_sw->xstate_size > fx_sw->extended_size)
- return -1;
+ goto setfx;
/*
* Check for the presence of second magic word at the end of memory
@@ -45,10 +46,18 @@ static inline int check_for_xstate(struct fxregs_state __user *buf,
* fpstate layout with out copying the extended state information
* in the memory layout.
*/
- if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
- || magic2 != FP_XSTATE_MAGIC2)
- return -1;
+ if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size)))
+ return -EFAULT;
+ if (likely(magic2 == FP_XSTATE_MAGIC2))
+ return 0;
+setfx:
+ trace_x86_fpu_xstate_check_failed(&current->thread.fpu);
+
+ /* Set the parameters for fx only state */
+ fx_sw->magic1 = 0;
+ fx_sw->xstate_size = sizeof(struct fxregs_state);
+ fx_sw->xfeatures = XFEATURE_MASK_FPSSE;
return 0;
}
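The sw_reserved words validated here are visible to userspace in the signal frame as well. A sketch of a 64-bit handler inspecting them; the 464-byte offset of sw_reserved inside the 512-byte FXSAVE image is an assumption based on the UAPI sigcontext layout:

	#include <signal.h>
	#include <ucontext.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define FP_XSTATE_MAGIC1 0x46505853U

	/* mirrors the kernel's struct _fpx_sw_bytes */
	struct fpx_sw_bytes {
		uint32_t magic1;
		uint32_t extended_size;
		uint64_t xfeatures;
		uint32_t xstate_size;
		uint32_t padding[7];
	};

	static void handler(int sig, siginfo_t *si, void *ctx)
	{
		ucontext_t *uc = ctx;
		struct fpx_sw_bytes sw;

		if (!uc->uc_mcontext.fpregs)
			return;
		/* sw_reserved lives at byte 464 of the FXSAVE image */
		memcpy(&sw, (char *)uc->uc_mcontext.fpregs + 464, sizeof(sw));

		if (sw.magic1 == FP_XSTATE_MAGIC1)
			printf("extended frame: size=%u xfeatures=%#llx\n",
			       sw.xstate_size, (unsigned long long)sw.xfeatures);
		else
			printf("legacy fx-only frame\n");
	}

	int main(void)
	{
		struct sigaction sa = { .sa_sigaction = handler,
					.sa_flags = SA_SIGINFO };

		sigaction(SIGUSR1, &sa, NULL);
		raise(SIGUSR1);
		return 0;
	}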
@@ -64,7 +73,7 @@ static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
fpregs_lock();
if (!test_thread_flag(TIF_NEED_FPU_LOAD))
- copy_fxregs_to_kernel(&tsk->thread.fpu);
+ fxsave(&tsk->thread.fpu.state.fxsave);
fpregs_unlock();
convert_from_fxsr(&env, tsk);
@@ -129,11 +138,11 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
int err;
if (use_xsave())
- err = copy_xregs_to_user(buf);
+ err = xsave_to_user_sigframe(buf);
else if (use_fxsr())
- err = copy_fxregs_to_user((struct fxregs_state __user *) buf);
+ err = fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
else
- err = copy_fregs_to_user((struct fregs_state __user *) buf);
+ err = fnsave_to_user_sigframe((struct fregs_state __user *) buf);
if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
err = -EFAULT;
@@ -188,7 +197,7 @@ retry:
*/
fpregs_lock();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
- __fpregs_load_activate();
+ fpregs_restore_userregs();
pagefault_disable();
ret = copy_fpregs_to_sigframe(buf_fx);
@@ -211,270 +220,202 @@ retry:
return 0;
}
-static inline void
-sanitize_restored_user_xstate(union fpregs_state *state,
- struct user_i387_ia32_struct *ia32_env,
- u64 user_xfeatures, int fx_only)
+static int __restore_fpregs_from_user(void __user *buf, u64 xrestore,
+ bool fx_only)
{
- struct xregs_state *xsave = &state->xsave;
- struct xstate_header *header = &xsave->header;
-
if (use_xsave()) {
- /*
- * Clear all feature bits which are not set in
- * user_xfeatures and clear all extended features
- * for fx_only mode.
- */
- u64 mask = fx_only ? XFEATURE_MASK_FPSSE : user_xfeatures;
+ u64 init_bv = xfeatures_mask_uabi() & ~xrestore;
+ int ret;
- /*
- * Supervisor state has to be preserved. The sigframe
- * restore can only modify user features, i.e. @mask
- * cannot contain them.
- */
- header->xfeatures &= mask | xfeatures_mask_supervisor();
- }
-
- if (use_fxsr()) {
- /*
- * mscsr reserved bits must be masked to zero for security
- * reasons.
- */
- xsave->i387.mxcsr &= mxcsr_feature_mask;
+ if (likely(!fx_only))
+ ret = xrstor_from_user_sigframe(buf, xrestore);
+ else
+ ret = fxrstor_from_user_sigframe(buf);
- if (ia32_env)
- convert_to_fxsr(&state->fxsave, ia32_env);
+ if (!ret && unlikely(init_bv))
+ os_xrstor(&init_fpstate.xsave, init_bv);
+ return ret;
+ } else if (use_fxsr()) {
+ return fxrstor_from_user_sigframe(buf);
+ } else {
+ return frstor_from_user_sigframe(buf);
}
}
/*
- * Restore the extended state if present. Otherwise, restore the FP/SSE state.
+ * Attempt to restore the FPU registers directly from user memory.
+ * Pagefaults are handled and any errors returned are fatal.
*/
-static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
+static int restore_fpregs_from_user(void __user *buf, u64 xrestore,
+ bool fx_only, unsigned int size)
{
- u64 init_bv;
- int r;
+ struct fpu *fpu = &current->thread.fpu;
+ int ret;
- if (use_xsave()) {
- if (fx_only) {
- init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;
-
- r = copy_user_to_fxregs(buf);
- if (!r)
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
- return r;
- } else {
- init_bv = xfeatures_mask_user() & ~xbv;
-
- r = copy_user_to_xregs(buf, xbv);
- if (!r && unlikely(init_bv))
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
- return r;
- }
- } else if (use_fxsr()) {
- return copy_user_to_fxregs(buf);
- } else
- return copy_user_to_fregs(buf);
+retry:
+ fpregs_lock();
+ pagefault_disable();
+ ret = __restore_fpregs_from_user(buf, xrestore, fx_only);
+ pagefault_enable();
+
+ if (unlikely(ret)) {
+ /*
+ * The above did an FPU restore operation, restricted to
+ * the user portion of the registers, and failed, but the
+ * microcode might have modified the FPU registers
+ * nevertheless.
+ *
+ * If the FPU registers do not belong to current, then
+ * invalidate the FPU register state otherwise the task
+ * might preempt current and return to user space with
+ * corrupted FPU registers.
+ */
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ __cpu_invalidate_fpregs_state();
+ fpregs_unlock();
+
+ /* Try to handle #PF, but anything else is fatal. */
+ if (ret != -EFAULT)
+ return -EINVAL;
+
+ ret = fault_in_pages_readable(buf, size);
+ if (!ret)
+ goto retry;
+ return ret;
+ }
+
+ /*
+ * Restore supervisor states: previous context switch etc has done
+ * XSAVES and saved the supervisor states in the kernel buffer from
+ * which they can be restored now.
+ *
+ * It would be optimal to handle this with a single XRSTORS, but
+ * this does not work because the rest of the FPU registers have
+ * been restored from a user buffer directly.
+ */
+ if (test_thread_flag(TIF_NEED_FPU_LOAD) && xfeatures_mask_supervisor())
+ os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
+
+ fpregs_mark_activate();
+ fpregs_unlock();
+ return 0;
}
-static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
+ bool ia32_fxstate)
{
- struct user_i387_ia32_struct *envp = NULL;
int state_size = fpu_kernel_xstate_size;
- int ia32_fxstate = (buf != buf_fx);
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env;
u64 user_xfeatures = 0;
- int fx_only = 0;
- int ret = 0;
-
- ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
- IS_ENABLED(CONFIG_IA32_EMULATION));
-
- if (!buf) {
- fpu__clear_user_states(fpu);
- return 0;
- }
-
- if (!access_ok(buf, size)) {
- ret = -EACCES;
- goto out;
- }
-
- if (!static_cpu_has(X86_FEATURE_FPU)) {
- ret = fpregs_soft_set(current, NULL, 0,
- sizeof(struct user_i387_ia32_struct),
- NULL, buf);
- goto out;
- }
+ bool fx_only = false;
+ int ret;
if (use_xsave()) {
struct _fpx_sw_bytes fx_sw_user;
- if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
- /*
- * Couldn't find the extended state information in the
- * memory layout. Restore just the FP/SSE and init all
- * the other extended state.
- */
- state_size = sizeof(struct fxregs_state);
- fx_only = 1;
- trace_x86_fpu_xstate_check_failed(fpu);
- } else {
- state_size = fx_sw_user.xstate_size;
- user_xfeatures = fx_sw_user.xfeatures;
- }
- }
- if ((unsigned long)buf_fx % 64)
- fx_only = 1;
+ ret = check_xstate_in_sigframe(buf_fx, &fx_sw_user);
+ if (unlikely(ret))
+ return ret;
- if (!ia32_fxstate) {
- /*
- * Attempt to restore the FPU registers directly from user
- * memory. For that to succeed, the user access cannot cause
- * page faults. If it does, fall back to the slow path below,
- * going through the kernel buffer with the enabled pagefault
- * handler.
- */
- fpregs_lock();
- pagefault_disable();
- ret = copy_user_to_fpregs_zeroing(buf_fx, user_xfeatures, fx_only);
- pagefault_enable();
- if (!ret) {
-
- /*
- * Restore supervisor states: previous context switch
- * etc has done XSAVES and saved the supervisor states
- * in the kernel buffer from which they can be restored
- * now.
- *
- * We cannot do a single XRSTORS here - which would
- * be nice - because the rest of the FPU registers are
- * being restored from a user buffer directly. The
- * single XRSTORS happens below, when the user buffer
- * has been copied to the kernel one.
- */
- if (test_thread_flag(TIF_NEED_FPU_LOAD) &&
- xfeatures_mask_supervisor())
- copy_kernel_to_xregs(&fpu->state.xsave,
- xfeatures_mask_supervisor());
- fpregs_mark_activate();
- fpregs_unlock();
- return 0;
- }
-
- /*
- * The above did an FPU restore operation, restricted to
- * the user portion of the registers, and failed, but the
- * microcode might have modified the FPU registers
- * nevertheless.
- *
- * If the FPU registers do not belong to current, then
- * invalidate the FPU register state otherwise the task might
- * preempt current and return to user space with corrupted
- * FPU registers.
- *
- * In case current owns the FPU registers then no further
- * action is required. The fixup below will handle it
- * correctly.
- */
- if (test_thread_flag(TIF_NEED_FPU_LOAD))
- __cpu_invalidate_fpregs_state();
-
- fpregs_unlock();
+ fx_only = !fx_sw_user.magic1;
+ state_size = fx_sw_user.xstate_size;
+ user_xfeatures = fx_sw_user.xfeatures;
} else {
+ user_xfeatures = XFEATURE_MASK_FPSSE;
+ }
+
+ if (likely(!ia32_fxstate)) {
/*
- * For 32-bit frames with fxstate, copy the fxstate so it can
- * be reconstructed later.
+ * Attempt to restore the FPU registers directly from user
+ * memory. For that to succeed, the user access cannot cause page
+ * faults. If it does, fall back to the slow path below, going
+ * through the kernel buffer with the enabled pagefault handler.
*/
- ret = __copy_from_user(&env, buf, sizeof(env));
- if (ret)
- goto out;
- envp = &env;
+ return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
+ state_size);
}
/*
+ * Copy the legacy state because the FP portion of the FX frame has
+ * to be ignored for histerical raisins. The legacy state is folded
+ * in once the larger state has been copied.
+ */
+ ret = __copy_from_user(&env, buf, sizeof(env));
+ if (ret)
+ return ret;
+
+ /*
* By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is
* not modified on context switch and that the xstate is considered
* to be loaded again on return to userland (overriding last_cpu avoids
* the optimisation).
*/
fpregs_lock();
-
if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
-
/*
- * Supervisor states are not modified by user space input. Save
- * current supervisor states first and invalidate the FPU regs.
+ * If supervisor states are available then save the
+ * hardware state in current's fpstate so that the
+ * supervisor state is preserved. Save the full state for
+ * simplicity. There is no point in optimizing this by only
+ * saving the supervisor states and then shuffling them to
+ * the right place in memory. It's ia32 mode. Shrug.
*/
if (xfeatures_mask_supervisor())
- copy_supervisor_to_kernel(&fpu->state.xsave);
+ os_xsave(&fpu->state.xsave);
set_thread_flag(TIF_NEED_FPU_LOAD);
}
__fpu_invalidate_fpregs_state(fpu);
+ __cpu_invalidate_fpregs_state();
fpregs_unlock();
if (use_xsave() && !fx_only) {
- u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
-
- ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
+ ret = copy_sigframe_from_user_to_xstate(&fpu->state.xsave, buf_fx);
if (ret)
- goto out;
+ return ret;
+ } else {
+ if (__copy_from_user(&fpu->state.fxsave, buf_fx,
+ sizeof(fpu->state.fxsave)))
+ return -EFAULT;
- sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
- fx_only);
+ /* Reject invalid MXCSR values. */
+ if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
+ return -EINVAL;
- fpregs_lock();
- if (unlikely(init_bv))
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+ /* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
+ if (use_xsave())
+ fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
+ }
+ /* Fold the legacy FP storage */
+ convert_to_fxsr(&fpu->state.fxsave, &env);
+
+ fpregs_lock();
+ if (use_xsave()) {
/*
- * Restore previously saved supervisor xstates along with
- * copied-in user xstates.
+ * Remove all UABI feature bits not set in user_xfeatures
+ * from the memory xstate header which makes the full
+ * restore below bring them into init state. This works for
+ * fx_only mode as well because that has only FP and SSE
+ * set in user_xfeatures.
+ *
+ * Preserve supervisor states!
*/
- ret = copy_kernel_to_xregs_err(&fpu->state.xsave,
- user_xfeatures | xfeatures_mask_supervisor());
+ u64 mask = user_xfeatures | xfeatures_mask_supervisor();
- } else if (use_fxsr()) {
- ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
-
- sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
- fx_only);
-
- fpregs_lock();
- if (use_xsave()) {
- u64 init_bv;
-
- init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
- }
-
- ret = copy_kernel_to_fxregs_err(&fpu->state.fxsave);
+ fpu->state.xsave.header.xfeatures &= mask;
+ ret = os_xrstor_safe(&fpu->state.xsave, xfeatures_mask_all);
} else {
- ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
- if (ret)
- goto out;
-
- fpregs_lock();
- ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
+ ret = fxrstor_safe(&fpu->state.fxsave);
}
- if (!ret)
+
+ if (likely(!ret))
fpregs_mark_activate();
- else
- fpregs_deactivate(fpu);
- fpregs_unlock();
-out:
- if (ret)
- fpu__clear_user_states(fpu);
+ fpregs_unlock();
return ret;
}
-
static inline int xstate_sigframe_size(void)
{
return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
@@ -486,15 +427,47 @@ static inline int xstate_sigframe_size(void)
*/
int fpu__restore_sig(void __user *buf, int ia32_frame)
{
+ unsigned int size = xstate_sigframe_size();
+ struct fpu *fpu = &current->thread.fpu;
void __user *buf_fx = buf;
- int size = xstate_sigframe_size();
+ bool ia32_fxstate = false;
+ int ret;
+ if (unlikely(!buf)) {
+ fpu__clear_user_states(fpu);
+ return 0;
+ }
+
+ ia32_frame &= (IS_ENABLED(CONFIG_X86_32) ||
+ IS_ENABLED(CONFIG_IA32_EMULATION));
+
+ /*
+ * Only FXSR enabled systems need the FX state quirk.
+ * FRSTOR does not need it and can use the fast path.
+ */
if (ia32_frame && use_fxsr()) {
buf_fx = buf + sizeof(struct fregs_state);
size += sizeof(struct fregs_state);
+ ia32_fxstate = true;
+ }
+
+ if (!access_ok(buf, size)) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ if (!IS_ENABLED(CONFIG_X86_64) && !cpu_feature_enabled(X86_FEATURE_FPU)) {
+ ret = fpregs_soft_set(current, NULL, 0,
+ sizeof(struct user_i387_ia32_struct),
+ NULL, buf);
+ } else {
+ ret = __fpu_restore_sig(buf, buf_fx, ia32_fxstate);
}
- return __fpu__restore_sig(buf, buf_fx, size);
+out:
+ if (unlikely(ret))
+ fpu__clear_user_states(fpu);
+ return ret;
}
unsigned long
@@ -513,6 +486,25 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
return sp;
}
+
+unsigned long fpu__get_fpstate_size(void)
+{
+ unsigned long ret = xstate_sigframe_size();
+
+ /*
+ * This space is needed on (most) 32-bit kernels, or when a 32-bit
+ * app is running on a 64-bit kernel. To keep things simple, just
+ * assume the worst case and always include space for 'freg_state',
+ * even for 64-bit apps on 64-bit kernels. This wastes a bit of
+ * space, but keeps the code simple.
+ */
+ if ((IS_ENABLED(CONFIG_IA32_EMULATION) ||
+ IS_ENABLED(CONFIG_X86_32)) && use_fxsr())
+ ret += sizeof(struct fregs_state);
+
+ return ret;
+}
+
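This size feeds the kernel's signal frame sizing. On the userspace side, code allocating an alternate signal stack can consult AT_MINSIGSTKSZ, whose x86 support landed in the same development cycle. A sketch:

	#include <sys/auxv.h>
	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>

	#ifndef AT_MINSIGSTKSZ
	#define AT_MINSIGSTKSZ 51	/* older headers may lack it */
	#endif

	int main(void)
	{
		unsigned long min = getauxval(AT_MINSIGSTKSZ);
		/* kernel minimum (or legacy SIGSTKSZ) plus headroom */
		size_t size = (min ? min : SIGSTKSZ) + 64 * 1024;
		stack_t ss = { .ss_sp = malloc(size), .ss_size = size };

		if (sigaltstack(&ss, NULL))
			perror("sigaltstack");
		printf("AT_MINSIGSTKSZ=%lu, sigaltstack size=%zu\n", min, size);
		return 0;
	}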
/*
* Prepare the SW reserved portion of the fxsave memory layout, indicating
* the presence of the extended state information in the memory layout
@@ -526,7 +518,7 @@ void fpu__init_prepare_fx_sw_frame(void)
fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
fx_sw_reserved.extended_size = size;
- fx_sw_reserved.xfeatures = xfeatures_mask_user();
+ fx_sw_reserved.xfeatures = xfeatures_mask_uabi();
fx_sw_reserved.xstate_size = fpu_user_xstate_size;
if (IS_ENABLED(CONFIG_IA32_EMULATION) ||
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 1cadb2faf740..c8def1b7f8fb 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -59,19 +59,24 @@ static short xsave_cpuid_features[] __initdata = {
* This represents the full set of bits that should ever be set in a kernel
* XSAVE buffer, both supervisor and user xstates.
*/
-u64 xfeatures_mask_all __read_mostly;
-
-static unsigned int xstate_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_sizes[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_comp_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
+u64 xfeatures_mask_all __ro_after_init;
+EXPORT_SYMBOL_GPL(xfeatures_mask_all);
+
+static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init =
+ { [ 0 ... XFEATURE_MAX - 1] = -1};
+static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init =
+ { [ 0 ... XFEATURE_MAX - 1] = -1};
+static unsigned int xstate_comp_offsets[XFEATURE_MAX] __ro_after_init =
+ { [ 0 ... XFEATURE_MAX - 1] = -1};
+static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] __ro_after_init =
+ { [ 0 ... XFEATURE_MAX - 1] = -1};
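The offsets and sizes cached in these tables are enumerated from CPUID leaf 0xD. A userspace sketch of the same enumeration (assumes GCC 7+ or clang for __get_cpuid_count; unsupported sub-leaves report a zero size):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx, i;

		for (i = 2; i < 19; i++) {	/* 0 and 1 are legacy FP/SSE */
			if (!__get_cpuid_count(0xd, i, &eax, &ebx, &ecx, &edx))
				break;
			if (eax)	/* EAX = size, EBX = standard offset */
				printf("xfeature %2u: size %4u, offset %4u\n",
				       i, eax, ebx);
		}
		return 0;
	}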
/*
* The XSAVE area of kernel can be in standard or compacted format;
* it is always in standard format for user mode. This is the user
* mode standard format size used for signal and ptrace frames.
*/
-unsigned int fpu_user_xstate_size;
+unsigned int fpu_user_xstate_size __ro_after_init;
/*
* Return whether the system supports a given xfeature.
@@ -125,103 +130,13 @@ static bool xfeature_is_supervisor(int xfeature_nr)
}
/*
- * When executing XSAVEOPT (or other optimized XSAVE instructions), if
- * a processor implementation detects that an FPU state component is still
- * (or is again) in its initialized state, it may clear the corresponding
- * bit in the header.xfeatures field, and can skip the writeout of registers
- * to the corresponding memory layout.
- *
- * This means that when the bit is zero, the state component might still contain
- * some previous - non-initialized register state.
- *
- * Before writing xstate information to user-space we sanitize those components,
- * to always ensure that the memory layout of a feature will be in the init state
- * if the corresponding header bit is zero. This is to ensure that user-space doesn't
- * see some stale state in the memory layout during signal handling, debugging etc.
- */
-void fpstate_sanitize_xstate(struct fpu *fpu)
-{
- struct fxregs_state *fx = &fpu->state.fxsave;
- int feature_bit;
- u64 xfeatures;
-
- if (!use_xsaveopt())
- return;
-
- xfeatures = fpu->state.xsave.header.xfeatures;
-
- /*
- * None of the feature bits are in init state. So nothing else
- * to do for us, as the memory layout is up to date.
- */
- if ((xfeatures & xfeatures_mask_all) == xfeatures_mask_all)
- return;
-
- /*
- * FP is in init state
- */
- if (!(xfeatures & XFEATURE_MASK_FP)) {
- fx->cwd = 0x37f;
- fx->swd = 0;
- fx->twd = 0;
- fx->fop = 0;
- fx->rip = 0;
- fx->rdp = 0;
- memset(fx->st_space, 0, sizeof(fx->st_space));
- }
-
- /*
- * SSE is in init state
- */
- if (!(xfeatures & XFEATURE_MASK_SSE))
- memset(fx->xmm_space, 0, sizeof(fx->xmm_space));
-
- /*
- * First two features are FPU and SSE, which above we handled
- * in a special way already:
- */
- feature_bit = 0x2;
- xfeatures = (xfeatures_mask_user() & ~xfeatures) >> 2;
-
- /*
- * Update all the remaining memory layouts according to their
- * standard xstate layout, if their header bit is in the init
- * state:
- */
- while (xfeatures) {
- if (xfeatures & 0x1) {
- int offset = xstate_comp_offsets[feature_bit];
- int size = xstate_sizes[feature_bit];
-
- memcpy((void *)fx + offset,
- (void *)&init_fpstate.xsave + offset,
- size);
- }
-
- xfeatures >>= 1;
- feature_bit++;
- }
-}
-
-/*
* Enable the extended processor state save/restore feature.
* Called once per CPU onlining.
*/
void fpu__init_cpu_xstate(void)
{
- u64 unsup_bits;
-
if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask_all)
return;
- /*
- * Unsupported supervisor xstates should not be found in
- * the xfeatures mask.
- */
- unsup_bits = xfeatures_mask_all & XFEATURE_MASK_SUPERVISOR_UNSUPPORTED;
- WARN_ONCE(unsup_bits, "x86/fpu: Found unsupported supervisor xstates: 0x%llx\n",
- unsup_bits);
-
- xfeatures_mask_all &= ~XFEATURE_MASK_SUPERVISOR_UNSUPPORTED;
cr4_set_bits(X86_CR4_OSXSAVE);
@@ -230,14 +145,14 @@ void fpu__init_cpu_xstate(void)
* managed by XSAVE{C, OPT, S} and XRSTOR{S}. Only XSAVE user
* states can be set here.
*/
- xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_user());
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_uabi());
/*
* MSR_IA32_XSS sets supervisor states managed by XSAVES.
*/
if (boot_cpu_has(X86_FEATURE_XSAVES)) {
wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
- xfeatures_mask_dynamic());
+ xfeatures_mask_independent());
}
}
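The XCR0 value programmed here is directly observable from ring 3, unlike MSR_IA32_XSS. A sketch; XGETBV raises #UD unless the kernel has set CR4.OSXSAVE, i.e. unless fpu__init_cpu_xstate() ran on an XSAVE-capable CPU:

	#include <stdio.h>

	int main(void)
	{
		unsigned int lo, hi;

		/* XGETBV with ECX=0 reads XCR0 */
		asm volatile("xgetbv" : "=a"(lo), "=d"(hi) : "c"(0));
		printf("XCR0 = %#llx\n", ((unsigned long long)hi << 32) | lo);
		return 0;
	}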
@@ -486,7 +401,7 @@ static void __init setup_init_fpu_buf(void)
/*
* Init all the features state with header.xfeatures being 0x0
*/
- copy_kernel_to_xregs_booting(&init_fpstate.xsave);
+ os_xrstor_booting(&init_fpstate.xsave);
/*
* All components are now in init state. Read the state back so
@@ -535,25 +450,11 @@ int xfeature_size(int xfeature_nr)
return eax;
}
-/*
- * 'XSAVES' implies two different things:
- * 1. saving of supervisor/system state
- * 2. using the compacted format
- *
- * Use this function when dealing with the compacted format so
- * that it is obvious which aspect of 'XSAVES' is being handled
- * by the calling code.
- */
-int using_compacted_format(void)
-{
- return boot_cpu_has(X86_FEATURE_XSAVES);
-}
-
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
-int validate_user_xstate_header(const struct xstate_header *hdr)
+static int validate_user_xstate_header(const struct xstate_header *hdr)
{
/* No unknown or supervisor features may be set */
- if (hdr->xfeatures & ~xfeatures_mask_user())
+ if (hdr->xfeatures & ~xfeatures_mask_uabi())
return -EINVAL;
/* Userspace must use the uncompacted format */
@@ -651,7 +552,7 @@ static void check_xstate_against_struct(int nr)
* how large the XSAVE buffer needs to be. We are recalculating
* it to be safe.
*
- * Dynamic XSAVE features allocate their own buffers and are not
+ * Independent XSAVE features allocate their own buffers and are not
* covered by these checks. Only the size of the buffer for task->fpu
* is checked here.
*/
@@ -667,9 +568,9 @@ static void do_extra_xstate_size_checks(void)
check_xstate_against_struct(i);
/*
* Supervisor state components can be managed only by
- * XSAVES, which is compacted-format only.
+ * XSAVES.
*/
- if (!using_compacted_format())
+ if (!cpu_feature_enabled(X86_FEATURE_XSAVES))
XSTATE_WARN_ON(xfeature_is_supervisor(i));
/* Align from the end of the previous feature */
@@ -679,9 +580,9 @@ static void do_extra_xstate_size_checks(void)
* The offset of a given state in the non-compacted
* format is given to us in a CPUID leaf. We check
* them for being ordered (increasing offsets) in
- * setup_xstate_features().
+ * setup_xstate_features(). XSAVES uses compacted format.
*/
- if (!using_compacted_format())
+ if (!cpu_feature_enabled(X86_FEATURE_XSAVES))
paranoid_xstate_size = xfeature_uncompacted_offset(i);
/*
* The compacted-format offset always depends on where
@@ -717,18 +618,18 @@ static unsigned int __init get_xsaves_size(void)
}
/*
- * Get the total size of the enabled xstates without the dynamic supervisor
+ * Get the total size of the enabled xstates without the independent supervisor
* features.
*/
-static unsigned int __init get_xsaves_size_no_dynamic(void)
+static unsigned int __init get_xsaves_size_no_independent(void)
{
- u64 mask = xfeatures_mask_dynamic();
+ u64 mask = xfeatures_mask_independent();
unsigned int size;
if (!mask)
return get_xsaves_size();
- /* Disable dynamic features. */
+ /* Disable independent features. */
wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
/*
@@ -737,7 +638,7 @@ static unsigned int __init get_xsaves_size_no_dynamic(void)
*/
size = get_xsaves_size();
- /* Re-enable dynamic features so XSAVES will work on them again. */
+ /* Re-enable independent features so XSAVES will work on them again. */
wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);
return size;
@@ -780,7 +681,7 @@ static int __init init_xstate_size(void)
xsave_size = get_xsave_size();
if (boot_cpu_has(X86_FEATURE_XSAVES))
- possible_xstate_size = get_xsaves_size_no_dynamic();
+ possible_xstate_size = get_xsaves_size_no_independent();
else
possible_xstate_size = xsave_size;
@@ -821,6 +722,7 @@ void __init fpu__init_system_xstate(void)
{
unsigned int eax, ebx, ecx, edx;
static int on_boot_cpu __initdata = 1;
+ u64 xfeatures;
int err;
int i;
@@ -855,7 +757,7 @@ void __init fpu__init_system_xstate(void)
cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
xfeatures_mask_all |= ecx + ((u64)edx << 32);
- if ((xfeatures_mask_user() & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
+ if ((xfeatures_mask_uabi() & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
/*
* This indicates that something really unexpected happened
* with the enumeration. Disable XSAVE and try to continue
@@ -874,7 +776,11 @@ void __init fpu__init_system_xstate(void)
xfeatures_mask_all &= ~BIT_ULL(i);
}
- xfeatures_mask_all &= fpu__get_supported_xfeatures_mask();
+ xfeatures_mask_all &= XFEATURE_MASK_USER_SUPPORTED |
+ XFEATURE_MASK_SUPERVISOR_SUPPORTED;
+
+ /* Store it for paranoia check at the end */
+ xfeatures = xfeatures_mask_all;
/* Enable xstate instructions to be able to continue with initialization: */
fpu__init_cpu_xstate();
@@ -886,14 +792,24 @@ void __init fpu__init_system_xstate(void)
* Update info used for ptrace frames; use standard-format size and no
* supervisor xstates:
*/
- update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask_user());
+ update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask_uabi());
fpu__init_prepare_fx_sw_frame();
setup_init_fpu_buf();
setup_xstate_comp_offsets();
setup_supervisor_only_offsets();
- print_xstate_offset_size();
+ /*
+ * Paranoia check whether something in the setup modified the
+ * xfeatures mask.
+ */
+ if (xfeatures != xfeatures_mask_all) {
+ pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init, disabling XSAVE\n",
+ xfeatures, xfeatures_mask_all);
+ goto out_disable;
+ }
+
+ print_xstate_offset_size();
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
xfeatures_mask_all,
fpu_kernel_xstate_size,
@@ -913,16 +829,16 @@ void fpu__resume_cpu(void)
/*
* Restore XCR0 on xsave capable CPUs:
*/
- if (boot_cpu_has(X86_FEATURE_XSAVE))
- xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_user());
+ if (cpu_feature_enabled(X86_FEATURE_XSAVE))
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_uabi());
/*
* Restore IA32_XSS. The same CPUID bit enumerates support
* of XSAVES and MSR_IA32_XSS.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+ if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
- xfeatures_mask_dynamic());
+ xfeatures_mask_independent());
}
}
@@ -990,36 +906,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
}
EXPORT_SYMBOL_GPL(get_xsave_addr);
-/*
- * This wraps up the common operations that need to occur when retrieving
- * data from xsave state. It first ensures that the current task was
- * using the FPU and retrieves the data in to a buffer. It then calculates
- * the offset of the requested field in the buffer.
- *
- * This function is safe to call whether the FPU is in use or not.
- *
- * Note that this only works on the current task.
- *
- * Inputs:
- * @xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
- * XFEATURE_SSE, etc...)
- * Output:
- * address of the state in the xsave area or NULL if the state
- * is not present or is in its 'init state'.
- */
-const void *get_xsave_field_ptr(int xfeature_nr)
-{
- struct fpu *fpu = &current->thread.fpu;
-
- /*
- * fpu__save() takes the CPU's xstate registers
- * and saves them off to the 'fpu memory buffer.
- */
- fpu__save(fpu);
-
- return get_xsave_addr(&fpu->state.xsave, xfeature_nr);
-}
-
#ifdef CONFIG_ARCH_HAS_PKEYS
/*
@@ -1027,17 +913,16 @@ const void *get_xsave_field_ptr(int xfeature_nr)
* rights for @pkey to @init_val.
*/
int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
- unsigned long init_val)
+ unsigned long init_val)
{
- u32 old_pkru;
- int pkey_shift = (pkey * PKRU_BITS_PER_PKEY);
- u32 new_pkru_bits = 0;
+ u32 old_pkru, new_pkru_bits = 0;
+ int pkey_shift;
/*
* This check implies XSAVE support. OSPKE only gets
* set if we enable XSAVE and we enable PKU in XCR0.
*/
- if (!boot_cpu_has(X86_FEATURE_OSPKE))
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
return -EINVAL;
/*
@@ -1045,7 +930,8 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
* values originating from in-kernel users. Complain
* if a bad value is observed.
*/
- WARN_ON_ONCE(pkey >= arch_max_pkey());
+ if (WARN_ON_ONCE(pkey >= arch_max_pkey()))
+ return -EINVAL;
/* Set the bits we need in PKRU: */
if (init_val & PKEY_DISABLE_ACCESS)
@@ -1054,6 +940,7 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
new_pkru_bits |= PKRU_WD_BIT;
/* Shift the bits in to the correct place in PKRU for pkey: */
+ pkey_shift = pkey * PKRU_BITS_PER_PKEY;
new_pkru_bits <<= pkey_shift;
/* Get old PKRU and mask off any old bits in place: */
@@ -1067,170 +954,178 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
}
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
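
For reference while reading arch_set_user_pkey_access(): each pkey owns two adjacent PKRU bits, access-disable and write-disable, at bit position pkey * PKRU_BITS_PER_PKEY. A self-contained sketch, assuming the uapi constant values (PKRU_AD_BIT == 0x1, PKRU_WD_BIT == 0x2, PKRU_BITS_PER_PKEY == 2):

	#include <stdint.h>

	#define PKRU_AD_BIT		0x1u	/* access-disable */
	#define PKRU_WD_BIT		0x2u	/* write-disable  */
	#define PKRU_BITS_PER_PKEY	2

	static uint32_t pkru_set_pkey(uint32_t pkru, int pkey,
				      int disable_access, int disable_write)
	{
		int shift = pkey * PKRU_BITS_PER_PKEY;
		uint32_t bits = 0;

		if (disable_access)
			bits |= PKRU_AD_BIT;
		if (disable_write)
			bits |= PKRU_WD_BIT;

		/* Clear the old two bits for this pkey, then merge the new ones: */
		pkru &= ~((PKRU_AD_BIT | PKRU_WD_BIT) << shift);
		return pkru | (bits << shift);
	}

For example, pkru_set_pkey(0, 1, 1, 0) returns 0x4: the access-disable bit of pkey 1.
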
-/*
- * Weird legacy quirk: SSE and YMM states store information in the
- * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP
- * area is marked as unused in the xfeatures header, we need to copy
- * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use.
- */
-static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
+static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
+ void *init_xstate, unsigned int size)
{
- if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
- return false;
-
- if (xfeatures & XFEATURE_MASK_FP)
- return false;
-
- return true;
-}
-
-static void fill_gap(struct membuf *to, unsigned *last, unsigned offset)
-{
- if (*last >= offset)
- return;
- membuf_write(to, (void *)&init_fpstate.xsave + *last, offset - *last);
- *last = offset;
-}
-
-static void copy_part(struct membuf *to, unsigned *last, unsigned offset,
- unsigned size, void *from)
-{
- fill_gap(to, last, offset);
- membuf_write(to, from, size);
- *last = offset + size;
+ membuf_write(to, from_xstate ? xstate : init_xstate, size);
}
-/*
- * Convert from kernel XSAVES compacted format to standard format and copy
- * to a kernel-space ptrace buffer.
+/**
+ * copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
+ * @to: membuf descriptor
+ * @tsk: The task from which to copy the saved xstate
+ * @copy_mode: The requested copy mode
+ *
+ * Converts from kernel XSAVE or XSAVES compacted format to the UABI-conforming
+ * format, i.e. from the kernel internal, hardware-dependent storage format
+ * to the requested @copy_mode. UABI XSTATE is always uncompacted!
*
- * It supports partial copy but pos always starts from zero. This is called
- * from xstateregs_get() and there we check the CPU has XSAVES.
+ * It supports partial copy but @to.pos always starts from zero.
*/
-void copy_xstate_to_kernel(struct membuf to, struct xregs_state *xsave)
+void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
+ enum xstate_copy_mode copy_mode)
{
+ const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+ struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
+ struct xregs_state *xinit = &init_fpstate.xsave;
struct xstate_header header;
- const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
- unsigned size = to.left;
- unsigned last = 0;
+ unsigned int zerofrom;
int i;
- /*
- * The destination is a ptrace buffer; we put in only user xstates:
- */
memset(&header, 0, sizeof(header));
header.xfeatures = xsave->header.xfeatures;
- header.xfeatures &= xfeatures_mask_user();
-
- if (header.xfeatures & XFEATURE_MASK_FP)
- copy_part(&to, &last, 0, off_mxcsr, &xsave->i387);
- if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
- copy_part(&to, &last, off_mxcsr,
- MXCSR_AND_FLAGS_SIZE, &xsave->i387.mxcsr);
- if (header.xfeatures & XFEATURE_MASK_FP)
- copy_part(&to, &last, offsetof(struct fxregs_state, st_space),
- 128, &xsave->i387.st_space);
- if (header.xfeatures & XFEATURE_MASK_SSE)
- copy_part(&to, &last, xstate_offsets[XFEATURE_SSE],
- 256, &xsave->i387.xmm_space);
- /*
- * Fill xsave->i387.sw_reserved value for ptrace frame:
- */
- copy_part(&to, &last, offsetof(struct fxregs_state, sw_reserved),
- 48, xstate_fx_sw_bytes);
- /*
- * Copy xregs_state->header:
- */
- copy_part(&to, &last, offsetof(struct xregs_state, header),
- sizeof(header), &header);
- for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
- /*
- * Copy only in-use xstates:
- */
- if ((header.xfeatures >> i) & 1) {
- void *src = __raw_xsave_addr(xsave, i);
+ /* Mask out the feature bits depending on copy mode */
+ switch (copy_mode) {
+ case XSTATE_COPY_FP:
+ header.xfeatures &= XFEATURE_MASK_FP;
+ break;
- copy_part(&to, &last, xstate_offsets[i],
- xstate_sizes[i], src);
- }
+ case XSTATE_COPY_FX:
+ header.xfeatures &= XFEATURE_MASK_FP | XFEATURE_MASK_SSE;
+ break;
+ case XSTATE_COPY_XSAVE:
+ header.xfeatures &= xfeatures_mask_uabi();
+ break;
}
- fill_gap(&to, &last, size);
-}
-/*
- * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format
- * and copy to the target thread. This is called from xstateregs_set().
- */
-int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
-{
- unsigned int offset, size;
- int i;
- struct xstate_header hdr;
+ /* Copy FP state up to MXCSR */
+ copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387,
+ &xinit->i387, off_mxcsr);
- offset = offsetof(struct xregs_state, header);
- size = sizeof(hdr);
+ /* Copy MXCSR when SSE or YMM are set in the feature mask */
+ copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
+ &to, &xsave->i387.mxcsr, &xinit->i387.mxcsr,
+ MXCSR_AND_FLAGS_SIZE);
- memcpy(&hdr, kbuf + offset, size);
+ /* Copy the remaining FP state */
+ copy_feature(header.xfeatures & XFEATURE_MASK_FP,
+ &to, &xsave->i387.st_space, &xinit->i387.st_space,
+ sizeof(xsave->i387.st_space));
- if (validate_user_xstate_header(&hdr))
- return -EINVAL;
+ /* Copy the SSE state - shared with YMM, but independently managed */
+ copy_feature(header.xfeatures & XFEATURE_MASK_SSE,
+ &to, &xsave->i387.xmm_space, &xinit->i387.xmm_space,
+ sizeof(xsave->i387.xmm_space));
- for (i = 0; i < XFEATURE_MAX; i++) {
- u64 mask = ((u64)1 << i);
+ if (copy_mode != XSTATE_COPY_XSAVE)
+ goto out;
- if (hdr.xfeatures & mask) {
- void *dst = __raw_xsave_addr(xsave, i);
+ /* Zero the padding area */
+ membuf_zero(&to, sizeof(xsave->i387.padding));
- offset = xstate_offsets[i];
- size = xstate_sizes[i];
+ /* Copy xsave->i387.sw_reserved */
+ membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved));
- memcpy(dst, kbuf + offset, size);
- }
- }
+ /* Copy the user space relevant state of @xsave->header */
+ membuf_write(&to, &header, sizeof(header));
- if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
- offset = offsetof(struct fxregs_state, mxcsr);
- size = MXCSR_AND_FLAGS_SIZE;
- memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
- }
+ zerofrom = offsetof(struct xregs_state, extended_state_area);
- /*
- * The state that came in from userspace was user-state only.
- * Mask all the user states out of 'xfeatures':
- */
- xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;
+ for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
+ /*
+ * The ptrace buffer is in non-compacted XSAVE format.
+ * In non-compacted format disabled features still occupy
+ * state space, but there is no state to copy from in the
+ * compacted init_fpstate. The gap tracking will zero this
+ * later.
+ */
+ if (!(xfeatures_mask_uabi() & BIT_ULL(i)))
+ continue;
- /*
- * Add back in the features that came in from userspace:
- */
- xsave->header.xfeatures |= hdr.xfeatures;
+ /*
+ * If there was a feature or alignment gap, zero the space
+ * in the destination buffer.
+ */
+ if (zerofrom < xstate_offsets[i])
+ membuf_zero(&to, xstate_offsets[i] - zerofrom);
+
+ if (i == XFEATURE_PKRU) {
+ struct pkru_state pkru = {0};
+ /*
+ * PKRU is not necessarily up to date in the
+ * thread's XSAVE buffer. Fill this part from the
+ * per-thread storage.
+ */
+ pkru.pkru = tsk->thread.pkru;
+ membuf_write(&to, &pkru, sizeof(pkru));
+ } else {
+ copy_feature(header.xfeatures & BIT_ULL(i), &to,
+ __raw_xsave_addr(xsave, i),
+ __raw_xsave_addr(xinit, i),
+ xstate_sizes[i]);
+ }
+ /*
+ * Keep track of the last copied state in the non-compacted
+ * target buffer for gap zeroing.
+ */
+ zerofrom = xstate_offsets[i] + xstate_sizes[i];
+ }
+
+out:
+ if (to.left)
+ membuf_zero(&to, to.left);
+}
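
The zerofrom bookkeeping above is what turns the compacted kernel buffer into a fully populated uncompacted one. A hedged, reduced sketch of the same walk; the feature table is an illustrative stand-in for xstate_offsets[]/xstate_sizes[] and the live/init buffers:

	#include <stdint.h>
	#include <string.h>

	struct feat {
		unsigned int offset, size;	/* location in the uncompacted buffer */
		const void *live;		/* live state, NULL when in init state */
		const void *init;		/* init template for this feature */
		int enabled;			/* part of the UABI mask? */
	};

	static void fill_uncompacted(uint8_t *dst, size_t dst_size,
				     const struct feat *f, int nr)
	{
		unsigned int zerofrom = 0;

		for (int i = 0; i < nr; i++) {
			if (!f[i].enabled)
				continue;		/* gap tracking zeroes it */

			if (zerofrom < f[i].offset)	/* feature/alignment gap */
				memset(dst + zerofrom, 0, f[i].offset - zerofrom);

			memcpy(dst + f[i].offset,
			       f[i].live ? f[i].live : f[i].init, f[i].size);
			zerofrom = f[i].offset + f[i].size;
		}

		if (zerofrom < dst_size)		/* trailing space */
			memset(dst + zerofrom, 0, dst_size - zerofrom);
	}
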
+static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size,
+ const void *kbuf, const void __user *ubuf)
+{
+ if (kbuf) {
+ memcpy(dst, kbuf + offset, size);
+ } else {
+ if (copy_from_user(dst, ubuf + offset, size))
+ return -EFAULT;
+ }
return 0;
}
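
The calling convention for copy_from_buffer() is that exactly one of @kbuf/@ubuf is non-NULL; that is what lets the ptrace and sigreturn wrappers further down share copy_uabi_to_xstate(). A hedged usage sketch:

	/* ptrace path: kernel source buffer, no user pointer */
	if (copy_from_buffer(&hdr, offset, sizeof(hdr), kbuf, NULL))
		return -EFAULT;

	/* sigreturn path: user source buffer, no kernel pointer */
	if (copy_from_buffer(&hdr, offset, sizeof(hdr), NULL, ubuf))
		return -EFAULT;
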
-/*
- * Convert from a ptrace or sigreturn standard-format user-space buffer to
- * kernel XSAVES format and copy to the target thread. This is called from
- * xstateregs_set(), as well as potentially from the sigreturn() and
- * rt_sigreturn() system calls.
- */
-int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
+
+static int copy_uabi_to_xstate(struct xregs_state *xsave, const void *kbuf,
+ const void __user *ubuf)
{
unsigned int offset, size;
- int i;
struct xstate_header hdr;
+ u64 mask;
+ int i;
offset = offsetof(struct xregs_state, header);
- size = sizeof(hdr);
-
- if (__copy_from_user(&hdr, ubuf + offset, size))
+ if (copy_from_buffer(&hdr, offset, sizeof(hdr), kbuf, ubuf))
return -EFAULT;
if (validate_user_xstate_header(&hdr))
return -EINVAL;
+ /* Validate MXCSR when any of the related features is in use */
+ mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM;
+ if (hdr.xfeatures & mask) {
+ u32 mxcsr[2];
+
+ offset = offsetof(struct fxregs_state, mxcsr);
+ if (copy_from_buffer(mxcsr, offset, sizeof(mxcsr), kbuf, ubuf))
+ return -EFAULT;
+
+ /* Reserved bits in MXCSR must be zero. */
+ if (mxcsr[0] & ~mxcsr_feature_mask)
+ return -EINVAL;
+
+ /* SSE and YMM require MXCSR even when FP is not in use. */
+ if (!(hdr.xfeatures & XFEATURE_MASK_FP)) {
+ xsave->i387.mxcsr = mxcsr[0];
+ xsave->i387.mxcsr_mask = mxcsr[1];
+ }
+ }
+
for (i = 0; i < XFEATURE_MAX; i++) {
u64 mask = ((u64)1 << i);
@@ -1240,18 +1135,11 @@ int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
offset = xstate_offsets[i];
size = xstate_sizes[i];
- if (__copy_from_user(dst, ubuf + offset, size))
+ if (copy_from_buffer(dst, offset, size, kbuf, ubuf))
return -EFAULT;
}
}
- if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
- offset = offsetof(struct fxregs_state, mxcsr);
- size = MXCSR_AND_FLAGS_SIZE;
- if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
- return -EFAULT;
- }
-
/*
* The state that came in from userspace was user-state only.
* Mask all the user states out of 'xfeatures':
@@ -1267,130 +1155,94 @@ int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
}
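
validate_user_xstate_header() gates both the ptrace and sigreturn paths. It is defined elsewhere in xstate.c and not visible in this hunk; a hedged approximation of the invariants it must enforce:

	/* Sketch only; the real checks live in validate_user_xstate_header(). */
	static int validate_header_sketch(const struct xstate_header *hdr)
	{
		/* Only features which userspace may set: */
		if (hdr->xfeatures & ~xfeatures_mask_uabi())
			return -EINVAL;

		/* UABI buffers are always uncompacted: */
		if (hdr->xcomp_bv)
			return -EINVAL;

		/* No reserved bits may be set: */
		if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
			return -EINVAL;

		return 0;
	}
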
/*
- * Save only supervisor states to the kernel buffer. This blows away all
- * old states, and is intended to be used only in __fpu__restore_sig(), where
- * user states are restored from the user buffer.
+ * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S]
+ * format and copy to the target thread. This is called from
+ * xstateregs_set().
*/
-void copy_supervisor_to_kernel(struct xregs_state *xstate)
+int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
{
- struct xstate_header *header;
- u64 max_bit, min_bit;
- u32 lmask, hmask;
- int err, i;
-
- if (WARN_ON(!boot_cpu_has(X86_FEATURE_XSAVES)))
- return;
-
- if (!xfeatures_mask_supervisor())
- return;
-
- max_bit = __fls(xfeatures_mask_supervisor());
- min_bit = __ffs(xfeatures_mask_supervisor());
-
- lmask = xfeatures_mask_supervisor();
- hmask = xfeatures_mask_supervisor() >> 32;
- XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
+ return copy_uabi_to_xstate(xsave, kbuf, NULL);
+}
- /* We should never fault when copying to a kernel buffer: */
- if (WARN_ON_FPU(err))
- return;
+/*
+ * Convert from a sigreturn standard-format user-space buffer to kernel
+ * XSAVE[S] format and copy to the target thread. This is called from the
+ * sigreturn() and rt_sigreturn() system calls.
+ */
+int copy_sigframe_from_user_to_xstate(struct xregs_state *xsave,
+ const void __user *ubuf)
+{
+ return copy_uabi_to_xstate(xsave, NULL, ubuf);
+}
- /*
- * At this point, the buffer has only supervisor states and must be
- * converted back to normal kernel format.
- */
- header = &xstate->header;
- header->xcomp_bv |= xfeatures_mask_all;
+static bool validate_xsaves_xrstors(u64 mask)
+{
+ u64 xchk;
+
+ if (WARN_ON_FPU(!cpu_feature_enabled(X86_FEATURE_XSAVES)))
+ return false;
/*
- * This only moves states up in the buffer. Start with
- * the last state and move backwards so that states are
- * not overwritten until after they are moved. Note:
- * memmove() allows overlapping src/dst buffers.
+ * Validate that this is either a task->fpstate related component
+ * subset or an independent one.
*/
- for (i = max_bit; i >= min_bit; i--) {
- u8 *xbuf = (u8 *)xstate;
+ if (mask & xfeatures_mask_independent())
+ xchk = ~xfeatures_mask_independent();
+ else
+ xchk = ~xfeatures_mask_all;
- if (!((header->xfeatures >> i) & 1))
- continue;
+ if (WARN_ON_ONCE(!mask || mask & xchk))
+ return false;
- /* Move xfeature 'i' into its normal location */
- memmove(xbuf + xstate_comp_offsets[i],
- xbuf + xstate_supervisor_only_offsets[i],
- xstate_sizes[i]);
- }
+ return true;
}
/**
- * copy_dynamic_supervisor_to_kernel() - Save dynamic supervisor states to
- * an xsave area
- * @xstate: A pointer to an xsave area
- * @mask: Represent the dynamic supervisor features saved into the xsave area
+ * xsaves - Save selected components to a kernel xstate buffer
+ * @xstate: Pointer to the buffer
+ * @mask: Feature mask to select the components to save
*
- * Only the dynamic supervisor states sets in the mask are saved into the xsave
- * area (See the comment in XFEATURE_MASK_DYNAMIC for the details of dynamic
- * supervisor feature). Besides the dynamic supervisor states, the legacy
- * region and XSAVE header are also saved into the xsave area. The supervisor
- * features in the XFEATURE_MASK_SUPERVISOR_SUPPORTED and
- * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not saved.
+ * The @xstate buffer must be 64-byte aligned and correctly initialized,
+ * as XSAVES does not write the full xstate header. Before first use the
+ * buffer should be zeroed, otherwise a subsequent XRSTORS from that
+ * buffer can #GP.
*
- * The xsave area must be 64-bytes aligned.
+ * The feature mask must either be a subset of the independent features or
+ * a subset of the task->fpstate related features.
*/
-void copy_dynamic_supervisor_to_kernel(struct xregs_state *xstate, u64 mask)
+void xsaves(struct xregs_state *xstate, u64 mask)
{
- u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
- u32 lmask, hmask;
int err;
- if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
- return;
-
- if (WARN_ON_FPU(!dynamic_mask))
+ if (!validate_xsaves_xrstors(mask))
return;
- lmask = dynamic_mask;
- hmask = dynamic_mask >> 32;
-
- XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
-
- /* Should never fault when copying to a kernel buffer */
- WARN_ON_FPU(err);
+ XSTATE_OP(XSAVES, xstate, (u32)mask, (u32)(mask >> 32), err);
+ WARN_ON_ONCE(err);
}
/**
- * copy_kernel_to_dynamic_supervisor() - Restore dynamic supervisor states from
- * an xsave area
- * @xstate: A pointer to an xsave area
- * @mask: Represent the dynamic supervisor features restored from the xsave area
+ * xrstors - Restore selected components from a kernel xstate buffer
+ * @xstate: Pointer to the buffer
+ * @mask: Feature mask to select the components to restore
*
- * Only the dynamic supervisor states sets in the mask are restored from the
- * xsave area (See the comment in XFEATURE_MASK_DYNAMIC for the details of
- * dynamic supervisor feature). Besides the dynamic supervisor states, the
- * legacy region and XSAVE header are also restored from the xsave area. The
- * supervisor features in the XFEATURE_MASK_SUPERVISOR_SUPPORTED and
- * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not restored.
+ * The @xstate buffer must be 64-byte aligned and correctly initialized,
+ * otherwise XRSTORS from that buffer can #GP.
*
- * The xsave area must be 64-bytes aligned.
+ * Proper usage is to restore the state which was saved with
+ * xsaves() into @xstate.
+ *
+ * The feature mask must either be a subset of the independent features or
+ * a subset of the task->fpstate related features.
*/
-void copy_kernel_to_dynamic_supervisor(struct xregs_state *xstate, u64 mask)
+void xrstors(struct xregs_state *xstate, u64 mask)
{
- u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
- u32 lmask, hmask;
int err;
- if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
- return;
-
- if (WARN_ON_FPU(!dynamic_mask))
+ if (!validate_xsaves_xrstors(mask))
return;
- lmask = dynamic_mask;
- hmask = dynamic_mask >> 32;
-
- XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
-
- /* Should never fault when copying from a kernel buffer */
- WARN_ON_FPU(err);
+ XSTATE_OP(XRSTORS, xstate, (u32)mask, (u32)(mask >> 32), err);
+ WARN_ON_ONCE(err);
}
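
A usage sketch for the xsaves()/xrstors() pair, under the constraints stated in the kernel-doc above. The buffer placement and the mask choice (an independent feature such as architectural LBR) are illustrative assumptions:

	/* BSS, so zeroed before first use; 64-byte aligned as required. */
	static struct xregs_state snapshot __aligned(64);

	xsaves(&snapshot, XFEATURE_MASK_LBR);	/* snapshot the component */
	/* ... the LBR state may be modified here ... */
	xrstors(&snapshot, XFEATURE_MASK_LBR);	/* restore the snapshot */
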
#ifdef CONFIG_PROC_PID_ARCH_STATUS
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e52b208b4641..1d9463e3096b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -87,8 +87,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
#ifdef CONFIG_VM86
dst->thread.vm86 = NULL;
#endif
-
- return fpu__copy(dst, src);
+ return fpu_clone(dst);
}
/*
@@ -157,11 +156,18 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
/* Kernel thread ? */
if (unlikely(p->flags & PF_KTHREAD)) {
+ p->thread.pkru = pkru_get_init_value();
memset(childregs, 0, sizeof(struct pt_regs));
kthread_frame_init(frame, sp, arg);
return 0;
}
+ /*
+ * Clone current's PKRU value from hardware. tsk->thread.pkru
+ * is only valid when scheduled out.
+ */
+ p->thread.pkru = read_pkru();
+
frame->bx = 0;
*childregs = *current_pt_regs();
childregs->ax = 0;
@@ -199,6 +205,15 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
return ret;
}
+static void pkru_flush_thread(void)
+{
+ /*
+ * If PKRU is enabled the default PKRU value has to be loaded into
+ * the hardware right here (similar to context switch).
+ */
+ pkru_write_default();
+}
+
void flush_thread(void)
{
struct task_struct *tsk = current;
@@ -206,7 +221,8 @@ void flush_thread(void)
flush_ptrace_hw_breakpoint(tsk);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
- fpu__clear_all(&tsk->thread.fpu);
+ fpu_flush_thread();
+ pkru_flush_thread();
}
void disable_TSC(void)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index d08307df69ad..ec0d836a13b1 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -41,6 +41,7 @@
#include <linux/syscalls.h>
#include <asm/processor.h>
+#include <asm/pkru.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
@@ -136,7 +137,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
log_lvl, d3, d6, d7);
}
- if (boot_cpu_has(X86_FEATURE_OSPKE))
+ if (cpu_feature_enabled(X86_FEATURE_OSPKE))
printk("%sPKRU: %08x\n", log_lvl, read_pkru());
}
@@ -339,6 +340,29 @@ static __always_inline void load_seg_legacy(unsigned short prev_index,
}
}
+/*
+ * Store prev's PKRU value and load next's PKRU value if they differ. PKRU
+ * is not XSTATE managed on context switch because that would require a
+ * lookup in the task's FPU xsave buffer and would require keeping that
+ * buffer updated in various places.
+ */
+static __always_inline void x86_pkru_load(struct thread_struct *prev,
+ struct thread_struct *next)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return;
+
+ /* Stash the prev task's value: */
+ prev->pkru = rdpkru();
+
+ /*
+ * PKRU writes are slightly expensive. Avoid them when not
+ * strictly necessary:
+ */
+ if (prev->pkru != next->pkru)
+ wrpkru(next->pkru);
+}
+
static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
struct thread_struct *next)
{
@@ -588,6 +612,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
x86_fsgsbase_load(prev, next);
+ x86_pkru_load(prev, next);
+
/*
* Switch the PDA and FPU contexts.
*/
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9f1d9215a9fb..bff3a784aec5 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -847,10 +847,7 @@ void __init setup_arch(char **cmdline_p)
if (!boot_params.hdr.root_flags)
root_mountflags &= ~MS_RDONLY;
- init_mm.start_code = (unsigned long) _text;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = _brk_end;
+ setup_initial_init_mm(_text, _etext, _edata, (void *)_brk_end);
code_resource.start = __pa_symbol(_text);
code_resource.end = __pa_symbol(_etext)-1;
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index e12779a2714d..f4d21e470083 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -212,6 +212,11 @@ do { \
* Set up a signal frame.
*/
+/* x86 ABI requires 16-byte alignment */
+#define FRAME_ALIGNMENT 16UL
+
+#define MAX_FRAME_PADDING (FRAME_ALIGNMENT - 1)
+
/*
* Determine which stack to use..
*/
@@ -222,9 +227,9 @@ static unsigned long align_sigframe(unsigned long sp)
* Align the stack pointer according to the i386 ABI,
* i.e. so that on function entry ((sp + 4) & 15) == 0.
*/
- sp = ((sp + 4) & -16ul) - 4;
+ sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4;
#else /* !CONFIG_X86_32 */
- sp = round_down(sp, 16) - 8;
+ sp = round_down(sp, FRAME_ALIGNMENT) - 8;
#endif
return sp;
}
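
The i386 branch of align_sigframe() is easy to self-check: after subtracting the return-address slot, the ABI rule is ((sp + 4) & 15) == 0 on function entry. A quick, illustrative test in plain C:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		for (uint64_t sp = 0x1000; sp < 0x1040; sp++) {
			/* Same computation as the i386 branch above: */
			uint64_t aligned = ((sp + 4) & ~15ull) - 4;

			assert(((aligned + 4) & 15) == 0);	/* ABI entry rule */
			assert(aligned <= sp);			/* stack only moves down */
		}
		return 0;
	}
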
@@ -234,10 +239,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
void __user **fpstate)
{
/* Default to using normal stack */
+ bool nested_altstack = on_sig_stack(regs->sp);
+ bool entering_altstack = false;
unsigned long math_size = 0;
unsigned long sp = regs->sp;
unsigned long buf_fx = 0;
- int onsigstack = on_sig_stack(sp);
int ret;
/* redzone */
@@ -246,15 +252,23 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (sas_ss_flags(sp) == 0)
+ /*
+ * This checks nested_altstack via sas_ss_flags(). Sensible
+ * programs use SS_AUTODISARM, which disables that check, and
+ * programs that don't use SS_AUTODISARM get the compatible, legacy behaviour.
+ */
+ if (sas_ss_flags(sp) == 0) {
sp = current->sas_ss_sp + current->sas_ss_size;
+ entering_altstack = true;
+ }
} else if (IS_ENABLED(CONFIG_X86_32) &&
- !onsigstack &&
+ !nested_altstack &&
regs->ss != __USER_DS &&
!(ka->sa.sa_flags & SA_RESTORER) &&
ka->sa.sa_restorer) {
/* This is the legacy signal stack switching. */
sp = (unsigned long) ka->sa.sa_restorer;
+ entering_altstack = true;
}
sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
@@ -267,8 +281,15 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
* If we are on the alternate signal stack and would overflow it, don't.
* Return an always-bogus address instead so we will die with SIGSEGV.
*/
- if (onsigstack && !likely(on_sig_stack(sp)))
+ if (unlikely((nested_altstack || entering_altstack) &&
+ !__on_sig_stack(sp))) {
+
+ if (show_unhandled_signals && printk_ratelimit())
+ pr_info("%s[%d] overflowed sigaltstack\n",
+ current->comm, task_pid_nr(current));
+
return (void __user *)-1L;
+ }
/* save i387 and extended state */
ret = copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size);
@@ -663,6 +684,61 @@ badframe:
return 0;
}
+/*
+ * There are four different struct types for a signal frame: sigframe_ia32,
+ * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case
+ * -- the largest size. This means the size for 64-bit apps is a bit more
+ * than needed, but it keeps the code simple.
+ */
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+# define MAX_FRAME_SIGINFO_UCTXT_SIZE sizeof(struct sigframe_ia32)
+#else
+# define MAX_FRAME_SIGINFO_UCTXT_SIZE sizeof(struct rt_sigframe)
+#endif
+
+/*
+ * The FP state frame contains an XSAVE buffer which must be 64-byte aligned.
+ * If a signal frame starts at an unaligned address, extra space is required.
+ * This is the max alignment padding, conservatively.
+ */
+#define MAX_XSAVE_PADDING 63UL
+
+/*
+ * The frame data is composed of the following areas and laid out as:
+ *
+ * -------------------------
+ * | alignment padding |
+ * -------------------------
+ * | (f)xsave frame |
+ * -------------------------
+ * | fsave header |
+ * -------------------------
+ * | alignment padding |
+ * -------------------------
+ * | siginfo + ucontext |
+ * -------------------------
+ */
+
+/* max_frame_size tells userspace the worst case signal stack size. */
+static unsigned long __ro_after_init max_frame_size;
+
+void __init init_sigframe_size(void)
+{
+ max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING;
+
+ max_frame_size += fpu__get_fpstate_size() + MAX_XSAVE_PADDING;
+
+ /* Userspace expects an aligned size. */
+ max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT);
+
+ pr_info("max sigframe size: %lu\n", max_frame_size);
+}
+
+unsigned long get_sigframe_size(void)
+{
+ return max_frame_size;
+}
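
A worked instance of the init_sigframe_size() arithmetic. The fpstate size below is an assumed, illustrative value; the real number comes from fpu__get_fpstate_size() and varies with the enabled xfeatures:

	/*
	 * Assumed inputs, for illustration only:
	 *   MAX_FRAME_SIGINFO_UCTXT_SIZE = 736   (sizeof the sigframe struct)
	 *   fpu__get_fpstate_size()      = 4608  (hypothetical XSAVE size)
	 *
	 *   736 + 15 (MAX_FRAME_PADDING) + 4608 + 63 (MAX_XSAVE_PADDING) = 5422
	 *   round_up(5422, FRAME_ALIGNMENT) = 5424  -> reported max sigframe size
	 */
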
+
static inline int is_ia32_compat_frame(struct ksignal *ksig)
{
return IS_ENABLED(CONFIG_IA32_EMULATION) &&
diff --git a/arch/x86/kernel/trace.c b/arch/x86/kernel/trace.c
new file mode 100644
index 000000000000..6b73b6f92ad3
--- /dev/null
+++ b/arch/x86/kernel/trace.c
@@ -0,0 +1,234 @@
+#include <asm/trace/irq_vectors.h>
+#include <linux/trace.h>
+
+#if defined(CONFIG_OSNOISE_TRACER) && defined(CONFIG_X86_LOCAL_APIC)
+/*
+ * trace_intel_irq_entry - record an Intel-specific IRQ entry
+ */
+static void trace_intel_irq_entry(void *data, int vector)
+{
+ osnoise_trace_irq_entry(vector);
+}
+
+/*
+ * trace_intel_irq_exit - record an Intel-specific IRQ exit
+ */
+static void trace_intel_irq_exit(void *data, int vector)
+{
+ char *vector_desc = (char *) data;
+
+ osnoise_trace_irq_exit(vector, vector_desc);
+}
+
+/*
+ * osnoise_arch_register - Register Intel-specific IRQ entry/exit tracepoints
+ */
+int osnoise_arch_register(void)
+{
+ int ret;
+
+ ret = register_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = register_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+ if (ret)
+ goto out_timer_entry;
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ ret = register_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_timer_exit;
+
+ ret = register_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+ if (ret)
+ goto out_thermal_entry;
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+#ifdef CONFIG_X86_MCE_AMD
+ ret = register_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_thermal_exit;
+
+ ret = register_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+ if (ret)
+ goto out_deferred_entry;
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ ret = register_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_deferred_exit;
+
+ ret = register_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+ if (ret)
+ goto out_threshold_entry;
+#endif /* CONFIG_X86_MCE_THRESHOLD */
+
+#ifdef CONFIG_SMP
+ ret = register_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_threshold_exit;
+
+ ret = register_trace_call_function_single_exit(trace_intel_irq_exit,
+ "call_function_single");
+ if (ret)
+ goto out_call_function_single_entry;
+
+ ret = register_trace_call_function_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_call_function_single_exit;
+
+ ret = register_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+ if (ret)
+ goto out_call_function_entry;
+
+ ret = register_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_call_function_exit;
+
+ ret = register_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+ if (ret)
+ goto out_reschedule_entry;
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_IRQ_WORK
+ ret = register_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_reschedule_exit;
+
+ ret = register_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+ if (ret)
+ goto out_irq_work_entry;
+#endif
+
+ ret = register_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_irq_work_exit;
+
+ ret = register_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+ if (ret)
+ goto out_x86_ipi_entry;
+
+ ret = register_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_x86_ipi_exit;
+
+ ret = register_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+ if (ret)
+ goto out_error_apic_entry;
+
+ ret = register_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_error_apic_exit;
+
+ ret = register_trace_spurious_apic_exit(trace_intel_irq_exit, "spurious_apic");
+ if (ret)
+ goto out_spurious_apic_entry;
+
+ return 0;
+
+out_spurious_apic_entry:
+ unregister_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+out_error_apic_exit:
+ unregister_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+out_error_apic_entry:
+ unregister_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+out_x86_ipi_exit:
+ unregister_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+out_x86_ipi_entry:
+ unregister_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+out_irq_work_exit:
+
+#ifdef CONFIG_IRQ_WORK
+ unregister_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+out_irq_work_entry:
+ unregister_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+out_reschedule_exit:
+#endif
+
+#ifdef CONFIG_SMP
+ unregister_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+out_reschedule_entry:
+ unregister_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+out_call_function_exit:
+ unregister_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+out_call_function_entry:
+ unregister_trace_call_function_entry(trace_intel_irq_entry, NULL);
+out_call_function_single_exit:
+ unregister_trace_call_function_single_exit(trace_intel_irq_exit, "call_function_single");
+out_call_function_single_entry:
+ unregister_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+out_threshold_exit:
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ unregister_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+out_threshold_entry:
+ unregister_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+out_deferred_exit:
+#endif
+
+#ifdef CONFIG_X86_MCE_AMD
+ unregister_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+out_deferred_entry:
+ unregister_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+out_thermal_exit:
+#endif /* CONFIG_X86_MCE_AMD */
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ unregister_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+out_thermal_entry:
+ unregister_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+out_timer_exit:
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+ unregister_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+out_timer_entry:
+ unregister_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+out_err:
+ return -EINVAL;
+}
+
+void osnoise_arch_unregister(void)
+{
+ unregister_trace_spurious_apic_exit(trace_intel_irq_exit, "spurious_apic");
+ unregister_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+ unregister_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+ unregister_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+
+#ifdef CONFIG_IRQ_WORK
+ unregister_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+ unregister_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_SMP
+ unregister_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+ unregister_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+ unregister_trace_call_function_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_call_function_single_exit(trace_intel_irq_exit, "call_function_single");
+ unregister_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ unregister_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+ unregister_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_MCE_AMD
+ unregister_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+ unregister_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ unregister_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+ unregister_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+ unregister_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+ unregister_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+}
+#endif /* CONFIG_OSNOISE_TRACER && CONFIG_X86_LOCAL_APIC */
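
osnoise_arch_register() above uses the standard kernel unwind idiom: every successful registration adds one unwind label, and a later failure jumps to the label that tears down everything registered so far, in reverse order. In miniature, with hypothetical names:

	int register_a(void);  int register_b(void);  int register_c(void);
	void unregister_a(void);  void unregister_b(void);

	static int register_three(void)
	{
		int ret;

		ret = register_a();
		if (ret)
			goto out_err;

		ret = register_b();
		if (ret)
			goto out_a;

		ret = register_c();
		if (ret)
			goto out_b;

		return 0;

	out_b:
		unregister_b();
	out_a:
		unregister_a();
	out_err:
		return ret;
	}
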
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ed540e09a399..a58800973aed 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -1046,9 +1046,10 @@ static void math_error(struct pt_regs *regs, int trapnr)
}
/*
- * Save the info for the exception handler and clear the error.
+ * Synchronize the FPU register state to the memory register state
+ * if necessary. This allows the exception handler to inspect it.
*/
- fpu__save(fpu);
+ fpu_sync_fpstate(fpu);
task->thread.trap_nr = trapnr;
task->thread.error_code = 0;