Diffstat (limited to 'arch/x86/kernel/fpu/xstate.c')
-rw-r--r--	arch/x86/kernel/fpu/xstate.c	253
1 file changed, 110 insertions(+), 143 deletions(-)
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index bda2e5eaca0e..7a2bf884fede 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -233,8 +233,10 @@ void fpu__init_cpu_xstate(void)
 	/*
 	 * MSR_IA32_XSS sets supervisor states managed by XSAVES.
 	 */
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
+	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
+				     xfeatures_mask_dynamic());
+	}
 }
 
 static bool xfeature_enabled(enum xfeature xfeature)
@@ -486,7 +488,7 @@ static int xfeature_uncompacted_offset(int xfeature_nr)
 	return ebx;
 }
 
-static int xfeature_size(int xfeature_nr)
+int xfeature_size(int xfeature_nr)
 {
 	u32 eax, ebx, ecx, edx;
 
@@ -598,7 +600,8 @@ static void check_xstate_against_struct(int nr)
 	 */
 	if ((nr < XFEATURE_YMM) ||
 	    (nr >= XFEATURE_MAX) ||
-	    (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR)) {
+	    (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
+	    ((nr >= XFEATURE_RSRVD_COMP_10) && (nr <= XFEATURE_LBR))) {
 		WARN_ONCE(1, "no structure for xstate: %d\n", nr);
 		XSTATE_WARN_ON(1);
 	}
@@ -847,8 +850,10 @@ void fpu__resume_cpu(void)
 	 * Restore IA32_XSS. The same CPUID bit enumerates support
 	 * of XSAVES and MSR_IA32_XSS.
 	 */
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
+	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
+				     xfeatures_mask_dynamic());
+	}
 }
 
 /*
@@ -1009,32 +1014,20 @@ static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
 	return true;
 }
 
-static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count)
+static void fill_gap(struct membuf *to, unsigned *last, unsigned offset)
 {
-	if (*pos < to) {
-		unsigned size = to - *pos;
-
-		if (size > *count)
-			size = *count;
-		memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size);
-		*kbuf += size;
-		*pos += size;
-		*count -= size;
-	}
+	if (*last >= offset)
+		return;
+	membuf_write(to, (void *)&init_fpstate.xsave + *last, offset - *last);
+	*last = offset;
 }
 
-static void copy_part(unsigned offset, unsigned size, void *from,
-		      void **kbuf, unsigned *pos, unsigned *count)
+static void copy_part(struct membuf *to, unsigned *last, unsigned offset,
+		      unsigned size, void *from)
 {
-	fill_gap(offset, kbuf, pos, count);
-	if (size > *count)
-		size = *count;
-	if (size) {
-		memcpy(*kbuf, from, size);
-		*kbuf += size;
-		*pos += size;
-		*count -= size;
-	}
+	fill_gap(to, last, offset);
+	membuf_write(to, from, size);
+	*last = offset + size;
 }
 
 /*
@@ -1044,20 +1037,15 @@ static void copy_part(unsigned offset, unsigned size, void *from,
  * It supports partial copy but pos always starts from zero. This is called
  * from xstateregs_get() and there we check the CPU has XSAVES.
  */
-int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
+void copy_xstate_to_kernel(struct membuf to, struct xregs_state *xsave)
 {
 	struct xstate_header header;
 	const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
-	unsigned count = size_total;
+	unsigned size = to.left;
+	unsigned last = 0;
 	int i;
 
 	/*
-	 * Currently copy_regset_to_user() starts from pos 0:
-	 */
-	if (unlikely(offset_start != 0))
-		return -EFAULT;
-
-	/*
 	 * The destination is a ptrace buffer; we put in only user xstates:
 	 */
 	memset(&header, 0, sizeof(header));
@@ -1065,27 +1053,26 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
 	header.xfeatures &= xfeatures_mask_user();
 
 	if (header.xfeatures & XFEATURE_MASK_FP)
-		copy_part(0, off_mxcsr,
-			  &xsave->i387, &kbuf, &offset_start, &count);
+		copy_part(&to, &last, 0, off_mxcsr, &xsave->i387);
 	if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
-		copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE,
-			  &xsave->i387.mxcsr, &kbuf, &offset_start, &count);
+		copy_part(&to, &last, off_mxcsr,
+			  MXCSR_AND_FLAGS_SIZE, &xsave->i387.mxcsr);
 	if (header.xfeatures & XFEATURE_MASK_FP)
-		copy_part(offsetof(struct fxregs_state, st_space), 128,
-			  &xsave->i387.st_space, &kbuf, &offset_start, &count);
+		copy_part(&to, &last, offsetof(struct fxregs_state, st_space),
+			  128, &xsave->i387.st_space);
 	if (header.xfeatures & XFEATURE_MASK_SSE)
-		copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
-			  &xsave->i387.xmm_space, &kbuf, &offset_start, &count);
+		copy_part(&to, &last, xstate_offsets[XFEATURE_SSE],
+			  256, &xsave->i387.xmm_space);
 	/*
 	 * Fill xsave->i387.sw_reserved value for ptrace frame:
 	 */
-	copy_part(offsetof(struct fxregs_state, sw_reserved), 48,
-		  xstate_fx_sw_bytes, &kbuf, &offset_start, &count);
+	copy_part(&to, &last, offsetof(struct fxregs_state, sw_reserved),
+		  48, xstate_fx_sw_bytes);
 	/*
 	 * Copy xregs_state->header:
 	 */
-	copy_part(offsetof(struct xregs_state, header), sizeof(header),
-		  &header, &kbuf, &offset_start, &count);
+	copy_part(&to, &last, offsetof(struct xregs_state, header),
+		  sizeof(header), &header);
 
 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
 		/*
@@ -1094,104 +1081,12 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
 		 */
 		if ((header.xfeatures >> i) & 1) {
 			void *src = __raw_xsave_addr(xsave, i);
 
-			copy_part(xstate_offsets[i], xstate_sizes[i],
-				  src, &kbuf, &offset_start, &count);
-		}
-
-	}
-	fill_gap(size_total, &kbuf, &offset_start, &count);
-
-	return 0;
-}
-
-static inline int
-__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total)
-{
-	if (!size)
-		return 0;
-
-	if (offset < size_total) {
-		unsigned int copy = min(size, size_total - offset);
-
-		if (__copy_to_user(ubuf + offset, data, copy))
-			return -EFAULT;
-	}
-	return 0;
-}
-
-/*
- * Convert from kernel XSAVES compacted format to standard format and copy
- * to a user-space buffer. It supports partial copy but pos always starts from
- * zero. This is called from xstateregs_get() and there we check the CPU
- * has XSAVES.
- */
-int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
-{
-	unsigned int offset, size;
-	int ret, i;
-	struct xstate_header header;
-
-	/*
-	 * Currently copy_regset_to_user() starts from pos 0:
-	 */
-	if (unlikely(offset_start != 0))
-		return -EFAULT;
-
-	/*
-	 * The destination is a ptrace buffer; we put in only user xstates:
-	 */
-	memset(&header, 0, sizeof(header));
-	header.xfeatures = xsave->header.xfeatures;
-	header.xfeatures &= xfeatures_mask_user();
-
-	/*
-	 * Copy xregs_state->header:
-	 */
-	offset = offsetof(struct xregs_state, header);
-	size = sizeof(header);
-
-	ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < XFEATURE_MAX; i++) {
-		/*
-		 * Copy only in-use xstates:
-		 */
-		if ((header.xfeatures >> i) & 1) {
-			void *src = __raw_xsave_addr(xsave, i);
-
-			offset = xstate_offsets[i];
-			size = xstate_sizes[i];
-
-			/* The next component has to fit fully into the output buffer: */
-			if (offset + size > size_total)
-				break;
-
-			ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total);
-			if (ret)
-				return ret;
+			copy_part(&to, &last, xstate_offsets[i],
+				  xstate_sizes[i], src);
 		}
 	}
-
-	if (xfeatures_mxcsr_quirk(header.xfeatures)) {
-		offset = offsetof(struct fxregs_state, mxcsr);
-		size = MXCSR_AND_FLAGS_SIZE;
-		__copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total);
-	}
-
-	/*
-	 * Fill xsave->i387.sw_reserved value for ptrace frame:
-	 */
-	offset = offsetof(struct fxregs_state, sw_reserved);
-	size = sizeof(xstate_fx_sw_bytes);
-
-	ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total);
-	if (ret)
-		return ret;
-
-	return 0;
+	fill_gap(&to, &last, size);
 }
 
 /*
@@ -1356,6 +1251,78 @@ void copy_supervisor_to_kernel(struct xregs_state *xstate)
 	}
 }
 
+/**
+ * copy_dynamic_supervisor_to_kernel() - Save dynamic supervisor states to
+ *                                       an xsave area
+ * @xstate: A pointer to an xsave area
+ * @mask: Represents the dynamic supervisor features saved into the xsave area
+ *
+ * Only the dynamic supervisor states set in the mask are saved into the xsave
+ * area (see the comment in XFEATURE_MASK_DYNAMIC for details of the dynamic
+ * supervisor feature). Besides the dynamic supervisor states, the legacy
+ * region and XSAVE header are also saved into the xsave area. The supervisor
+ * features in XFEATURE_MASK_SUPERVISOR_SUPPORTED and
+ * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not saved.
+ *
+ * The xsave area must be 64-byte aligned.
+ */
+void copy_dynamic_supervisor_to_kernel(struct xregs_state *xstate, u64 mask)
+{
+	u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
+	u32 lmask, hmask;
+	int err;
+
+	if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
+		return;
+
+	if (WARN_ON_FPU(!dynamic_mask))
+		return;
+
+	lmask = dynamic_mask;
+	hmask = dynamic_mask >> 32;
+
+	XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
+
+	/* Should never fault when copying to a kernel buffer */
+	WARN_ON_FPU(err);
+}
+
+/**
+ * copy_kernel_to_dynamic_supervisor() - Restore dynamic supervisor states from
+ *                                       an xsave area
+ * @xstate: A pointer to an xsave area
+ * @mask: Represents the dynamic supervisor features restored from the xsave area
+ *
+ * Only the dynamic supervisor states set in the mask are restored from the
+ * xsave area (see the comment in XFEATURE_MASK_DYNAMIC for details of the
+ * dynamic supervisor feature). Besides the dynamic supervisor states, the
+ * legacy region and XSAVE header are also restored from the xsave area. The
+ * supervisor features in XFEATURE_MASK_SUPERVISOR_SUPPORTED and
+ * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not restored.
+ *
+ * The xsave area must be 64-byte aligned.
+ */
+void copy_kernel_to_dynamic_supervisor(struct xregs_state *xstate, u64 mask)
+{
+	u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
+	u32 lmask, hmask;
+	int err;
+
+	if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
+		return;
+
+	if (WARN_ON_FPU(!dynamic_mask))
+		return;
+
+	lmask = dynamic_mask;
+	hmask = dynamic_mask >> 32;
+
+	XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
+
+	/* Should never fault when copying from a kernel buffer */
+	WARN_ON_FPU(err);
+}
+
 #ifdef CONFIG_PROC_PID_ARCH_STATUS
 /*
  * Report the amount of time elapsed in millisecond since last AVX512
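
Note: the reworked fill_gap()/copy_part() pair above turns the compacted-to-standard-format conversion into straight-line streaming. Each component is written at its fixed standard-format offset, and any hole between the end of the previous write and that offset is padded from init_fpstate, so no destination bytes are left uninitialized. The user-space sketch below models that technique with a minimal stand-in for the kernel's struct membuf cursor (a write pointer plus the bytes remaining); init_state and the offsets in main() are illustrative values, not kernel data.

	#include <stdio.h>
	#include <string.h>

	/* Minimal stand-in for the kernel's struct membuf (linux/regset.h). */
	struct membuf {
		void *p;	/* next byte to write */
		unsigned left;	/* space remaining in the destination */
	};

	static void membuf_write(struct membuf *s, const void *v, unsigned size)
	{
		memcpy(s->p, v, size);
		s->p = (char *)s->p + size;
		s->left -= size;
	}

	/* Stand-in for init_fpstate: the bytes used to pad holes between
	 * components that are absent from the source. */
	static unsigned char init_state[64];

	/* Pad the range [*last, offset) from init_state. */
	static void fill_gap(struct membuf *to, unsigned *last, unsigned offset)
	{
		if (*last >= offset)
			return;
		membuf_write(to, init_state + *last, offset - *last);
		*last = offset;
	}

	/* Fill any hole, then emit one component at its fixed offset. */
	static void copy_part(struct membuf *to, unsigned *last, unsigned offset,
			      unsigned size, const void *from)
	{
		fill_gap(to, last, offset);
		membuf_write(to, from, size);
		*last = offset + size;
	}

	int main(void)
	{
		unsigned char out[64];
		struct membuf to = { .p = out, .left = sizeof(out) };
		unsigned last = 0;
		const unsigned char a[4] = { 'A', 'A', 'A', 'A' };
		const unsigned char b[4] = { 'B', 'B', 'B', 'B' };

		copy_part(&to, &last, 8, sizeof(a), a);    /* pads bytes 0..7 */
		copy_part(&to, &last, 24, sizeof(b), b);   /* pads bytes 12..23 */
		fill_gap(&to, &last, sizeof(out));         /* pads the tail */

		printf("emitted %u bytes, %u left\n", last, to.left);
		return 0;
	}

This is also why copy_xstate_to_user() and its partial-copy bookkeeping could be deleted: the single membuf-based copy_xstate_to_kernel() always produces a fully initialized buffer of exactly to.left bytes.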
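The two new helpers are meant for callers that manage a dynamic supervisor state, such as architectural LBR, on their own. A hypothetical caller might look like the sketch below; the sched_out_lbr()/sched_in_lbr() hooks and the per-task lbr_xsave_area buffer are invented for illustration, and XFEATURE_MASK_LBR is assumed to be the companion BIT_ULL(XFEATURE_LBR) mask from the same patch series.

	/* Hypothetical context-switch hooks, for illustration only. */

	/* Assumed to be 64-byte aligned and large enough to hold the
	 * legacy region, the XSAVE header, and the LBR component
	 * (xfeature_size(XFEATURE_LBR) bytes, now exported above). */
	static struct xregs_state *lbr_xsave_area;

	static void sched_out_lbr(void)
	{
		/* XSAVES the LBR component (plus legacy region and header). */
		copy_dynamic_supervisor_to_kernel(lbr_xsave_area,
						  XFEATURE_MASK_LBR);
	}

	static void sched_in_lbr(void)
	{
		/* XRSTORS the LBR component saved by sched_out_lbr(). */
		copy_kernel_to_dynamic_supervisor(lbr_xsave_area,
						  XFEATURE_MASK_LBR);
	}

Because IA32_XSS now includes xfeatures_mask_dynamic() at boot and on resume (the two wrmsrl() hunks above), the XSAVES/XRSTORS issued by these helpers may operate on the LBR component even though the regular task-FPU save and restore paths never include it.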