author		Dominik Brodowski <linux@dominikbrodowski.net>	2018-02-11 11:49:45 +0100
committer	Ingo Molnar <mingo@kernel.org>	2018-02-13 09:04:53 +0100
commit		3f01daecd545e818098d84fd1ad43e19a508d705 (patch)
tree		d70e4a362d5c8c41ef771107726a8e0ff4972d69 /arch/x86/entry/calling.h
parent		f7bafa2b05ef25eda1d9179fd930b0330cf2b7d1 (diff)
x86/entry/64: Introduce the PUSH_AND_CLEAR_REGS macro
Those instances where ALLOC_PT_GPREGS_ON_STACK is called just before
SAVE_AND_CLEAR_REGS can trivially be replaced by PUSH_AND_CLEAR_REGS.
This macro uses PUSH instead of MOV and should therefore be faster, at
least on newer CPUs.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-5-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
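For context, the MOV-based pattern being replaced first reserves the pt_regs
area by adjusting %rsp and then stores each register at a fixed offset; a
PUSH allocates and stores in a single, shorter instruction. The following is
a simplified, illustrative sketch of the contrast, not the kernel's actual
ALLOC_PT_GPREGS_ON_STACK/SAVE_AND_CLEAR_REGS bodies:

	/* MOV-based save (sketch): separate allocation, then stores at
	 * fixed pt_regs offsets; each store carries a displacement. */
	subq	$15*8, %rsp		/* reserve space for 15 GP registers */
	movq	%rdi, 14*8(%rsp)	/* pt_regs->di */
	movq	%rsi, 13*8(%rsp)	/* pt_regs->si */
	/* ... remaining registers ... */

	/* PUSH-based save (sketch): each PUSH adjusts %rsp and stores in
	 * one instruction with a short encoding; XOR clears can be
	 * interleaved for better uop scheduling, as the macro below does. */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	/* ... remaining registers ... */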
Diffstat (limited to 'arch/x86/entry/calling.h')
-rw-r--r--	arch/x86/entry/calling.h	36
1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index a05cbb81268d..57b1b87a04f0 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -137,6 +137,42 @@ For 32-bit we have the following conventions - kernel is built with
UNWIND_HINT_REGS offset=\offset
.endm
+ .macro PUSH_AND_CLEAR_REGS
+ /*
+ * Push registers and sanitize registers of values that a
+ * speculation attack might otherwise want to exploit. The
+ * lower registers are likely clobbered well before they
+ * could be put to use in a speculative execution gadget.
+ * Interleave XOR with PUSH for better uop scheduling:
+ */
+ pushq %rdi /* pt_regs->di */
+ pushq %rsi /* pt_regs->si */
+ pushq %rdx /* pt_regs->dx */
+ pushq %rcx /* pt_regs->cx */
+ pushq %rax /* pt_regs->ax */
+ pushq %r8 /* pt_regs->r8 */
+ xorq %r8, %r8 /* nospec r8 */
+ pushq %r9 /* pt_regs->r9 */
+ xorq %r9, %r9 /* nospec r9 */
+ pushq %r10 /* pt_regs->r10 */
+ xorq %r10, %r10 /* nospec r10 */
+ pushq %r11 /* pt_regs->r11 */
+ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp */
+ xorl %ebp, %ebp /* nospec rbp */
+ pushq %r12 /* pt_regs->r12 */
+ xorq %r12, %r12 /* nospec r12 */
+ pushq %r13 /* pt_regs->r13 */
+ xorq %r13, %r13 /* nospec r13 */
+ pushq %r14 /* pt_regs->r14 */
+ xorq %r14, %r14 /* nospec r14 */
+ pushq %r15 /* pt_regs->r15 */
+ xorq %r15, %r15 /* nospec r15 */
+ UNWIND_HINT_REGS
+ .endm
+
.macro POP_REGS pop_rdi=1 skip_r11rcx=0
popq %r15
popq %r14
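As the commit message notes, affected call sites convert one-for-one. A
hypothetical before/after at such a call site (illustrative only, not an
excerpt from the actual entry_64.S changes):

	/* before: allocate pt_regs, then MOV-store and clear */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_AND_CLEAR_REGS

	/* after: PUSH-store and XOR-clear in a single macro */
	PUSH_AND_CLEAR_REGS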