Diffstat (limited to 'arch/x86/include/asm/smap.h')
-rw-r--r--  arch/x86/include/asm/smap.h  |  129
1 file changed, 74 insertions, 55 deletions
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 8d3120f4e270..977bef14a0ab 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -1,91 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Supervisor Mode Access Prevention support
*
* Copyright (C) 2012 Intel Corporation
* Author: H. Peter Anvin <hpa@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
*/
#ifndef _ASM_X86_SMAP_H
#define _ASM_X86_SMAP_H
-#include <linux/stringify.h>
#include <asm/nops.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative.h>
-/* "Raw" instruction opcodes */
-#define __ASM_CLAC .byte 0x0f,0x01,0xca
-#define __ASM_STAC .byte 0x0f,0x01,0xcb
+#ifdef __ASSEMBLER__
-#ifdef __ASSEMBLY__
+#define ASM_CLAC \
+ ALTERNATIVE "", "clac", X86_FEATURE_SMAP
-#include <asm/alternative-asm.h>
+#define ASM_STAC \
+ ALTERNATIVE "", "stac", X86_FEATURE_SMAP
-#ifdef CONFIG_X86_SMAP
+#else /* __ASSEMBLER__ */
-#define ASM_CLAC \
- 661: ASM_NOP3 ; \
- .pushsection .altinstr_replacement, "ax" ; \
- 662: __ASM_CLAC ; \
- .popsection ; \
- .pushsection .altinstructions, "a" ; \
- altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
- .popsection
+/*
+ * The CLAC/STAC instructions toggle the enforcement of
+ * X86_FEATURE_SMAP along with X86_FEATURE_LASS.
+ *
+ * SMAP enforcement is based on the _PAGE_BIT_USER bit in the page
+ * tables. The kernel is not allowed to touch pages with that bit set
+ * unless the AC bit is set.
+ *
+ * Use stac()/clac() when accessing userspace (_PAGE_USER) mappings,
+ * regardless of location.
+ *
+ * Note: a barrier is implicit in alternative().
+ */
-#define ASM_STAC \
- 661: ASM_NOP3 ; \
- .pushsection .altinstr_replacement, "ax" ; \
- 662: __ASM_STAC ; \
- .popsection ; \
- .pushsection .altinstructions, "a" ; \
- altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
- .popsection
+static __always_inline void clac(void)
+{
+ alternative("", "clac", X86_FEATURE_SMAP);
+}
-#else /* CONFIG_X86_SMAP */
+static __always_inline void stac(void)
+{
+ alternative("", "stac", X86_FEATURE_SMAP);
+}
-#define ASM_CLAC
-#define ASM_STAC
+/*
+ * LASS enforcement is based on bit 63 of the virtual address. The
+ * kernel is not allowed to touch memory in the lower half of the
+ * virtual address space.
+ *
+ * Use lass_stac()/lass_clac() to toggle the AC bit for kernel data
+ * accesses (!_PAGE_USER) that are blocked by LASS, but not by SMAP.
+ *
+ * Even with the AC bit set, LASS will continue to block instruction
+ * fetches from the user half of the address space. To allow those,
+ * clear CR4.LASS to disable the LASS mechanism entirely.
+ *
+ * Note: a barrier is implicit in alternative().
+ */
-#endif /* CONFIG_X86_SMAP */
+static __always_inline void lass_clac(void)
+{
+ alternative("", "clac", X86_FEATURE_LASS);
+}
-#else /* __ASSEMBLY__ */
+static __always_inline void lass_stac(void)
+{
+ alternative("", "stac", X86_FEATURE_LASS);
+}
-#include <asm/alternative.h>
+static __always_inline unsigned long smap_save(void)
+{
+ unsigned long flags;
-#ifdef CONFIG_X86_SMAP
+ asm volatile ("# smap_save\n\t"
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "\n\t"
+ "", "pushf; pop %0; clac",
+ X86_FEATURE_SMAP)
+ : "=rm" (flags) : : "memory", "cc");
-static __always_inline void clac(void)
-{
- /* Note: a barrier is implicit in alternative() */
- alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+ return flags;
}
-static __always_inline void stac(void)
+static __always_inline void smap_restore(unsigned long flags)
{
- /* Note: a barrier is implicit in alternative() */
- alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+ asm volatile ("# smap_restore\n\t"
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "\n\t"
+ "", "push %0; popf",
+ X86_FEATURE_SMAP)
+ : : "g" (flags) : "memory", "cc");
}
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+ ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP)
-
-#else /* CONFIG_X86_SMAP */
-
-static inline void clac(void) { }
-static inline void stac(void) { }
-
-#define ASM_CLAC
-#define ASM_STAC
+ ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
-#endif /* CONFIG_X86_SMAP */
+#define ASM_CLAC_UNSAFE \
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "clac", X86_FEATURE_SMAP)
+#define ASM_STAC_UNSAFE \
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "stac", X86_FEATURE_SMAP)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_SMAP_H */
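
The first comment block added above documents the basic SMAP discipline: set EFLAGS.AC around the user access and clear it again immediately afterwards. A minimal sketch of that pattern follows; read_user_long() is a hypothetical helper, and the access_ok() check and exception-table fixup that real uaccess code requires are deliberately omitted.

#include <linux/compiler.h>
#include <asm/smap.h>

/*
 * Hypothetical helper: read one long from a user (_PAGE_USER) mapping.
 * Real kernel code would use get_user()/__get_user(), which add an
 * access_ok() check and an exception-table fixup; both are omitted
 * here to keep the stac()/clac() bracketing visible.
 */
static unsigned long read_user_long(const unsigned long __user *uptr)
{
	unsigned long val;

	stac();				/* EFLAGS.AC=1: permit user accesses */
	val = *(const unsigned long __force *)uptr;
	clac();				/* EFLAGS.AC=0: re-arm SMAP */

	return val;
}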
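The second comment block covers the LASS-only case: a kernel mapping (!_PAGE_USER) that happens to sit in the lower half of the address space is invisible to SMAP but still trips LASS's bit-63 check. A sketch under that assumption; poke_low_alias() and its argument are hypothetical.

#include <linux/compiler.h>
#include <asm/smap.h>

/*
 * Hypothetical: write through a kernel-owned alias that lives at a
 * user-half (bit 63 clear) address. SMAP does not object because the
 * PTE lacks _PAGE_USER, but LASS blocks the access by address alone,
 * so use the LASS-gated AC toggles rather than stac()/clac().
 */
static void poke_low_alias(unsigned long *alias)
{
	lass_stac();			/* AC=1: suppress LASS data checks */
	WRITE_ONCE(*alias, 0);
	lass_clac();			/* AC=0: restore LASS enforcement */
}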
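smap_save()/smap_restore() exist for code that can run in the middle of someone else's stac()..clac() window and must hand EFLAGS.AC back unchanged; on x86 they back the generic user_access_save()/user_access_restore() helpers. A sketch; instrument_hook() and do_instrumentation() are hypothetical.

#include <asm/smap.h>

static void do_instrumentation(void)
{
	/* hypothetical work; must not itself touch user memory */
}

/*
 * Hypothetical hook that may be called while the caller is inside a
 * stac()..clac() region. Preserve the caller's EFLAGS.AC instead of
 * unconditionally clearing it.
 */
static void instrument_hook(void)
{
	unsigned long flags;

	flags = smap_save();		/* save EFLAGS, then clac */
	do_instrumentation();
	smap_restore(flags);		/* popf: AC back as we found it */
}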
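Finally, the ASM_CLAC/ASM_STAC string macros are meant for hand-written asm() statements, where the ALTERNATIVE() expansion concatenates into the template. A sketch of such a use; asm_read_user() is hypothetical and, as above, omits the exception handling a real user load needs.

#include <linux/compiler.h>
#include <asm/smap.h>

/* Hypothetical: bracket a raw user load with the SMAP alternatives. */
static unsigned long asm_read_user(const unsigned long __user *uptr)
{
	unsigned long val;

	asm volatile(ASM_STAC "\n\t"
		     "mov (%1), %0\n\t"
		     ASM_CLAC
		     : "=r" (val)
		     : "r" (uptr)
		     : "memory", "cc");
	return val;
}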