Diffstat (limited to 'arch/x86')
27 files changed, 124 insertions, 186 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 08e511657f05..58d890fe2100 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -99,7 +99,6 @@ config X86
 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PMEM_API		if X86_64
 	select ARCH_HAS_PREEMPT_LAZY
-	select ARCH_HAS_PTE_DEVMAP		if X86_64
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_HW_PTE_YOUNG
 	select ARCH_HAS_NONLEAF_PMD_YOUNG	if PGTABLE_LEVELS > 2
@@ -124,6 +123,7 @@ config X86
 	select ARCH_SUPPORTS_ACPI
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	select ARCH_SUPPORTS_HUGETLBFS
 	select ARCH_SUPPORTS_PAGE_TABLE_CHECK	if X86_64
 	select ARCH_SUPPORTS_NUMA_BALANCING	if X86_64
 	select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP	if NR_CPUS <= 4096
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index f1b6d40154e3..f1adfba1a76e 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -104,10 +104,12 @@ static void crypto_aegis128_aesni_process_ad(
 	}
 }

-static __always_inline void
+static __always_inline int
 crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
 				    struct skcipher_walk *walk, bool enc)
 {
+	int err = 0;
+
 	while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
 		if (enc)
 			aegis128_aesni_enc(state, walk->src.virt.addr,
@@ -119,7 +121,10 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
 					   walk->dst.virt.addr,
 					   round_down(walk->nbytes,
 						      AEGIS128_BLOCK_SIZE));
-		skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+		kernel_fpu_end();
+		err = skcipher_walk_done(walk,
+					 walk->nbytes % AEGIS128_BLOCK_SIZE);
+		kernel_fpu_begin();
 	}

 	if (walk->nbytes) {
@@ -131,8 +136,11 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
 			aegis128_aesni_dec_tail(state, walk->src.virt.addr,
 						walk->dst.virt.addr,
 						walk->nbytes);
-		skcipher_walk_done(walk, 0);
+		kernel_fpu_end();
+		err = skcipher_walk_done(walk, 0);
+		kernel_fpu_begin();
 	}
+	return err;
 }

 static struct aegis_ctx *crypto_aegis128_aesni_ctx(struct crypto_aead *aead)
@@ -165,7 +173,7 @@ static int crypto_aegis128_aesni_setauthsize(struct crypto_aead *tfm,
 	return 0;
 }

-static __always_inline void
+static __always_inline int
 crypto_aegis128_aesni_crypt(struct aead_request *req,
 			    struct aegis_block *tag_xor,
 			    unsigned int cryptlen, bool enc)
@@ -174,20 +182,24 @@ crypto_aegis128_aesni_crypt(struct aead_request *req,
 	struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
 	struct skcipher_walk walk;
 	struct aegis_state state;
+	int err;

 	if (enc)
-		skcipher_walk_aead_encrypt(&walk, req, true);
+		err = skcipher_walk_aead_encrypt(&walk, req, false);
 	else
-		skcipher_walk_aead_decrypt(&walk, req, true);
+		err = skcipher_walk_aead_decrypt(&walk, req, false);
+	if (err)
+		return err;

 	kernel_fpu_begin();

 	aegis128_aesni_init(&state, &ctx->key, req->iv);
 	crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128_aesni_process_crypt(&state, &walk, enc);
-	aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
-
+	err = crypto_aegis128_aesni_process_crypt(&state, &walk, enc);
+	if (err == 0)
+		aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 	kernel_fpu_end();
+	return err;
 }

 static int crypto_aegis128_aesni_encrypt(struct aead_request *req)
@@ -196,8 +208,11 @@ static int crypto_aegis128_aesni_encrypt(struct aead_request *req)
 	struct aegis_block tag = {};
 	unsigned int authsize = crypto_aead_authsize(tfm);
 	unsigned int cryptlen = req->cryptlen;
+	int err;

-	crypto_aegis128_aesni_crypt(req, &tag, cryptlen, true);
+	err = crypto_aegis128_aesni_crypt(req, &tag, cryptlen, true);
+	if (err)
+		return err;

 	scatterwalk_map_and_copy(tag.bytes, req->dst,
 				 req->assoclen + cryptlen, authsize, 1);
@@ -212,11 +227,14 @@ static int crypto_aegis128_aesni_decrypt(struct aead_request *req)
 	struct aegis_block tag;
 	unsigned int authsize = crypto_aead_authsize(tfm);
 	unsigned int cryptlen = req->cryptlen - authsize;
+	int err;

 	scatterwalk_map_and_copy(tag.bytes, req->src,
 				 req->assoclen + cryptlen, authsize, 0);

-	crypto_aegis128_aesni_crypt(req, &tag, cryptlen, false);
+	err = crypto_aegis128_aesni_crypt(req, &tag, cryptlen, false);
+	if (err)
+		return err;

 	return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? -EBADMSG : 0;
 }
diff --git a/arch/x86/crypto/aria_aesni_avx2_glue.c b/arch/x86/crypto/aria_aesni_avx2_glue.c
index b4bddcd58457..007b250f774c 100644
--- a/arch/x86/crypto/aria_aesni_avx2_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx2_glue.c
@@ -9,6 +9,7 @@
 #include <crypto/aria.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <linux/export.h>
 #include <linux/module.h>
 #include <linux/types.h>

diff --git a/arch/x86/crypto/aria_aesni_avx_glue.c b/arch/x86/crypto/aria_aesni_avx_glue.c
index ab9b38d05332..4c88ef4eba82 100644
--- a/arch/x86/crypto/aria_aesni_avx_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx_glue.c
@@ -9,6 +9,7 @@
 #include <crypto/aria.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <linux/export.h>
 #include <linux/module.h>
 #include <linux/types.h>

diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index a7d162388142..5c321f255eb7 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -8,6 +8,7 @@
 #include <crypto/algapi.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <linux/export.h>
 #include <linux/module.h>
 #include <linux/types.h>

diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index 3bd37d664121..cbede120e5f2 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -10,6 +10,7 @@
 #include <linux/unaligned.h>

 #include <linux/crypto.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
index dcfc0de333de..d587f05c3c8c 100644
--- a/arch/x86/crypto/curve25519-x86_64.c
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -7,6 +7,7 @@
 #include <crypto/curve25519.h>
 #include <crypto/internal/kpp.h>

+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/jump_label.h>
 #include <linux/kernel.h>
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index e640abc1cb8a..9c8b3a335d5c 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <linux/export.h>
 #include <crypto/algapi.h>
 #include <crypto/serpent.h>

diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c
index 72867fc49ce8..88caf418a06f 100644
--- a/arch/x86/crypto/sm4_aesni_avx_glue.c
+++ b/arch/x86/crypto/sm4_aesni_avx_glue.c
@@ -11,6 +11,7 @@
 #include <asm/fpu/api.h>
 #include <linux/module.h>
 #include <linux/crypto.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/sm4.h>
diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
index 4c67184dc573..8e9906d36902 100644
--- a/arch/x86/crypto/twofish_glue.c
+++ b/arch/x86/crypto/twofish_glue.c
@@ -40,6 +40,7 @@

 #include <crypto/algapi.h>
 #include <crypto/twofish.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 1a1ecfa7f72a..8ad77725bf60 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -9,6 +9,7 @@
 #include <crypto/algapi.h>
 #include <crypto/twofish.h>
 #include <linux/crypto.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index f0e9acf72547..20fcb8507ad1 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -32,45 +32,42 @@
 #ifdef CONFIG_GENERIC_BUG

 #ifdef CONFIG_X86_32
-# define __BUG_REL(val)	".long " __stringify(val)
+# define __BUG_REL(val)	".long " val
 #else
-# define __BUG_REL(val)	".long " __stringify(val) " - ."
+# define __BUG_REL(val)	".long " val " - ."
 #endif

 #ifdef CONFIG_DEBUG_BUGVERBOSE
+#define __BUG_ENTRY(file, line, flags)					\
+	"2:\t" __BUG_REL("1b") "\t# bug_entry::bug_addr\n"		\
+	"\t" __BUG_REL(file) "\t# bug_entry::file\n"			\
+	"\t.word " line "\t# bug_entry::line\n"				\
+	"\t.word " flags "\t# bug_entry::flags\n"
+#else
+#define __BUG_ENTRY(file, line, flags)					\
+	"2:\t" __BUG_REL("1b") "\t# bug_entry::bug_addr\n"		\
+	"\t.word " flags "\t# bug_entry::flags\n"
+#endif
+
+#define _BUG_FLAGS_ASM(ins, file, line, flags, size, extra)		\
+	"1:\t" ins "\n"							\
+	".pushsection __bug_table,\"aw\"\n"				\
+	__BUG_ENTRY(file, line, flags)					\
+	"\t.org 2b + " size "\n"					\
+	".popsection\n"							\
+	extra

 #define _BUG_FLAGS(ins, flags, extra)					\
 do {									\
-	asm_inline volatile("1:\t" ins "\n"				\
-		     ".pushsection __bug_table,\"aw\"\n"		\
-		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"	\
-		     "\t" __BUG_REL(%c0) "\t# bug_entry::file\n"	\
-		     "\t.word %c1"        "\t# bug_entry::line\n"	\
-		     "\t.word %c2"        "\t# bug_entry::flags\n"	\
-		     "\t.org 2b+%c3\n"					\
-		     ".popsection\n"					\
-		     extra						\
+	asm_inline volatile(_BUG_FLAGS_ASM(ins, "%c0",			\
+					   "%c1", "%c2", "%c3", extra)	\
 		     : : "i" (__FILE__), "i" (__LINE__),		\
 			 "i" (flags),					\
 			 "i" (sizeof(struct bug_entry)));		\
 } while (0)

-#else /* !CONFIG_DEBUG_BUGVERBOSE */
-
-#define _BUG_FLAGS(ins, flags, extra)					\
-do {									\
-	asm_inline volatile("1:\t" ins "\n"				\
-		     ".pushsection __bug_table,\"aw\"\n"		\
-		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"	\
-		     "\t.word %c0"        "\t# bug_entry::flags\n"	\
-		     "\t.org 2b+%c1\n"					\
-		     ".popsection\n"					\
-		     extra						\
-		     : : "i" (flags),					\
-			 "i" (sizeof(struct bug_entry)));		\
-} while (0)
-
-#endif /* CONFIG_DEBUG_BUGVERBOSE */
+#define ARCH_WARN_ASM(file, line, flags, size)				\
+	_BUG_FLAGS_ASM(ASM_UD2, file, line, flags, size, "")

 #else

@@ -92,11 +89,14 @@ do {								\
  * were to trigger, we'd rather wreck the machine in an attempt to get the
  * message out than not know about it.
  */
+
+#define ARCH_WARN_REACHABLE	ANNOTATE_REACHABLE(1b)
+
 #define __WARN_FLAGS(flags)					\
 do {								\
 	__auto_type __flags = BUGFLAG_WARNING|(flags);		\
 	instrumentation_begin();				\
-	_BUG_FLAGS(ASM_UD2, __flags, ANNOTATE_REACHABLE(1b));	\
+	_BUG_FLAGS(ASM_UD2, __flags, ARCH_WARN_REACHABLE);	\
 	instrumentation_end();					\
 } while (0)

diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h
index 3e51ba459154..1751f1eb95ef 100644
--- a/arch/x86/include/asm/cfi.h
+++ b/arch/x86/include/asm/cfi.h
@@ -116,8 +116,6 @@ struct pt_regs;
 #ifdef CONFIG_CFI_CLANG
 enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
 #define __bpfcall
-extern u32 cfi_bpf_hash;
-extern u32 cfi_bpf_subprog_hash;

 static inline int cfi_get_offset(void)
 {
@@ -135,6 +133,8 @@ static inline int cfi_get_offset(void)
 #define cfi_get_offset cfi_get_offset

 extern u32 cfi_get_func_hash(void *func);
+#define cfi_get_func_hash cfi_get_func_hash
+
 extern int cfi_get_func_arity(void *func);

 #ifdef CONFIG_FINEIBT
@@ -153,12 +153,6 @@ static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
 {
 	return BUG_TRAP_TYPE_NONE;
 }
-#define cfi_bpf_hash 0U
-#define cfi_bpf_subprog_hash 0U
-static inline u32 cfi_get_func_hash(void *func)
-{
-	return 0;
-}
 static inline int cfi_get_func_arity(void *func)
 {
 	return 0;
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 97954c936c54..e33df3da6980 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -301,16 +301,15 @@ static inline bool pmd_leaf(pmd_t pte)
 }

 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_leaf */
 static inline int pmd_trans_huge(pmd_t pmd)
 {
-	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+	return (pmd_val(pmd) & _PAGE_PSE) == _PAGE_PSE;
 }

 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static inline int pud_trans_huge(pud_t pud)
 {
-	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+	return (pud_val(pud) & _PAGE_PSE) == _PAGE_PSE;
 }
 #endif

@@ -320,24 +319,6 @@ static inline int has_transparent_hugepage(void)
 	return boot_cpu_has(X86_FEATURE_PSE);
 }

-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pmd_devmap(pmd_t pmd)
-{
-	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline int pud_devmap(pud_t pud)
-{
-	return !!(pud_val(pud) & _PAGE_DEVMAP);
-}
-#else
-static inline int pud_devmap(pud_t pud)
-{
-	return 0;
-}
-#endif
-
 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
 static inline bool pmd_special(pmd_t pmd)
 {
@@ -361,12 +342,6 @@ static inline pud_t pud_mkspecial(pud_t pud)
 	return pud_set_flags(pud, _PAGE_SPECIAL);
 }
 #endif	/* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
-
-static inline int pgd_devmap(pgd_t pgd)
-{
-	return 0;
-}
-#endif
 #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

 static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
@@ -527,11 +502,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 	return pte_set_flags(pte, _PAGE_SPECIAL);
 }

-static inline pte_t pte_mkdevmap(pte_t pte)
-{
-	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
-}
-
 /* See comments above mksaveddirty_shift() */
 static inline pmd_t pmd_mksaveddirty(pmd_t pmd)
 {
@@ -603,11 +573,6 @@ static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd)
 	return pmd_set_flags(pmd, _PAGE_DIRTY);
 }

-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
-	return pmd_set_flags(pmd, _PAGE_DEVMAP);
-}
-
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
 	return pmd_set_flags(pmd, _PAGE_PSE);
@@ -673,11 +638,6 @@ static inline pud_t pud_mkdirty(pud_t pud)
 	return pud_mksaveddirty(pud);
 }

-static inline pud_t pud_mkdevmap(pud_t pud)
-{
-	return pud_set_flags(pud, _PAGE_DEVMAP);
-}
-
 static inline pud_t pud_mkhuge(pud_t pud)
 {
 	return pud_set_flags(pud, _PAGE_PSE);
@@ -1008,13 +968,6 @@ static inline int pte_present(pte_t a)
 	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
 }

-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pte_devmap(pte_t a)
-{
-	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
-}
-#endif
-
 #define pte_accessible pte_accessible
 static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 {
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index a5731fb1e9dd..2ec250ba467e 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -34,7 +34,6 @@
 #define _PAGE_BIT_UFFD_WP	_PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */
 #define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_SOFTW3 /* software dirty tracking */
 #define _PAGE_BIT_KERNEL_4K	_PAGE_BIT_SOFTW3 /* page must not be converted to large */
-#define _PAGE_BIT_DEVMAP	_PAGE_BIT_SOFTW4

 #ifdef CONFIG_X86_64
 #define _PAGE_BIT_SAVED_DIRTY	_PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */
@@ -121,11 +120,9 @@

 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
-#define _PAGE_DEVMAP	(_AT(u64, 1) << _PAGE_BIT_DEVMAP)
 #define _PAGE_SOFTW4	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW4)
 #else
 #define _PAGE_NX	(_AT(pteval_t, 0))
-#define _PAGE_DEVMAP	(_AT(pteval_t, 0))
 #define _PAGE_SOFTW4	(_AT(pteval_t, 0))
 #endif

@@ -154,7 +151,7 @@
 #define _COMMON_PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |	\
 				 _PAGE_SPECIAL | _PAGE_ACCESSED |	\
 				 _PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY |	\
-				 _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP)
+				 _PAGE_CC | _PAGE_UFFD_WP)
 #define _PAGE_CHG_MASK	(_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
 #define _HPAGE_CHG_MASK	(_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index e9b81876ebe4..00daedfefc1b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -356,11 +356,6 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 }

-static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
-	flush_tlb_mm(mm);
-}
-
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

 static inline bool pte_flags_need_flush(unsigned long oldflags,
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index ea1d984166cd..7bde68247b5f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -120,7 +120,7 @@ struct its_array its_pages;

 static void *__its_alloc(struct its_array *pages)
 {
-	void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);
+	void *page __free(execmem) = execmem_alloc_rw(EXECMEM_MODULE_TEXT, PAGE_SIZE);
 	if (!page)
 		return NULL;

@@ -237,7 +236,6 @@ static void *its_alloc(void)
 	if (!page)
 		return NULL;

-	execmem_make_temp_rw(page, PAGE_SIZE);
 	if (pages == &its_pages)
 		set_memory_x((unsigned long)page, 1);

@@ -1184,43 +1183,6 @@ bool cfi_bhi __ro_after_init = false;
 #endif

 #ifdef CONFIG_CFI_CLANG
-struct bpf_insn;
-
-/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
-extern unsigned int __bpf_prog_runX(const void *ctx,
-				    const struct bpf_insn *insn);
-
-KCFI_REFERENCE(__bpf_prog_runX);
-
-/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
-asm (
-"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
-"	.type	cfi_bpf_hash,@object				\n"
-"	.globl	cfi_bpf_hash					\n"
-"	.p2align	2, 0x0					\n"
-"cfi_bpf_hash:							\n"
-"	.long	__kcfi_typeid___bpf_prog_runX			\n"
-"	.size	cfi_bpf_hash, 4					\n"
-"	.popsection						\n"
-);
-
-/* Must match bpf_callback_t */
-extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
-
-KCFI_REFERENCE(__bpf_callback_fn);
-
-/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
-asm (
-"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
-"	.type	cfi_bpf_subprog_hash,@object			\n"
-"	.globl	cfi_bpf_subprog_hash				\n"
-"	.p2align	2, 0x0					\n"
-"cfi_bpf_subprog_hash:						\n"
-"	.long	__kcfi_typeid___bpf_callback_fn			\n"
-"	.size	cfi_bpf_subprog_hash, 4				\n"
-"	.popsection						\n"
-);
-
 u32 cfi_get_func_hash(void *func)
 {
 	u32 hash;
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 279148e72459..308dbbae6c6e 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -279,7 +279,7 @@ static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,

 static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
 							unsigned long addr,
-							unsigned long vm_flags)
+							vm_flags_t vm_flags)
 {
 	unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
 	struct sgx_encl_page *entry;
@@ -520,9 +520,9 @@ static void sgx_vma_open(struct vm_area_struct *vma)
  * Return: 0 on success, -EACCES otherwise
  */
 int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
-		     unsigned long end, unsigned long vm_flags)
+		     unsigned long end, vm_flags_t vm_flags)
 {
-	unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
+	vm_flags_t vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
 	struct sgx_encl_page *page;
 	unsigned long count = 0;
 	int ret = 0;
@@ -605,7 +605,7 @@ static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *pag
  */
 static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
 						   unsigned long addr,
-						   unsigned long vm_flags)
+						   vm_flags_t vm_flags)
 {
 	struct sgx_encl_page *entry;

diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index f94ff14c9486..8ff47f6652b9 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -101,7 +101,7 @@ static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
 }

 int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
-		     unsigned long end, unsigned long vm_flags);
+		     unsigned long end, vm_flags_t vm_flags);

 bool current_is_ksgxd(void);
 void sgx_encl_release(struct kref *ref);
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index bcb534688dfe..c6b12bed173d 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -163,10 +163,10 @@ static struct crash_mem *fill_up_crash_elf_data(void)
 		return NULL;

 	/*
-	 * Exclusion of crash region and/or crashk_low_res may cause
-	 * another range split. So add extra two slots here.
+	 * Exclusion of crash region, crashk_low_res and/or crashk_cma_ranges
+	 * may cause range splits. So add extra slots here.
 	 */
-	nr_ranges += 2;
+	nr_ranges += 2 + crashk_cma_cnt;
 	cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
 	if (!cmem)
 		return NULL;
@@ -184,6 +184,7 @@ static struct crash_mem *fill_up_crash_elf_data(void)
 static int elf_header_exclude_ranges(struct crash_mem *cmem)
 {
 	int ret = 0;
+	int i;

 	/* Exclude the low 1M because it is always reserved */
 	ret = crash_exclude_mem_range(cmem, 0, SZ_1M - 1);
@@ -198,8 +199,17 @@ static int elf_header_exclude_ranges(struct crash_mem *cmem)
 	if (crashk_low_res.end)
 		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
 					      crashk_low_res.end);
+	if (ret)
+		return ret;

-	return ret;
+	for (i = 0; i < crashk_cma_cnt; ++i) {
+		ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
+					      crashk_cma_ranges[i].end);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }

 static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
@@ -374,6 +384,14 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
 		add_e820_entry(params, &ei);
 	}

+	for (i = 0; i < crashk_cma_cnt; ++i) {
+		ei.addr = crashk_cma_ranges[i].start;
+		ei.size = crashk_cma_ranges[i].end -
+			  crashk_cma_ranges[i].start + 1;
+		ei.type = E820_TYPE_RAM;
+		add_e820_entry(params, &ei);
+	}
+
 out:
 	vfree(cmem);
 	return ret;
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 252e82bcfd2f..4450acec9390 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -263,7 +263,7 @@ void arch_ftrace_update_code(int command)

 static inline void *alloc_tramp(unsigned long size)
 {
-	return execmem_alloc(EXECMEM_FTRACE, size);
+	return execmem_alloc_rw(EXECMEM_FTRACE, size);
 }
 static inline void tramp_free(void *tramp)
 {
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 47cb8eb138ba..6079d15dab8c 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -481,24 +481,6 @@ static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
 	return len;
 }

-/* Make page to RO mode when allocate it */
-void *alloc_insn_page(void)
-{
-	void *page;
-
-	page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
-	if (!page)
-		return NULL;
-
-	/*
-	 * TODO: Once additional kernel code protection mechanisms are set, ensure
-	 * that the page was not maliciously altered and it is still zeroed.
-	 */
-	set_memory_rox((unsigned long)page, 1);
-
-	return page;
-}
-
 /* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */
 static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
 {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0792f31961ac..1b2edd07a3e1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -603,7 +603,7 @@ static void __init memblock_x86_reserve_range_setup_data(void)

 static void __init arch_reserve_crashkernel(void)
 {
-	unsigned long long crash_base, crash_size, low_size = 0;
+	unsigned long long crash_base, crash_size, low_size = 0, cma_size = 0;
 	bool high = false;
 	int ret;

@@ -612,7 +612,7 @@ static void __init arch_reserve_crashkernel(void)

 	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
 				&crash_size, &crash_base,
-				&low_size, &high);
+				&low_size, &cma_size, &high);
 	if (ret)
 		return;

@@ -622,6 +622,7 @@ static void __init arch_reserve_crashkernel(void)
 	}

 	reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
+	reserve_crashkernel_cma(cma_size);
 }

 static struct resource standard_io_resources[] = {
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index d1b79b418c05..850972deac8e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -641,7 +641,7 @@ static void kvm_pit_reset(struct kvm_pit *pit)
 	kvm_pit_reset_reinject(pit);
 }

-static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
+static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
 {
 	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

@@ -763,7 +763,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)

 	pit_state->irq_ack_notifier.gsi = 0;
 	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
-	pit->mask_notifier.func = pit_mask_notifer;
+	pit->mask_notifier.func = pit_mask_notifier;

 	kvm_pit_reset(pit);

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 7456df985d96..bb57e93b4caf 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -1063,13 +1063,9 @@ unsigned long arch_max_swapfile_size(void)
 static struct execmem_info execmem_info __ro_after_init;

 #ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
-void execmem_fill_trapping_insns(void *ptr, size_t size, bool writeable)
+void execmem_fill_trapping_insns(void *ptr, size_t size)
 {
-	/* fill memory with INT3 instructions */
-	if (writeable)
-		memset(ptr, INT3_INSN_OPCODE, size);
-	else
-		text_poke_set(ptr, INT3_INSN_OPCODE, size);
+	memset(ptr, INT3_INSN_OPCODE, size);
 }
 #endif

@@ -1102,7 +1098,21 @@ struct execmem_info __init *execmem_arch_setup(void)
 			.pgprot	= pgprot,
 			.alignment = MODULE_ALIGN,
 		},
-		[EXECMEM_KPROBES ... EXECMEM_BPF] = {
+		[EXECMEM_KPROBES] = {
+			.flags	= flags,
+			.start	= start,
+			.end	= MODULES_END,
+			.pgprot	= PAGE_KERNEL_ROX,
+			.alignment = MODULE_ALIGN,
+		},
+		[EXECMEM_FTRACE] = {
+			.flags	= flags,
+			.start	= start,
+			.end	= MODULES_END,
+			.pgprot	= pgprot,
+			.alignment = MODULE_ALIGN,
+		},
+		[EXECMEM_BPF] = {
 			.flags	= EXECMEM_KASAN_SHADOW,
 			.start	= start,
 			.end	= MODULES_END,
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 2e7923844afe..c09284302dd3 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -36,7 +36,6 @@
 #include <linux/debugfs.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
-#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/mm.h>
diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c
index c84bd9540b16..dc1afd5c839d 100644
--- a/arch/x86/mm/pgprot.c
+++ b/arch/x86/mm/pgprot.c
@@ -32,7 +32,7 @@ void add_encrypt_protection_map(void)
 		protection_map[i] = pgprot_encrypted(protection_map[i]);
 }

-pgprot_t vm_get_page_prot(unsigned long vm_flags)
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
 {
 	unsigned long val = pgprot_val(protection_map[vm_flags &
 				      (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);