Diffstat (limited to 'arch/arm64')
96 files changed, 536 insertions, 336 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6663ffd23f25..65db12f66b8f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -47,7 +47,6 @@ config ARM64 select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SET_DIRECT_MAP select ARCH_HAS_SET_MEMORY - select ARCH_HAS_MEM_ENCRYPT select ARCH_HAS_FORCE_DMA_UNENCRYPTED select ARCH_STACKWALK select ARCH_HAS_STRICT_KERNEL_RWX @@ -2023,6 +2022,31 @@ config ARM64_TLB_RANGE ARMv8.4-TLBI provides TLBI invalidation instruction that apply to a range of input addresses. +config ARM64_MPAM + bool "Enable support for MPAM" + select ARM64_MPAM_DRIVER if EXPERT # does nothing yet + select ACPI_MPAM if ACPI + help + Memory System Resource Partitioning and Monitoring (MPAM) is an + optional extension to the Arm architecture that allows each + transaction issued to the memory system to be labelled with a + Partition identifier (PARTID) and Performance Monitoring Group + identifier (PMG). + + Memory system components, such as the caches, can be configured with + policies to control how much of various physical resources (such as + memory bandwidth or cache memory) the transactions labelled with each + PARTID can consume. Depending on the capabilities of the hardware, + the PARTID and PMG can also be used as filtering criteria to measure + the memory system resource consumption of different parts of a + workload. + + Use of this extension requires CPU support, support in the + Memory System Components (MSC), and a description from firmware + of where the MSCs are in the address space. + + MPAM is exposed to user-space via the resctrl pseudo filesystem. + endmenu # "ARMv8.4 architectural features" menu "ARMv8.5 architectural features" diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h index c8c77f9e36d6..862416624852 100644 --- a/arch/arm64/include/asm/alternative-macros.h +++ b/arch/arm64/include/asm/alternative-macros.h @@ -19,7 +19,7 @@ #error "cpucaps have overflown ARM64_CB_BIT" #endif -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/stringify.h> @@ -207,7 +207,7 @@ alternative_endif #define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \ alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ /* * Usage: asm(ALTERNATIVE(oldinstr, newinstr, cpucap)); @@ -219,7 +219,7 @@ alternative_endif #define ALTERNATIVE(oldinstr, newinstr, ...) 
\ _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> @@ -263,6 +263,6 @@ l_yes: return true; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_ALTERNATIVE_MACROS_H */ diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 00d97b8a757f..607a21e7dd9c 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -4,7 +4,7 @@ #include <asm/alternative-macros.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/init.h> #include <linux/types.h> @@ -34,5 +34,5 @@ static inline void apply_alternatives_module(void *start, size_t length) { } void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_ALTERNATIVE_H */ diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 9e96f024b2f1..d20b03931a8d 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -9,7 +9,7 @@ #include <asm/sysreg.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/irqchip/arm-gic-common.h> #include <linux/stringify.h> @@ -188,5 +188,5 @@ static inline bool gic_has_relaxed_pmr_sync(void) return cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC); } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h index 292f2687a12e..d67e2fdd1aee 100644 --- a/arch/arm64/include/asm/asm-extable.h +++ b/arch/arm64/include/asm/asm-extable.h @@ -27,7 +27,7 @@ /* Data fields for EX_TYPE_UACCESS_CPY */ #define EX_DATA_UACCESS_WRITE BIT(0) -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ .pushsection __ex_table, "a"; \ @@ -77,7 +77,7 @@ __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_UACCESS_CPY, \uaccess_is_write) .endm -#else /* __ASSEMBLY__ */ +#else /* __ASSEMBLER__ */ #include <linux/stringify.h> @@ -132,6 +132,6 @@ EX_DATA_REG(ADDR, addr) \ ")") -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_ASM_EXTABLE_H */ diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 23be85d93348..e9267623796c 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -5,7 +5,7 @@ * Copyright (C) 1996-2000 Russell King * Copyright (C) 2012 ARM Ltd. */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #error "Only include this from assembly code" #endif @@ -371,7 +371,7 @@ alternative_endif * [start, end) with dcache line size explicitly provided. 
* * op: operation passed to dc instruction - * domain: domain used in dsb instruciton + * domain: domain used in dsb instruction * start: starting virtual address of the region * end: end virtual address of the region * linesz: dcache line size @@ -412,7 +412,7 @@ alternative_endif * [start, end) * * op: operation passed to dc instruction - * domain: domain used in dsb instruciton + * domain: domain used in dsb instruction * start: starting virtual address of the region * end: end virtual address of the region * fixup: optional label to branch to on user fault diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 87f568a94e55..afad1849c4cf 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -103,17 +103,17 @@ static __always_inline void __lse_atomic_and(int i, atomic_t *v) return __lse_atomic_andnot(~i, v); } -#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \ +#define ATOMIC_FETCH_OP_AND(name) \ static __always_inline int \ __lse_atomic_fetch_and##name(int i, atomic_t *v) \ { \ return __lse_atomic_fetch_andnot##name(~i, v); \ } -ATOMIC_FETCH_OP_AND(_relaxed, ) -ATOMIC_FETCH_OP_AND(_acquire, a, "memory") -ATOMIC_FETCH_OP_AND(_release, l, "memory") -ATOMIC_FETCH_OP_AND( , al, "memory") +ATOMIC_FETCH_OP_AND(_relaxed) +ATOMIC_FETCH_OP_AND(_acquire) +ATOMIC_FETCH_OP_AND(_release) +ATOMIC_FETCH_OP_AND( ) #undef ATOMIC_FETCH_OP_AND @@ -210,17 +210,17 @@ static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v) return __lse_atomic64_andnot(~i, v); } -#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \ +#define ATOMIC64_FETCH_OP_AND(name) \ static __always_inline long \ __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ { \ return __lse_atomic64_fetch_andnot##name(~i, v); \ } -ATOMIC64_FETCH_OP_AND(_relaxed, ) -ATOMIC64_FETCH_OP_AND(_acquire, a, "memory") -ATOMIC64_FETCH_OP_AND(_release, l, "memory") -ATOMIC64_FETCH_OP_AND( , al, "memory") +ATOMIC64_FETCH_OP_AND(_relaxed) +ATOMIC64_FETCH_OP_AND(_acquire) +ATOMIC64_FETCH_OP_AND(_release) +ATOMIC64_FETCH_OP_AND( ) #undef ATOMIC64_FETCH_OP_AND diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index f5801b0ba9e9..9495c4441a46 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -7,7 +7,7 @@ #ifndef __ASM_BARRIER_H #define __ASM_BARRIER_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/kasan-checks.h> @@ -221,6 +221,6 @@ do { \ #include <asm-generic/barrier.h> -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_BARRIER_H */ diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 09963004ceea..dd2c8586a725 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -35,7 +35,7 @@ #define ARCH_DMA_MINALIGN (128) #define ARCH_KMALLOC_MINALIGN (8) -#if !defined(__ASSEMBLY__) && !defined(BUILD_VDSO) +#if !defined(__ASSEMBLER__) && !defined(BUILD_VDSO) #include <linux/bitops.h> #include <linux/kasan-enabled.h> @@ -135,6 +135,6 @@ static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void) return ctr; } -#endif /* !defined(__ASSEMBLY__) && !defined(BUILD_VDSO) */ +#endif /* !defined(__ASSEMBLER__) && !defined(BUILD_VDSO) */ #endif diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 9d769291a306..2c8029472ad4 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -5,7 +5,7 @@ #include <asm/cpucap-defs.h> -#ifndef __ASSEMBLY__ +#ifndef 
__ASSEMBLER__ #include <linux/types.h> /* * Check whether a cpucap is possible at compiletime. @@ -77,6 +77,6 @@ cpucap_is_possible(const unsigned int cap) return true; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index e223cbf350e4..4de51f8d92cb 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -19,7 +19,7 @@ #define ARM64_SW_FEATURE_OVERRIDE_HVHE 4 #define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF 8 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/bug.h> #include <linux/jump_label.h> @@ -199,7 +199,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; * registers (e.g, SCTLR, TCR etc.) or patching the kernel via * alternatives. The kernel patching is batched and performed at later * point. The actions are always initiated only after the capability - * is finalised. This is usally denoted by "enabling" the capability. + * is finalised. This is usually denoted by "enabling" the capability. * The actions are initiated as follows : * a) Action is triggered on all online CPUs, after the capability is * finalised, invoked within the stop_machine() context from @@ -251,7 +251,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; #define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0)) #define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1)) /* - * The capabilitiy is detected on the Boot CPU and is used by kernel + * The capability is detected on the Boot CPU and is used by kernel * during early boot. i.e, the capability should be "detected" and * "enabled" as early as possibly on all booting CPUs. */ @@ -1078,6 +1078,6 @@ static inline bool cpu_has_lpa2(void) #endif } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 9b00b75acbf2..024b4fa976a8 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -249,7 +249,7 @@ #define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_CPU_VAR_REV(1, 0)) #define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/sysreg.h> @@ -328,6 +328,6 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void) { return read_cpuid(CTR_EL0); } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h index 54ceae0874c7..c92912eaf186 100644 --- a/arch/arm64/include/asm/current.h +++ b/arch/arm64/include/asm/current.h @@ -4,7 +4,7 @@ #include <linux/compiler.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct task_struct; @@ -23,7 +23,7 @@ static __always_inline struct task_struct *get_current(void) #define current get_current() -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_CURRENT_H */ diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index f5e3ed2420ce..8d5f92418838 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -48,7 +48,7 @@ #define AARCH32_BREAK_THUMB2_LO 0xf7f0 #define AARCH32_BREAK_THUMB2_HI 0xa000 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct task_struct; #define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. 
*/ @@ -88,5 +88,5 @@ static inline bool try_step_suspended_breakpoints(struct pt_regs *regs) bool try_handle_aarch32_break(struct pt_regs *regs); -#endif /* __ASSEMBLY */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_DEBUG_MONITORS_H */ diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index bcd5622aa096..aa91165ca140 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -126,21 +126,14 @@ static inline void efi_set_pgd(struct mm_struct *mm) if (mm != current->active_mm) { /* * Update the current thread's saved ttbr0 since it is - * restored as part of a return from exception. Enable - * access to the valid TTBR0_EL1 and invoke the errata - * workaround directly since there is no return from - * exception when invoking the EFI run-time services. + * restored as part of a return from exception. */ update_saved_ttbr0(current, mm); - uaccess_ttbr0_enable(); - post_ttbr_update_workaround(); } else { /* - * Defer the switch to the current thread's TTBR0_EL1 - * until uaccess_enable(). Restore the current - * thread's saved ttbr0 corresponding to its active_mm + * Restore the current thread's saved ttbr0 + * corresponding to its active_mm */ - uaccess_ttbr0_disable(); update_saved_ttbr0(current, current->active_mm); } } diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index b37da3ee8529..cb0d72401d45 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -7,7 +7,7 @@ #ifndef __ARM_KVM_INIT_H__ #define __ARM_KVM_INIT_H__ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #error Assembly-only header #endif @@ -28,7 +28,7 @@ * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but * don't advertise it (they predate this relaxation). * - * Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H + * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H * indicating whether the CPU is running in E2H mode. 
*/ mrs_s x1, SYS_ID_AA64MMFR4_EL1 diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 3f93f4eef953..d2779d604c7b 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -133,7 +133,7 @@ #define ELF_ET_DYN_BASE (2 * DEFAULT_MAP_WINDOW_64 / 3) #endif /* CONFIG_ARM64_FORCE_52BIT */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <uapi/linux/elf.h> #include <linux/bug.h> @@ -293,6 +293,6 @@ static inline int arch_check_elf(void *ehdr, bool has_interp, return 0; } -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index e1deed824464..4975a92cbd17 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -431,7 +431,7 @@ #define ESR_ELx_IT_GCSPOPCX 6 #define ESR_ELx_IT_GCSPOPX 7 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/types.h> static inline unsigned long esr_brk_comment(unsigned long esr) @@ -534,6 +534,6 @@ static inline bool esr_iss_is_eretab(unsigned long esr) } const char *esr_get_class_string(unsigned long esr); -#endif /* __ASSEMBLY */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_ESR_H */ diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index 635a43c4ec85..65555284446e 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -15,7 +15,7 @@ #ifndef _ASM_ARM64_FIXMAP_H #define _ASM_ARM64_FIXMAP_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/kernel.h> #include <linux/math.h> #include <linux/sizes.h> @@ -117,5 +117,5 @@ extern void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t pr #include <asm-generic/fixmap.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_ARM64_FIXMAP_H */ diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index b8cf0ea43cc0..1d2e33559bd5 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -12,7 +12,7 @@ #include <asm/sigcontext.h> #include <asm/sysreg.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/bitmap.h> #include <linux/build_bug.h> diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index ba7cf7fec5e9..1621c84f44b3 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -37,7 +37,7 @@ */ #define ARCH_FTRACE_SHIFT_STACK_TRACER 1 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/compat.h> extern void _mcount(unsigned long); @@ -217,9 +217,9 @@ static inline bool arch_syscall_match_sym_name(const char *sym, */ return !strcmp(sym + 8, name); } -#endif /* ifndef __ASSEMBLY__ */ +#endif /* ifndef __ASSEMBLER__ */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #ifdef CONFIG_FUNCTION_GRAPH_TRACER void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, diff --git a/arch/arm64/include/asm/gpr-num.h b/arch/arm64/include/asm/gpr-num.h index 05da4a7c5788..a114e4f8209b 100644 --- a/arch/arm64/include/asm/gpr-num.h +++ b/arch/arm64/include/asm/gpr-num.h @@ -2,7 +2,7 @@ #ifndef __ASM_GPR_NUM_H #define __ASM_GPR_NUM_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 .equ .L__gpr_num_x\num, \num @@ -11,7 +11,7 @@ .equ .L__gpr_num_xzr, 31 .equ .L__gpr_num_wzr, 31 -#else /* __ASSEMBLY__ */ +#else /* __ASSEMBLER__ */ #define __DEFINE_ASM_GPR_NUMS \ " .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \ @@ 
-21,6 +21,6 @@ " .equ .L__gpr_num_xzr, 31\n" \ " .equ .L__gpr_num_wzr, 31\n" -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_GPR_NUM_H */ diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 6d567265467c..1f63814ae6c4 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -46,7 +46,7 @@ #define COMPAT_HWCAP2_SB (1 << 5) #define COMPAT_HWCAP2_SSBS (1 << 6) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/log2.h> /* diff --git a/arch/arm64/include/asm/image.h b/arch/arm64/include/asm/image.h index c09cf942dc92..9ba85173f857 100644 --- a/arch/arm64/include/asm/image.h +++ b/arch/arm64/include/asm/image.h @@ -20,7 +20,7 @@ #define ARM64_IMAGE_FLAG_PAGE_SIZE_64K 3 #define ARM64_IMAGE_FLAG_PHYS_BASE 1 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define arm64_image_flag_field(flags, field) \ (((flags) >> field##_SHIFT) & field##_MASK) @@ -54,6 +54,6 @@ struct arm64_image_header { __le32 res5; }; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_IMAGE_H */ diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 18c7811774d3..e1d30ba99d01 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -12,7 +12,7 @@ #include <asm/insn-def.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ enum aarch64_insn_hint_cr_op { AARCH64_INSN_HINT_NOP = 0x0 << 5, @@ -730,6 +730,6 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn); typedef bool (pstate_check_t)(unsigned long); extern pstate_check_t * const aarch32_opcode_cond_checks[16]; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_INSN_H */ diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h index 424ed421cd97..0cb211d3607d 100644 --- a/arch/arm64/include/asm/jump_label.h +++ b/arch/arm64/include/asm/jump_label.h @@ -8,7 +8,7 @@ #ifndef __ASM_JUMP_LABEL_H #define __ASM_JUMP_LABEL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #include <asm/insn.h> @@ -58,5 +58,5 @@ l_yes: return true; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_JUMP_LABEL_H */ diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h index e1b57c13f8a4..b167e9d3da91 100644 --- a/arch/arm64/include/asm/kasan.h +++ b/arch/arm64/include/asm/kasan.h @@ -2,7 +2,7 @@ #ifndef __ASM_KASAN_H #define __ASM_KASAN_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/linkage.h> #include <asm/memory.h> diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 4d9cc7a76d9c..892e5bebda95 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -25,7 +25,7 @@ #define KEXEC_ARCH KEXEC_ARCH_AARCH64 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /** * crash_setup_regs() - save registers for the panic kernel @@ -130,6 +130,6 @@ extern int load_other_segments(struct kimage *image, char *cmdline); #endif -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h index 3184f5d1e3ae..67ef1c5532ae 100644 --- a/arch/arm64/include/asm/kgdb.h +++ b/arch/arm64/include/asm/kgdb.h @@ -14,7 +14,7 @@ #include <linux/ptrace.h> #include <asm/debug-monitors.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ static inline void arch_kgdb_breakpoint(void) { @@ -36,7 +36,7 @@ static inline int kgdb_single_step_handler(struct pt_regs *regs, } #endif -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* * gdb remote 
procotol (well most versions of it) expects the following diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 9da54d4ee49e..4b34f7b7ed2f 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -46,7 +46,7 @@ #define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init 0 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/mm.h> @@ -303,7 +303,7 @@ void kvm_compute_final_ctr_el0(struct alt_instr *alt, void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt, u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar); -#else /* __ASSEMBLY__ */ +#else /* __ASSEMBLER__ */ .macro get_host_ctxt reg, tmp adr_this_cpu \reg, kvm_host_data, \tmp diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index e4069f2ce642..2dc5e6e742bb 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -49,7 +49,7 @@ * mappings, and none of this applies in that case. */ -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #include <asm/alternative.h> @@ -396,5 +396,5 @@ void kvm_s2_ptdump_create_debugfs(struct kvm *kvm); static inline void kvm_s2_ptdump_create_debugfs(struct kvm *kvm) {} #endif /* CONFIG_PTDUMP_STAGE2_DEBUGFS */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/kvm_mte.h b/arch/arm64/include/asm/kvm_mte.h index de002636eb1f..3171963ad25c 100644 --- a/arch/arm64/include/asm/kvm_mte.h +++ b/arch/arm64/include/asm/kvm_mte.h @@ -5,7 +5,7 @@ #ifndef __ASM_KVM_MTE_H #define __ASM_KVM_MTE_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #include <asm/sysreg.h> @@ -62,5 +62,5 @@ alternative_else_nop_endif .endm #endif /* CONFIG_ARM64_MTE */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_KVM_MTE_H */ diff --git a/arch/arm64/include/asm/kvm_ptrauth.h b/arch/arm64/include/asm/kvm_ptrauth.h index 6199c9f7ec6e..e50987b32483 100644 --- a/arch/arm64/include/asm/kvm_ptrauth.h +++ b/arch/arm64/include/asm/kvm_ptrauth.h @@ -8,7 +8,7 @@ #ifndef __ASM_KVM_PTRAUTH_H #define __ASM_KVM_PTRAUTH_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #include <asm/sysreg.h> @@ -100,7 +100,7 @@ alternative_else_nop_endif .endm #endif /* CONFIG_ARM64_PTR_AUTH */ -#else /* !__ASSEMBLY */ +#else /* !__ASSEMBLER__ */ #define __ptrauth_save_key(ctxt, key) \ do { \ @@ -120,5 +120,5 @@ alternative_else_nop_endif __ptrauth_save_key(ctxt, APGA); \ } while(0) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_KVM_PTRAUTH_H */ diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index d3acd9c87509..40bd17add539 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -1,7 +1,7 @@ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #include <asm/assembler.h> #endif diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index f1505c4acb38..9d54b2ea49d6 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -207,7 +207,7 @@ */ #define TRAMP_SWAPPER_OFFSET (2 * PAGE_SIZE) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/bitops.h> #include <linux/compiler.h> @@ -392,7 +392,6 @@ static inline unsigned long virt_to_pfn(const void *kaddr) * virt_to_page(x) convert a _valid_ virtual address to struct page * * virt_addr_valid(x) indicates whether a virtual address is valid */ -#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) #if 
defined(CONFIG_DEBUG_VIRTUAL) #define page_to_virt(x) ({ \ @@ -422,7 +421,7 @@ static inline unsigned long virt_to_pfn(const void *kaddr) }) void dump_mem_limit(void); -#endif /* !ASSEMBLY */ +#endif /* !__ASSEMBLER__ */ /* * Given that the GIC architecture permits ITS implementations that can only be diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 78a4dbf75e60..137a173df1ff 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -12,7 +12,7 @@ #define USER_ASID_FLAG (UL(1) << USER_ASID_BIT) #define TTBR_ASID_MASK (UL(0xffff) << 48) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/refcount.h> #include <asm/cpufeature.h> @@ -112,5 +112,5 @@ void kpti_install_ng_mappings(void); static inline void kpti_install_ng_mappings(void) {} #endif -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 0dbe3b29049b..d809355f9613 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -8,7 +8,7 @@ #ifndef __ASM_MMU_CONTEXT_H #define __ASM_MMU_CONTEXT_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/compiler.h> #include <linux/sched.h> @@ -62,11 +62,6 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) } /* - * TCR.T0SZ value to use when the ID map is active. - */ -#define idmap_t0sz TCR_T0SZ(IDMAP_VA_BITS) - -/* * Ensure TCR.T0SZ is set to the provided value. */ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz) @@ -82,9 +77,6 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz) isb(); } -#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual)) -#define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz) - /* * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm. 
* @@ -103,7 +95,7 @@ static inline void cpu_uninstall_idmap(void) cpu_set_reserved_ttbr0(); local_flush_tlb_all(); - cpu_set_default_tcr_t0sz(); + __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual)); if (mm != &init_mm && !system_uses_ttbr0_pan()) cpu_switch_mm(mm->pgd, mm); @@ -113,7 +105,7 @@ static inline void cpu_install_idmap(void) { cpu_set_reserved_ttbr0(); local_flush_tlb_all(); - cpu_set_idmap_tcr_t0sz(); + __cpu_set_tcr_t0sz(TCR_T0SZ(IDMAP_VA_BITS)); cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm); } @@ -330,6 +322,6 @@ static inline void deactivate_mm(struct task_struct *tsk, #include <asm-generic/mmu_context.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* !__ASM_MMU_CONTEXT_H */ diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h index 0f9b08e8fb8d..352139271918 100644 --- a/arch/arm64/include/asm/mte-kasan.h +++ b/arch/arm64/include/asm/mte-kasan.h @@ -9,7 +9,7 @@ #include <asm/cputype.h> #include <asm/mte-def.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> @@ -259,6 +259,6 @@ static inline int mte_enable_kernel_store_only(void) #endif /* CONFIG_ARM64_MTE */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_MTE_KASAN_H */ diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index 3b5069f4683d..6d4a78b9dc3e 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -8,7 +8,7 @@ #include <asm/compiler.h> #include <asm/mte-def.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/bitfield.h> #include <linux/kasan-enabled.h> @@ -282,5 +282,5 @@ static inline void mte_check_tfsr_exit(void) } #endif /* CONFIG_KASAN_HW_TAGS */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_MTE_H */ diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 2312e6ee595f..0370a1534abc 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -10,7 +10,7 @@ #include <asm/page-def.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/personality.h> /* for READ_IMPLIES_EXEC */ #include <linux/types.h> /* for gfp_t */ @@ -45,7 +45,7 @@ int pfn_is_map_memory(unsigned long pfn); #include <asm/memory.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #define VM_DATA_DEFAULT_FLAGS (VM_DATA_FLAGS_TSK_EXEC | VM_MTE_ALLOWED) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 85dceb1c66f4..28460c826298 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -62,7 +62,7 @@ #define _PAGE_READONLY_EXEC (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) #define _PAGE_EXECONLY (_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/cpufeature.h> #include <asm/pgtable-types.h> @@ -127,7 +127,7 @@ static inline bool __pure lpa2_is_enabled(void) #define PAGE_READONLY_EXEC __pgprot(_PAGE_READONLY_EXEC) #define PAGE_EXECONLY __pgprot(_PAGE_EXECONLY) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #define pte_pi_index(pte) ( \ ((pte & BIT(PTE_PI_IDX_3)) >> (PTE_PI_IDX_3 - 3)) | \ diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index aa89c2e67ebc..65afae7c1854 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -30,7 +30,7 @@ #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/cmpxchg.h> 
#include <asm/fixmap.h> @@ -130,12 +130,16 @@ static inline void arch_leave_lazy_mmu_mode(void) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* - * Outside of a few very special situations (e.g. hibernation), we always - * use broadcast TLB invalidation instructions, therefore a spurious page - * fault on one CPU which has been handled concurrently by another CPU - * does not need to perform additional invalidation. + * We use local TLB invalidation instruction when reusing page in + * write protection fault handler to avoid TLBI broadcast in the hot + * path. This will cause spurious page faults if stale read-only TLB + * entries exist. */ -#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0) +#define flush_tlb_fix_spurious_fault(vma, address, ptep) \ + local_flush_tlb_page_nonotify(vma, address) + +#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) \ + local_flush_tlb_page_nonotify(vma, address) /* * ZERO_PAGE is a global shared page that is always zero: used @@ -432,7 +436,7 @@ bool pgattr_change_is_safe(pteval_t old, pteval_t new); * 1 0 | 1 0 1 * 1 1 | 0 1 x * - * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via + * When hardware DBM is not present, the software PTE_DIRTY bit is updated via * the page fault mechanism. Checking the dirty status of a pte becomes: * * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY) @@ -598,7 +602,7 @@ static inline int pte_protnone(pte_t pte) /* * pte_present_invalid() tells us that the pte is invalid from HW * perspective but present from SW perspective, so the fields are to be - * interpretted as per the HW layout. The second 2 checks are the unique + * interpreted as per the HW layout. The second 2 checks are the unique * encoding that we use for PROT_NONE. It is insufficient to only use * the first check because we share the same encoding scheme with pmds * which support pmd_mkinvalid(), so can be present-invalid without @@ -1948,6 +1952,6 @@ static inline void clear_young_dirty_ptes(struct vm_area_struct *vma, #endif /* CONFIG_ARM64_CONTPTE */ -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_PGTABLE_H */ diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 0d5d1f0525eb..ab78a78821a2 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -9,7 +9,7 @@ #ifndef __ASM_PROCFNS_H #define __ASM_PROCFNS_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/page.h> @@ -21,5 +21,5 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include <asm/memory.h> -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_PROCFNS_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 61d62bfd5a7b..5acce7962228 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -25,7 +25,7 @@ #define MTE_CTRL_STORE_ONLY (1UL << 19) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/build_bug.h> #include <linux/cache.h> @@ -437,5 +437,5 @@ int set_tsc_mode(unsigned int val); #define GET_TSC_CTL(adr) get_tsc_mode((adr)) #define SET_TSC_CTL(val) set_tsc_mode((val)) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 65b053a24d82..39582511ad72 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -94,7 +94,7 @@ */ #define NO_SYSCALL (-1) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ 
#include <linux/bug.h> #include <linux/types.h> @@ -361,5 +361,5 @@ static inline void procedure_link_pointer_set(struct pt_regs *regs, extern unsigned long profile_pc(struct pt_regs *regs); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/arm64/include/asm/rsi_smc.h b/arch/arm64/include/asm/rsi_smc.h index 6cb070eca9e9..e19253f96c94 100644 --- a/arch/arm64/include/asm/rsi_smc.h +++ b/arch/arm64/include/asm/rsi_smc.h @@ -122,7 +122,7 @@ */ #define SMC_RSI_ATTESTATION_TOKEN_CONTINUE SMC_RSI_FID(0x195) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct realm_config { union { @@ -142,7 +142,7 @@ struct realm_config { */ } __aligned(0x1000); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ /* * Read configuration for the current Realm. diff --git a/arch/arm64/include/asm/rwonce.h b/arch/arm64/include/asm/rwonce.h index 97d9256d33c9..78beceec10cd 100644 --- a/arch/arm64/include/asm/rwonce.h +++ b/arch/arm64/include/asm/rwonce.h @@ -5,7 +5,7 @@ #ifndef __ASM_RWONCE_H #define __ASM_RWONCE_H -#if defined(CONFIG_LTO) && !defined(__ASSEMBLY__) +#if defined(CONFIG_LTO) && !defined(__ASSEMBLER__) #include <linux/compiler_types.h> #include <asm/alternative-macros.h> @@ -62,7 +62,7 @@ }) #endif /* !BUILD_VDSO */ -#endif /* CONFIG_LTO && !__ASSEMBLY__ */ +#endif /* CONFIG_LTO && !__ASSEMBLER__ */ #include <asm-generic/rwonce.h> diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h index a76f9b387a26..d31b128f683f 100644 --- a/arch/arm64/include/asm/scs.h +++ b/arch/arm64/include/asm/scs.h @@ -2,7 +2,7 @@ #ifndef _ASM_SCS_H #define _ASM_SCS_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #include <asm/asm-offsets.h> #include <asm/sysreg.h> @@ -55,6 +55,6 @@ enum { int __pi_scs_patch(const u8 eh_frame[], int size); -#endif /* __ASSEMBLY __ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_SCS_H */ diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h index 484cb6972e99..b2248bd3cb58 100644 --- a/arch/arm64/include/asm/sdei.h +++ b/arch/arm64/include/asm/sdei.h @@ -9,7 +9,7 @@ #define SDEI_STACK_SIZE IRQ_STACK_SIZE -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/linkage.h> #include <linux/preempt.h> @@ -49,5 +49,5 @@ unsigned long do_sdei_event(struct pt_regs *regs, unsigned long sdei_arch_get_entry_point(int conduit); #define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_SDEI_H */ diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h index 8e86c9e70e48..abd642c92f86 100644 --- a/arch/arm64/include/asm/simd.h +++ b/arch/arm64/include/asm/simd.h @@ -29,7 +29,7 @@ static __must_check inline bool may_use_simd(void) */ return !WARN_ON(!system_capabilities_finalized()) && system_supports_fpsimd() && - !in_hardirq() && !irqs_disabled() && !in_nmi(); + !in_hardirq() && !in_nmi(); } #else /* ! 
CONFIG_KERNEL_MODE_NEON */ diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index d48ef6d5abcc..10ea4f543069 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -23,7 +23,7 @@ #define CPU_STUCK_REASON_52_BIT_VA (UL(1) << CPU_STUCK_REASON_SHIFT) #define CPU_STUCK_REASON_NO_GRAN (UL(2) << CPU_STUCK_REASON_SHIFT) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/threads.h> #include <linux/cpumask.h> @@ -155,6 +155,6 @@ bool cpus_are_stuck_in_kernel(void); extern void crash_smp_send_stop(void); extern bool smp_crash_stop_failed(void); -#endif /* ifndef __ASSEMBLY__ */ +#endif /* ifndef __ASSEMBLER__ */ #endif /* ifndef __ASM_SMP_H */ diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h index 8fef12626090..0527c53b0ec5 100644 --- a/arch/arm64/include/asm/spectre.h +++ b/arch/arm64/include/asm/spectre.h @@ -12,7 +12,7 @@ #define BP_HARDEN_EL2_SLOTS 4 #define __BP_HARDEN_HYP_VECS_SZ ((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/smp.h> #include <asm/percpu.h> @@ -118,5 +118,5 @@ void spectre_bhb_patch_wa3(struct alt_instr *alt, void spectre_bhb_patch_clearbhb(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_SPECTRE_H */ diff --git a/arch/arm64/include/asm/stacktrace/frame.h b/arch/arm64/include/asm/stacktrace/frame.h index 0ee0f6ba0fd8..796797b8db7e 100644 --- a/arch/arm64/include/asm/stacktrace/frame.h +++ b/arch/arm64/include/asm/stacktrace/frame.h @@ -25,7 +25,7 @@ #define FRAME_META_TYPE_FINAL 1 #define FRAME_META_TYPE_PT_REGS 2 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * A standard AAPCS64 frame record. */ @@ -43,6 +43,6 @@ struct frame_record_meta { struct frame_record record; u64 type; }; -#endif /* __ASSEMBLY */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_STACKTRACE_FRAME_H */ diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index 0cde2f473971..e65f33edf9d6 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h @@ -23,7 +23,7 @@ struct cpu_suspend_ctx { * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter(). * This data must survive until cpu_resume() is called. * - * This struct desribes the size and the layout of the saved cpu state. + * This struct describes the size and the layout of the saved cpu state. * The layout of the callee_saved_regs is defined by the implementation * of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed * in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6455db1b54fd..9c9a96643412 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -52,7 +52,7 @@ #ifndef CONFIG_BROKEN_GAS_INST -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ // The space separator is omitted so that __emit_inst(x) can be parsed as // either an assembler directive or an assembler macro argument. 
#define __emit_inst(x) .inst(x) @@ -71,11 +71,11 @@ (((x) >> 24) & 0x000000ff)) #endif /* CONFIG_CPU_BIG_ENDIAN */ -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define __emit_inst(x) .long __INSTR_BSWAP(x) -#else /* __ASSEMBLY__ */ +#else /* __ASSEMBLER__ */ #define __emit_inst(x) ".long " __stringify(__INSTR_BSWAP(x)) "\n\t" -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* CONFIG_BROKEN_GAS_INST */ @@ -1131,7 +1131,7 @@ #define ARM64_FEATURE_FIELD_BITS 4 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ .macro mrs_s, rt, sreg __emit_inst(0xd5200000|(\sreg)|(.L__gpr_num_\rt)) diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h index 344b1c1a4bbb..d316a804eb38 100644 --- a/arch/arm64/include/asm/system_misc.h +++ b/arch/arm64/include/asm/system_misc.h @@ -7,7 +7,7 @@ #ifndef __ASM_SYSTEM_MISC_H #define __ASM_SYSTEM_MISC_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/compiler.h> #include <linux/linkage.h> @@ -28,6 +28,6 @@ void arm64_notify_die(const char *str, struct pt_regs *regs, struct mm_struct; extern void __show_regs(struct pt_regs *); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_SYSTEM_MISC_H */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index f241b8601ebd..a803b887b0b4 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -10,7 +10,7 @@ #include <linux/compiler.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct task_struct; diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 18a5dc0c9a54..a2d65d7d6aae 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -8,7 +8,7 @@ #ifndef __ASM_TLBFLUSH_H #define __ASM_TLBFLUSH_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/bitfield.h> #include <linux/mm_types.h> @@ -249,6 +249,19 @@ static inline unsigned long get_trans_granule(void) * cannot be easily determined, the value TLBI_TTL_UNKNOWN will * perform a non-hinted invalidation. * + * local_flush_tlb_page(vma, addr) + * Local variant of flush_tlb_page(). Stale TLB entries may + * remain in remote CPUs. + * + * local_flush_tlb_page_nonotify(vma, addr) + * Same as local_flush_tlb_page() except MMU notifier will not be + * called. + * + * local_flush_tlb_contpte(vma, addr) + * Invalidate the virtual-address range + * '[addr, addr+CONT_PTE_SIZE)' mapped with contpte on local CPU + * for the user address space corresponding to 'vma->mm'. Stale + * TLB entries may remain in remote CPUs. 
* * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented * on top of these routines, since that is our interface to the mmu_gather @@ -282,6 +295,33 @@ static inline void flush_tlb_mm(struct mm_struct *mm) mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); } +static inline void __local_flush_tlb_page_nonotify_nosync(struct mm_struct *mm, + unsigned long uaddr) +{ + unsigned long addr; + + dsb(nshst); + addr = __TLBI_VADDR(uaddr, ASID(mm)); + __tlbi(vale1, addr); + __tlbi_user(vale1, addr); +} + +static inline void local_flush_tlb_page_nonotify(struct vm_area_struct *vma, + unsigned long uaddr) +{ + __local_flush_tlb_page_nonotify_nosync(vma->vm_mm, uaddr); + dsb(nsh); +} + +static inline void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long uaddr) +{ + __local_flush_tlb_page_nonotify_nosync(vma->vm_mm, uaddr); + mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, uaddr & PAGE_MASK, + (uaddr & PAGE_MASK) + PAGE_SIZE); + dsb(nsh); +} + static inline void __flush_tlb_page_nosync(struct mm_struct *mm, unsigned long uaddr) { @@ -472,6 +512,22 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, dsb(ish); } +static inline void local_flush_tlb_contpte(struct vm_area_struct *vma, + unsigned long addr) +{ + unsigned long asid; + + addr = round_down(addr, CONT_PTE_SIZE); + + dsb(nshst); + asid = ASID(vma->vm_mm); + __flush_tlb_range_op(vale1, addr, CONT_PTES, PAGE_SIZE, asid, + 3, true, lpa2_is_enabled()); + mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, addr, + addr + CONT_PTE_SIZE); + dsb(nsh); +} + static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -524,6 +580,33 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b { __flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3); } + +static inline bool __pte_flags_need_flush(ptdesc_t oldval, ptdesc_t newval) +{ + ptdesc_t diff = oldval ^ newval; + + /* invalid to valid transition requires no flush */ + if (!(oldval & PTE_VALID)) + return false; + + /* Transition in the SW bits requires no flush */ + diff &= ~PTE_SWBITS_MASK; + + return diff; +} + +static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte) +{ + return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte)); +} +#define pte_needs_flush pte_needs_flush + +static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd) +{ + return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd)); +} +#define huge_pmd_needs_flush huge_pmd_needs_flush + #endif #endif diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h index 61679070f595..232b46969088 100644 --- a/arch/arm64/include/asm/vdso.h +++ b/arch/arm64/include/asm/vdso.h @@ -7,7 +7,7 @@ #define __VDSO_PAGES 4 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <generated/vdso-offsets.h> @@ -19,6 +19,6 @@ extern char vdso_start[], vdso_end[]; extern char vdso32_start[], vdso32_end[]; -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_H */ diff --git a/arch/arm64/include/asm/vdso/compat_barrier.h b/arch/arm64/include/asm/vdso/compat_barrier.h index 6d75e03d3827..d7ebe7ceefa0 100644 --- a/arch/arm64/include/asm/vdso/compat_barrier.h +++ b/arch/arm64/include/asm/vdso/compat_barrier.h @@ -5,7 +5,7 @@ #ifndef __COMPAT_BARRIER_H #define __COMPAT_BARRIER_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * Warning: This code is meant to be used from the compat vDSO only. 
*/ @@ -31,6 +31,6 @@ #define smp_rmb() aarch32_smp_rmb() #define smp_wmb() aarch32_smp_wmb() -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __COMPAT_BARRIER_H */ diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h index 7d1a116549b1..0d513f924321 100644 --- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h +++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h @@ -5,7 +5,7 @@ #ifndef __ASM_VDSO_COMPAT_GETTIMEOFDAY_H #define __ASM_VDSO_COMPAT_GETTIMEOFDAY_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/barrier.h> #include <asm/unistd_compat_32.h> @@ -161,6 +161,6 @@ static inline bool vdso_clocksource_ok(const struct vdso_clock *vc) } #define vdso_clocksource_ok vdso_clocksource_ok -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_COMPAT_GETTIMEOFDAY_H */ diff --git a/arch/arm64/include/asm/vdso/getrandom.h b/arch/arm64/include/asm/vdso/getrandom.h index a2197da1951b..da1d58bbfabe 100644 --- a/arch/arm64/include/asm/vdso/getrandom.h +++ b/arch/arm64/include/asm/vdso/getrandom.h @@ -3,7 +3,7 @@ #ifndef __ASM_VDSO_GETRANDOM_H #define __ASM_VDSO_GETRANDOM_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/unistd.h> #include <asm/vdso/vsyscall.h> @@ -33,6 +33,6 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns return ret; } -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_GETRANDOM_H */ diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h index c59e84105b43..3658a757e255 100644 --- a/arch/arm64/include/asm/vdso/gettimeofday.h +++ b/arch/arm64/include/asm/vdso/gettimeofday.h @@ -7,7 +7,7 @@ #ifdef __aarch64__ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/alternative.h> #include <asm/arch_timer.h> @@ -96,7 +96,7 @@ static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data( #define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data #endif /* IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) */ -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #else /* !__aarch64__ */ diff --git a/arch/arm64/include/asm/vdso/processor.h b/arch/arm64/include/asm/vdso/processor.h index ff830b766ad2..7abb0cc81cd6 100644 --- a/arch/arm64/include/asm/vdso/processor.h +++ b/arch/arm64/include/asm/vdso/processor.h @@ -5,13 +5,13 @@ #ifndef __ASM_VDSO_PROCESSOR_H #define __ASM_VDSO_PROCESSOR_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ static inline void cpu_relax(void) { asm volatile("yield" ::: "memory"); } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h index 417aae5763a8..3f3c8eb74e2e 100644 --- a/arch/arm64/include/asm/vdso/vsyscall.h +++ b/arch/arm64/include/asm/vdso/vsyscall.h @@ -2,7 +2,7 @@ #ifndef __ASM_VDSO_VSYSCALL_H #define __ASM_VDSO_VSYSCALL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <vdso/datapage.h> @@ -22,6 +22,6 @@ void __arch_update_vdso_clock(struct vdso_clock *vc) /* The asm-generic header needs to be included after the definitions above */ #include <asm-generic/vdso/vsyscall.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index aa280f356b96..530af9620fdb 100644 --- a/arch/arm64/include/asm/virt.h +++ 
b/arch/arm64/include/asm/virt.h @@ -56,7 +56,7 @@ */ #define BOOT_CPU_FLAG_E2H BIT_ULL(32) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/ptrace.h> #include <asm/sections.h> @@ -161,6 +161,6 @@ static inline bool is_hyp_nvhe(void) return is_hyp_mode_available() && !is_kernel_in_hyp_mode(); } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* ! __ASM__VIRT_H */ diff --git a/arch/arm64/include/asm/vmap_stack.h b/arch/arm64/include/asm/vmap_stack.h index 20873099c035..75daee1a07e9 100644 --- a/arch/arm64/include/asm/vmap_stack.h +++ b/arch/arm64/include/asm/vmap_stack.h @@ -3,9 +3,7 @@ #ifndef __ASM_VMAP_STACK_H #define __ASM_VMAP_STACK_H -#include <linux/bug.h> #include <linux/gfp.h> -#include <linux/kconfig.h> #include <linux/vmalloc.h> #include <linux/pgtable.h> #include <asm/memory.h> @@ -19,8 +17,6 @@ static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node) { void *p; - BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK)); - p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node, __builtin_return_address(0)); return kasan_reset_tag(p); diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index ed5f3892674c..a792a599b9d6 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -31,7 +31,7 @@ #define KVM_SPSR_FIQ 4 #define KVM_NR_SPSR 5 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/psci.h> #include <linux/types.h> #include <asm/ptrace.h> diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 0f39ba4f3efd..6fed93fb2536 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -80,7 +80,7 @@ #define PTRACE_PEEKMTETAGS 33 #define PTRACE_POKEMTETAGS 34 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * User structures for general purpose, floating point and debug registers. 
@@ -332,6 +332,6 @@ struct user_gcs { __u64 gcspr_el0; }; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _UAPI__ASM_PTRACE_H */ diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index d42f7a92238b..e29bf3e2d0cc 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -17,7 +17,7 @@ #ifndef _UAPI__ASM_SIGCONTEXT_H #define _UAPI__ASM_SIGCONTEXT_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> @@ -192,7 +192,7 @@ struct gcs_context { __u64 reserved; }; -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #include <asm/sve_context.h> diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index 7aca29e1d30b..3f83546d016a 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -133,7 +133,7 @@ static int __init acpi_fadt_sanity_check(void) /* * FADT is required on arm64; retrieve it to check its presence - * and carry out revision and ACPI HW reduced compliancy tests + * and carry out revision and ACPI HW reduced compliance tests */ status = acpi_get_table(ACPI_SIG_FADT, 0, &table); if (ACPI_FAILURE(status)) { @@ -439,7 +439,7 @@ int apei_claim_sea(struct pt_regs *regs) irq_work_run(); __irq_exit(); } else { - pr_warn_ratelimited("APEI work queued but not completed"); + pr_warn_ratelimited("APEI work queued but not completed\n"); err = -EINPROGRESS; } } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 5ed401ff79e3..77654643b952 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1002,7 +1002,7 @@ static void __init sort_ftr_regs(void) /* * Initialise the CPU feature register from Boot CPU values. - * Also initiliases the strict_mask for the register. + * Also initialises the strict_mask for the register. * Any bits that are not covered by an arm64_ftr_bits entry are considered * RES0 for the system-wide value, and must strictly match. */ diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 6c371b158b99..a81cb4aa4738 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -10,6 +10,7 @@ #include <linux/efi.h> #include <linux/init.h> #include <linux/kmemleak.h> +#include <linux/kthread.h> #include <linux/screen_info.h> #include <linux/vmalloc.h> @@ -165,20 +166,53 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f) return s; } -static DEFINE_RAW_SPINLOCK(efi_rt_lock); - void arch_efi_call_virt_setup(void) { - efi_virtmap_load(); - raw_spin_lock(&efi_rt_lock); + efi_runtime_assert_lock_held(); + + if (preemptible() && (current->flags & PF_KTHREAD)) { + /* + * Disable migration to ensure that a preempted EFI runtime + * service call will be resumed on the same CPU. This avoids + * potential issues with EFI runtime calls that are preempted + * while polling for an asynchronous completion of a secure + * firmware call, which may not permit the CPU to change. + */ + migrate_disable(); + kthread_use_mm(&efi_mm); + } else { + efi_virtmap_load(); + } + + /* + * Enable access to the valid TTBR0_EL1 and invoke the errata + * workaround directly since there is no return from exception when + * invoking the EFI run-time services. + */ + uaccess_ttbr0_enable(); + post_ttbr_update_workaround(); + __efi_fpsimd_begin(); } void arch_efi_call_virt_teardown(void) { __efi_fpsimd_end(); - raw_spin_unlock(&efi_rt_lock); - efi_virtmap_unload(); + + /* + * Defer the switch to the current thread's TTBR0_EL1 until + * uaccess_enable(). 
Do so before efi_virtmap_unload() updates the + * saved TTBR0 value, so the userland page tables are not activated + * inadvertently over the back of an exception. + */ + uaccess_ttbr0_disable(); + + if (preemptible() && (current->flags & PF_KTHREAD)) { + kthread_unuse_mm(&efi_mm); + migrate_enable(); + } else { + efi_virtmap_unload(); + } } asmlinkage u64 *efi_rt_stack_top __ro_after_init; diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index f546a914f041..6e8e1e620221 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -34,20 +34,12 @@ * Handle IRQ/context state management when entering from kernel mode. * Before this function is called it is not safe to call regular kernel code, * instrumentable code, or any code which may trigger an exception. - * - * This is intended to match the logic in irqentry_enter(), handling the kernel - * mode transitions only. */ -static __always_inline irqentry_state_t __enter_from_kernel_mode(struct pt_regs *regs) -{ - return irqentry_enter(regs); -} - static noinstr irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs) { irqentry_state_t state; - state = __enter_from_kernel_mode(regs); + state = irqentry_enter(regs); mte_check_tfsr_entry(); mte_disable_tco_entry(current); @@ -58,21 +50,12 @@ static noinstr irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs) * Handle IRQ/context state management when exiting to kernel mode. * After this function returns it is not safe to call regular kernel code, * instrumentable code, or any code which may trigger an exception. - * - * This is intended to match the logic in irqentry_exit(), handling the kernel - * mode transitions only, and with preemption handled elsewhere. */ -static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs, - irqentry_state_t state) -{ - irqentry_exit(regs, state); -} - static void noinstr exit_to_kernel_mode(struct pt_regs *regs, irqentry_state_t state) { mte_check_tfsr_exit(); - __exit_to_kernel_mode(regs, state); + irqentry_exit(regs, state); } /* @@ -80,17 +63,12 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs, * Before this function is called it is not safe to call regular kernel code, * instrumentable code, or any code which may trigger an exception. */ -static __always_inline void __enter_from_user_mode(struct pt_regs *regs) +static __always_inline void arm64_enter_from_user_mode(struct pt_regs *regs) { enter_from_user_mode(regs); mte_disable_tco_entry(current); } -static __always_inline void arm64_enter_from_user_mode(struct pt_regs *regs) -{ - __enter_from_user_mode(regs); -} - /* * Handle IRQ/context state management when exiting to user mode. 
* After this function returns it is not safe to call regular kernel code, diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 169ccf600066..025140caafe7 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -94,7 +94,7 @@ SYM_CODE_START(ftrace_caller) stp x29, x30, [sp, #FREGS_SIZE] add x29, sp, #FREGS_SIZE - /* Prepare arguments for the the tracer func */ + /* Prepare arguments for the tracer func */ sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn) mov x1, x9 // parent_ip (callsite's LR) mov x3, sp // regs diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index e3f8f51748bc..6d956ed23bd2 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -225,10 +225,21 @@ static void fpsimd_bind_task_to_cpu(void); */ static void get_cpu_fpsimd_context(void) { - if (!IS_ENABLED(CONFIG_PREEMPT_RT)) - local_bh_disable(); - else + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { + /* + * The softirq subsystem lacks a true unmask/mask API, and + * re-enabling softirq processing using local_bh_enable() will + * not only unmask softirqs, it will also result in immediate + * delivery of any pending softirqs. + * This is undesirable when running with IRQs disabled, but in + * that case, there is no need to mask softirqs in the first + * place, so only bother doing so when IRQs are enabled. + */ + if (!irqs_disabled()) + local_bh_disable(); + } else { preempt_disable(); + } } /* @@ -240,10 +251,12 @@ static void get_cpu_fpsimd_context(void) */ static void put_cpu_fpsimd_context(void) { - if (!IS_ENABLED(CONFIG_PREEMPT_RT)) - local_bh_enable(); - else + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { + if (!irqs_disabled()) + local_bh_enable(); + } else { preempt_enable(); + } } unsigned int task_get_vl(const struct task_struct *task, enum vec_type type) @@ -1934,11 +1947,11 @@ void __efi_fpsimd_begin(void) if (!system_supports_fpsimd()) return; - WARN_ON(preemptible()); - if (may_use_simd()) { kernel_neon_begin(); } else { + WARN_ON(preemptible()); + /* * If !efi_sve_state, SVE can't be in use yet and doesn't need * preserving: diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 5adad37ab4fa..5a1554a44162 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -492,7 +492,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, return ret; /* - * When using mcount, callsites in modules may have been initalized to + * When using mcount, callsites in modules may have been initialized to * call an arbitrary module PLT (which redirects to the _mcount stub) * rather than the ftrace PLT we'll use at runtime (which redirects to * the ftrace trampoline). We can ignore the old PLT when initializing diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index c0065a1d77cf..15dedb385b9e 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -62,7 +62,7 @@ static void __init init_irq_stacks(void) } } -#ifndef CONFIG_PREEMPT_RT +#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK static void ____do_softirq(struct pt_regs *regs) { __do_softirq(); diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 6f121a0164a4..239c16e3d02f 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -251,7 +251,7 @@ void crash_post_resume(void) * marked as Reserved as memory was allocated via memblock_reserve(). 
* * In hibernation, the pages which are Reserved and yet "nosave" are excluded - * from the hibernation iamge. crash_is_nosave() does thich check for crash + * from the hibernation image. crash_is_nosave() does thich check for crash * dump kernel and will reduce the total size of hibernation image. */ diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c index 2799bdb2fb82..941668800aea 100644 --- a/arch/arm64/kernel/probes/uprobes.c +++ b/arch/arm64/kernel/probes/uprobes.c @@ -131,7 +131,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) struct uprobe_task *utask = current->utask; /* - * Task has received a fatal signal, so reset back to probbed + * Task has received a fatal signal, so reset back to probed * address. */ instruction_pointer_set(regs, utask->vaddr); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 4b001121c72d..b9bdd83fbbca 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -912,13 +912,39 @@ static int sve_set_common(struct task_struct *target, return -EINVAL; /* - * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by - * vec_set_vector_length(), which will also validate them for us: + * On systems without SVE we accept FPSIMD format writes with + * a VL of 0 to allow exiting streaming mode, otherwise a VL + * is required. */ - ret = vec_set_vector_length(target, type, header.vl, - ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); - if (ret) - return ret; + if (header.vl) { + /* + * If the system does not support SVE we can't + * configure a SVE VL. + */ + if (!system_supports_sve() && type == ARM64_VEC_SVE) + return -EINVAL; + + /* + * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are + * consumed by vec_set_vector_length(), which will + * also validate them for us: + */ + ret = vec_set_vector_length(target, type, header.vl, + ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); + if (ret) + return ret; + } else { + /* If the system supports SVE we require a VL. */ + if (system_supports_sve()) + return -EINVAL; + + /* + * Only FPSIMD formatted data with no flags set is + * supported. + */ + if (header.flags != SVE_PT_REGS_FPSIMD) + return -EINVAL; + } /* Allocate SME storage if necessary, preserving any existing ZA/ZT state */ if (type == ARM64_VEC_SME) { @@ -1016,7 +1042,7 @@ static int sve_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - if (!system_supports_sve()) + if (!system_supports_sve() && !system_supports_sme()) return -EINVAL; return sve_set_common(target, regset, pos, count, kbuf, ubuf, diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 95169f7b6531..778f2a1faac8 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -63,8 +63,6 @@ static void free_sdei_stacks(void) { int cpu; - BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK)); - for_each_possible_cpu(cpu) { _free_sdei_stack(&sdei_stack_normal_ptr, cpu); _free_sdei_stack(&sdei_stack_critical_ptr, cpu); @@ -88,8 +86,6 @@ static int init_sdei_stacks(void) int cpu; int err = 0; - BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK)); - for_each_possible_cpu(cpu) { err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu); if (err) @@ -202,7 +198,7 @@ out_err: /* * do_sdei_event() returns one of: * SDEI_EV_HANDLED - success, return to the interrupted context. - * SDEI_EV_FAILED - failure, return this error code to firmare. + * SDEI_EV_FAILED - failure, return this error code to firmware. 
* virtual-address - success, return to this address. */ unsigned long __kprobes do_sdei_event(struct pt_regs *regs, diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 68cea3a4a35c..a0bfe624f899 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -350,7 +350,7 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) /* * Now that the dying CPU is beyond the point of no return w.r.t. - * in-kernel synchronisation, try to get the firwmare to help us to + * in-kernel synchronisation, try to get the firmware to help us to * verify that it has really left the kernel before we consider * clobbering anything it might still be using. */ @@ -523,7 +523,7 @@ int arch_register_cpu(int cpu) /* * Availability of the acpi handle is sufficient to establish - * that _STA has aleady been checked. No need to recheck here. + * that _STA has already been checked. No need to recheck here. */ c->hotpluggable = arch_cpu_is_hotpluggable(cpu); diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index aba7ca6bca2d..c062badd1a56 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -96,7 +96,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, * (Similarly for HVC and SMC elsewhere.) */ - if (flags & _TIF_MTE_ASYNC_FAULT) { + if (unlikely(flags & _TIF_MTE_ASYNC_FAULT)) { /* * Process the asynchronous tag check fault before the actual * syscall. do_notify_resume() will send a signal to userspace diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 681939ef5d16..914282016069 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -922,7 +922,7 @@ void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigne __show_regs(regs); /* - * We use nmi_panic to limit the potential for recusive overflows, and + * We use nmi_panic to limit the potential for recursive overflows, and * to get a better stack trace. */ nmi_panic(NULL, "kernel stack overflow"); diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index dbd74e4885e2..ebcb7f1a55ee 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -815,7 +815,7 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map) tpt = tpc = true; /* - * For the poor sods that could not correctly substract one value + * For the poor sods that could not correctly subtract one value * from another, trap the full virtual timer and counter. */ if (has_broken_cntvoff() && timer_get_offset(map->direct_vtimer)) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index f21d1b7f20f8..69d92e3e3d1a 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2441,7 +2441,7 @@ static void kvm_hyp_init_symbols(void) kvm_nvhe_sym(__icache_flags) = __icache_flags; kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits; - /* Propagate the FGT state to the the nVHE side */ + /* Propagate the FGT state to the nVHE side */ kvm_nvhe_sym(hfgrtr_masks) = hfgrtr_masks; kvm_nvhe_sym(hfgwtr_masks) = hfgwtr_masks; kvm_nvhe_sym(hfgitr_masks) = hfgitr_masks; diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index 4e16f9b96f63..0ee7ebb2485e 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -115,7 +115,7 @@ static void ffa_set_retval(struct kvm_cpu_context *ctxt, * * FFA-1.3 introduces 64-bit variants of the CPU cycle management * interfaces. 
Moreover, FF-A 1.3 clarifies that SMC32 direct requests - * complete with SMC32 direct reponses which *should* allow us use the + * complete with SMC32 direct responses which *should* allow us use the * function ID sent by the caller to determine whether to return x8-x17. * * Note that we also cannot rely on function IDs in the response. diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 7cc964af8d30..9a6a80c3fbe5 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1755,7 +1755,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, /* * Check if this is non-struct page memory PFN, and cannot support - * CMOs. It could potentially be unsafe to access as cachable. + * CMOs. It could potentially be unsafe to access as cacheable. */ if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(pfn)) { if (is_vma_cacheable) { diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 7a045cad6bdf..4c16dbbd6e90 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -85,7 +85,7 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu) /* * Let's treat memory allocation failures as benign: If we fail to * allocate anything, return an error and keep the allocated array - * alive. Userspace may try to recover by intializing the vcpu + * alive. Userspace may try to recover by initializing the vcpu * again, and there is no reason to affect the whole VM for this. */ num_mmus = atomic_read(&kvm->online_vcpus) * S2_MMU_PER_VCPU; diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c index c0557945939c..589bcf878938 100644 --- a/arch/arm64/mm/contpte.c +++ b/arch/arm64/mm/contpte.c @@ -622,8 +622,7 @@ int contpte_ptep_set_access_flags(struct vm_area_struct *vma, __ptep_set_access_flags(vma, addr, ptep, entry, 0); if (dirty) - __flush_tlb_range(vma, start_addr, addr, - PAGE_SIZE, true, 3); + local_flush_tlb_contpte(vma, start_addr); } else { __contpte_try_unfold(vma->vm_mm, addr, ptep, orig_pte); __ptep_set_access_flags(vma, addr, ptep, entry, dirty); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index d816ff44faff..4ecdfa6bcdbb 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -233,9 +233,13 @@ int __ptep_set_access_flags(struct vm_area_struct *vma, pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval); } while (pteval != old_pteval); - /* Invalidate a stale read-only entry */ + /* + * Invalidate the local stale read-only entry. Remote stale entries + * may still cause page faults and be invalidated via + * flush_tlb_fix_spurious_fault(). 
+ */ if (dirty) - flush_tlb_page(vma, address); + local_flush_tlb_page(vma, address); return 1; } diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index b8d37eb037fc..f604a7983de3 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -49,6 +49,8 @@ #define NO_CONT_MAPPINGS BIT(1) #define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ +#define INVALID_PHYS_ADDR (-1ULL) + DEFINE_STATIC_KEY_FALSE(arm64_ptdump_lock_key); u64 kimage_voffset __ro_after_init; @@ -194,11 +196,11 @@ static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end, } while (ptep++, addr += PAGE_SIZE, addr != end); } -static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, - unsigned long end, phys_addr_t phys, - pgprot_t prot, - phys_addr_t (*pgtable_alloc)(enum pgtable_type), - int flags) +static int alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, + unsigned long end, phys_addr_t phys, + pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags) { unsigned long next; pmd_t pmd = READ_ONCE(*pmdp); @@ -213,6 +215,8 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, pmdval |= PMD_TABLE_PXN; BUG_ON(!pgtable_alloc); pte_phys = pgtable_alloc(TABLE_PTE); + if (pte_phys == INVALID_PHYS_ADDR) + return -ENOMEM; ptep = pte_set_fixmap(pte_phys); init_clear_pgtable(ptep); ptep += pte_index(addr); @@ -244,11 +248,13 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, * walker. */ pte_clear_fixmap(); + + return 0; } -static void init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end, - phys_addr_t phys, pgprot_t prot, - phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags) +static int init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end, + phys_addr_t phys, pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags) { unsigned long next; @@ -269,22 +275,29 @@ static void init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end, BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd), READ_ONCE(pmd_val(*pmdp)))); } else { - alloc_init_cont_pte(pmdp, addr, next, phys, prot, - pgtable_alloc, flags); + int ret; + + ret = alloc_init_cont_pte(pmdp, addr, next, phys, prot, + pgtable_alloc, flags); + if (ret) + return ret; BUG_ON(pmd_val(old_pmd) != 0 && pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp))); } phys += next - addr; } while (pmdp++, addr = next, addr != end); + + return 0; } -static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, - unsigned long end, phys_addr_t phys, - pgprot_t prot, - phys_addr_t (*pgtable_alloc)(enum pgtable_type), - int flags) +static int alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, + unsigned long end, phys_addr_t phys, + pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags) { + int ret; unsigned long next; pud_t pud = READ_ONCE(*pudp); pmd_t *pmdp; @@ -301,6 +314,8 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, pudval |= PUD_TABLE_PXN; BUG_ON(!pgtable_alloc); pmd_phys = pgtable_alloc(TABLE_PMD); + if (pmd_phys == INVALID_PHYS_ADDR) + return -ENOMEM; pmdp = pmd_set_fixmap(pmd_phys); init_clear_pgtable(pmdp); pmdp += pmd_index(addr); @@ -320,20 +335,26 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, (flags & NO_CONT_MAPPINGS) == 0) __prot = __pgprot(pgprot_val(prot) | PTE_CONT); - init_pmd(pmdp, addr, next, phys, __prot, pgtable_alloc, flags); + ret = init_pmd(pmdp, addr, next, phys, __prot, pgtable_alloc, flags); + if (ret) + goto out; pmdp += pmd_index(next) - pmd_index(addr); phys += next - addr; 
} while (addr = next, addr != end); +out: pmd_clear_fixmap(); + + return ret; } -static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end, - phys_addr_t phys, pgprot_t prot, - phys_addr_t (*pgtable_alloc)(enum pgtable_type), - int flags) +static int alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end, + phys_addr_t phys, pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags) { + int ret = 0; unsigned long next; p4d_t p4d = READ_ONCE(*p4dp); pud_t *pudp; @@ -346,6 +367,8 @@ static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end, p4dval |= P4D_TABLE_PXN; BUG_ON(!pgtable_alloc); pud_phys = pgtable_alloc(TABLE_PUD); + if (pud_phys == INVALID_PHYS_ADDR) + return -ENOMEM; pudp = pud_set_fixmap(pud_phys); init_clear_pgtable(pudp); pudp += pud_index(addr); @@ -375,8 +398,10 @@ static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end, BUG_ON(!pgattr_change_is_safe(pud_val(old_pud), READ_ONCE(pud_val(*pudp)))); } else { - alloc_init_cont_pmd(pudp, addr, next, phys, prot, - pgtable_alloc, flags); + ret = alloc_init_cont_pmd(pudp, addr, next, phys, prot, + pgtable_alloc, flags); + if (ret) + goto out; BUG_ON(pud_val(old_pud) != 0 && pud_val(old_pud) != READ_ONCE(pud_val(*pudp))); @@ -384,14 +409,18 @@ static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end, phys += next - addr; } while (pudp++, addr = next, addr != end); +out: pud_clear_fixmap(); + + return ret; } -static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end, - phys_addr_t phys, pgprot_t prot, - phys_addr_t (*pgtable_alloc)(enum pgtable_type), - int flags) +static int alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end, + phys_addr_t phys, pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags) { + int ret; unsigned long next; pgd_t pgd = READ_ONCE(*pgdp); p4d_t *p4dp; @@ -404,6 +433,8 @@ static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end, pgdval |= PGD_TABLE_PXN; BUG_ON(!pgtable_alloc); p4d_phys = pgtable_alloc(TABLE_P4D); + if (p4d_phys == INVALID_PHYS_ADDR) + return -ENOMEM; p4dp = p4d_set_fixmap(p4d_phys); init_clear_pgtable(p4dp); p4dp += p4d_index(addr); @@ -418,8 +449,10 @@ static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end, next = p4d_addr_end(addr, end); - alloc_init_pud(p4dp, addr, next, phys, prot, - pgtable_alloc, flags); + ret = alloc_init_pud(p4dp, addr, next, phys, prot, + pgtable_alloc, flags); + if (ret) + goto out; BUG_ON(p4d_val(old_p4d) != 0 && p4d_val(old_p4d) != READ_ONCE(p4d_val(*p4dp))); @@ -427,15 +460,19 @@ static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end, phys += next - addr; } while (p4dp++, addr = next, addr != end); +out: p4d_clear_fixmap(); + + return ret; } -static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys, - unsigned long virt, phys_addr_t size, - pgprot_t prot, - phys_addr_t (*pgtable_alloc)(enum pgtable_type), - int flags) +static int __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags) { + int ret; unsigned long addr, end, next; pgd_t *pgdp = pgd_offset_pgd(pgdir, virt); @@ -444,7 +481,7 @@ static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys, * within a page, we cannot map the region as the caller expects. 
*/ if (WARN_ON((phys ^ virt) & ~PAGE_MASK)) - return; + return -EINVAL; phys &= PAGE_MASK; addr = virt & PAGE_MASK; @@ -452,25 +489,45 @@ static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys, do { next = pgd_addr_end(addr, end); - alloc_init_p4d(pgdp, addr, next, phys, prot, pgtable_alloc, - flags); + ret = alloc_init_p4d(pgdp, addr, next, phys, prot, pgtable_alloc, + flags); + if (ret) + return ret; phys += next - addr; } while (pgdp++, addr = next, addr != end); + + return 0; } -static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, - unsigned long virt, phys_addr_t size, - pgprot_t prot, - phys_addr_t (*pgtable_alloc)(enum pgtable_type), - int flags) +static int __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags) { + int ret; + mutex_lock(&fixmap_lock); - __create_pgd_mapping_locked(pgdir, phys, virt, size, prot, - pgtable_alloc, flags); + ret = __create_pgd_mapping_locked(pgdir, phys, virt, size, prot, + pgtable_alloc, flags); mutex_unlock(&fixmap_lock); + + return ret; } -#define INVALID_PHYS_ADDR (-1ULL) +static void early_create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags) +{ + int ret; + + ret = __create_pgd_mapping(pgdir, phys, virt, size, prot, pgtable_alloc, + flags); + if (ret) + panic("Failed to create page tables\n"); +} static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp, enum pgtable_type pgtable_type) @@ -503,7 +560,7 @@ static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp, } static phys_addr_t -try_pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type, gfp_t gfp) +pgd_pgtable_alloc_init_mm_gfp(enum pgtable_type pgtable_type, gfp_t gfp) { return __pgd_pgtable_alloc(&init_mm, gfp, pgtable_type); } @@ -511,21 +568,13 @@ try_pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type, gfp_t gfp) static phys_addr_t __maybe_unused pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type) { - phys_addr_t pa; - - pa = __pgd_pgtable_alloc(&init_mm, GFP_PGTABLE_KERNEL, pgtable_type); - BUG_ON(pa == INVALID_PHYS_ADDR); - return pa; + return pgd_pgtable_alloc_init_mm_gfp(pgtable_type, GFP_PGTABLE_KERNEL); } static phys_addr_t pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type) { - phys_addr_t pa; - - pa = __pgd_pgtable_alloc(NULL, GFP_PGTABLE_KERNEL, pgtable_type); - BUG_ON(pa == INVALID_PHYS_ADDR); - return pa; + return __pgd_pgtable_alloc(NULL, GFP_PGTABLE_KERNEL, pgtable_type); } static void split_contpte(pte_t *ptep) @@ -546,7 +595,7 @@ static int split_pmd(pmd_t *pmdp, pmd_t pmd, gfp_t gfp, bool to_cont) pte_t *ptep; int i; - pte_phys = try_pgd_pgtable_alloc_init_mm(TABLE_PTE, gfp); + pte_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PTE, gfp); if (pte_phys == INVALID_PHYS_ADDR) return -ENOMEM; ptep = (pte_t *)phys_to_virt(pte_phys); @@ -591,7 +640,7 @@ static int split_pud(pud_t *pudp, pud_t pud, gfp_t gfp, bool to_cont) pmd_t *pmdp; int i; - pmd_phys = try_pgd_pgtable_alloc_init_mm(TABLE_PMD, gfp); + pmd_phys = pgd_pgtable_alloc_init_mm_gfp(TABLE_PMD, gfp); if (pmd_phys == INVALID_PHYS_ADDR) return -ENOMEM; pmdp = (pmd_t *)phys_to_virt(pmd_phys); @@ -903,8 +952,8 @@ void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, &phys, virt); return; } - __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, - NO_CONT_MAPPINGS); + 
early_create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, + NO_CONT_MAPPINGS); } void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, @@ -918,8 +967,8 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, if (page_mappings_only) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; - __create_pgd_mapping(mm->pgd, phys, virt, size, prot, - pgd_pgtable_alloc_special_mm, flags); + early_create_pgd_mapping(mm->pgd, phys, virt, size, prot, + pgd_pgtable_alloc_special_mm, flags); } static void update_mapping_prot(phys_addr_t phys, unsigned long virt, @@ -931,8 +980,8 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt, return; } - __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, - NO_CONT_MAPPINGS); + early_create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, + NO_CONT_MAPPINGS); /* flush the TLBs after updating live kernel mappings */ flush_tlb_kernel_range(virt, virt + size); @@ -941,8 +990,8 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt, static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start, phys_addr_t end, pgprot_t prot, int flags) { - __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start, - prot, early_pgtable_alloc, flags); + early_create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start, + prot, early_pgtable_alloc, flags); } void __init mark_linear_text_alias_ro(void) @@ -1158,6 +1207,8 @@ static int __init __kpti_install_ng_mappings(void *__unused) remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); if (!cpu) { + int ret; + alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order); kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE); kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd); @@ -1178,9 +1229,11 @@ static int __init __kpti_install_ng_mappings(void *__unused) // covers the PTE[] page itself, the remaining entries are free // to be used as a ad-hoc fixmap. 
// - __create_pgd_mapping_locked(kpti_ng_temp_pgd, __pa(alloc), - KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL, - kpti_ng_pgd_alloc, 0); + ret = __create_pgd_mapping_locked(kpti_ng_temp_pgd, __pa(alloc), + KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL, + kpti_ng_pgd_alloc, 0); + if (ret) + panic("Failed to create page tables\n"); } cpu_install_idmap(); @@ -1233,9 +1286,9 @@ static int __init map_entry_trampoline(void) /* Map only the text into the trampoline page table */ memset(tramp_pg_dir, 0, PGD_SIZE); - __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, - entry_tramp_text_size(), prot, - pgd_pgtable_alloc_init_mm, NO_BLOCK_MAPPINGS); + early_create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, + entry_tramp_text_size(), prot, + pgd_pgtable_alloc_init_mm, NO_BLOCK_MAPPINGS); /* Map both the text and data into the kernel page table */ for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++) @@ -1877,23 +1930,28 @@ int arch_add_memory(int nid, u64 start, u64 size, if (force_pte_mapping()) flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; - __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), - size, params->pgprot, pgd_pgtable_alloc_init_mm, - flags); + ret = __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), + size, params->pgprot, pgd_pgtable_alloc_init_mm, + flags); + if (ret) + goto err; memblock_clear_nomap(start, size); ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, params); if (ret) - __remove_pgd_mapping(swapper_pg_dir, - __phys_to_virt(start), size); - else { - /* Address of hotplugged memory can be smaller */ - max_pfn = max(max_pfn, PFN_UP(start + size)); - max_low_pfn = max_pfn; - } + goto err; + + /* Address of hotplugged memory can be smaller */ + max_pfn = max(max_pfn, PFN_UP(start + size)); + max_low_pfn = max_pfn; + + return 0; +err: + __remove_pgd_mapping(swapper_pg_dir, + __phys_to_virt(start), size); return ret; } diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 5135f2d66958..fe6fdc6249e3 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -148,7 +148,6 @@ static int change_memory_common(unsigned long addr, int numpages, unsigned long size = PAGE_SIZE * numpages; unsigned long end = start + size; struct vm_struct *area; - int i; if (!PAGE_ALIGNED(addr)) { start &= PAGE_MASK; @@ -184,8 +183,10 @@ static int change_memory_common(unsigned long addr, int numpages, */ if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY || pgprot_val(clear_mask) == PTE_RDONLY)) { - for (i = 0; i < area->nr_pages; i++) { - __change_memory_common((u64)page_address(area->pages[i]), + unsigned long idx = (start - (unsigned long)kasan_reset_tag(area->addr)) + >> PAGE_SHIFT; + for (; numpages; idx++, numpages--) { + __change_memory_common((u64)page_address(area->pages[idx]), PAGE_SIZE, set_mask, clear_mask); } } diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c index 8160cff35089..bf5110b91e2f 100644 --- a/arch/arm64/mm/pgd.c +++ b/arch/arm64/mm/pgd.c @@ -56,7 +56,7 @@ void __init pgtable_cache_init(void) * With 52-bit physical addresses, the architecture requires the * top-level table to be aligned to at least 64 bytes. 
*/ - BUILD_BUG_ON(PGD_SIZE < 64); + BUILD_BUG_ON(!IS_ALIGNED(PGD_SIZE, 64)); #endif /* diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index ab83089c3d8f..26b93e0f11b1 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -3053,7 +3053,7 @@ bool bpf_jit_supports_exceptions(void) /* We unwind through both kernel frames starting from within bpf_throw * call and BPF frames. Therefore we require FP unwinder to be enabled * to walk kernel frames and reach BPF frames in the stack trace. - * ARM64 kernel is aways compiled with CONFIG_FRAME_POINTER=y + * ARM64 kernel is always compiled with CONFIG_FRAME_POINTER=y */ return true; }
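The arch/arm64/mm/mmu.c hunks above rework the page-table creation helpers so that allocation failure is reported as -ENOMEM up the call chain instead of hitting BUG_ON(), with early boot callers funnelled through a wrapper that still treats failure as fatal. The following is a minimal standalone sketch of that error-propagation shape, assuming hypothetical helper names and a userspace malloc() stand-in for the page-table allocator; it is not the kernel code itself.

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define INVALID_PHYS_ADDR ((uint64_t)-1)

/* Stand-in allocator: may fail, signalled by INVALID_PHYS_ADDR.
 * (Allocations are leaked; this is only a control-flow sketch.) */
static uint64_t pgtable_alloc(void)
{
	void *p = malloc(4096);

	return p ? (uint64_t)(uintptr_t)p : INVALID_PHYS_ADDR;
}

/* Leaf level: return 0 on success or -ENOMEM, instead of asserting. */
static int init_leaf(void)
{
	uint64_t phys = pgtable_alloc();

	if (phys == INVALID_PHYS_ADDR)
		return -ENOMEM;
	/* ... fill in entries ... */
	return 0;
}

/* Intermediate level: propagate the error upwards unchanged. */
static int init_table(int nr_entries)
{
	for (int i = 0; i < nr_entries; i++) {
		int ret = init_leaf();

		if (ret)
			return ret;
	}
	return 0;
}

/* Early-boot style wrapper: failure here is still fatal. */
static void early_init_table_or_die(int nr_entries)
{
	if (init_table(nr_entries))
		exit(EXIT_FAILURE);	/* the kernel code panics instead */
}

int main(void)
{
	early_init_table_or_die(4);

	/* Runtime callers (e.g. memory hotplug) can handle the error. */
	if (init_table(4) == -ENOMEM)
		fprintf(stderr, "out of memory, unwinding\n");
	return 0;
}
```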
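The arch/arm64/mm/pgd.c hunk tightens the compile-time check from "PGD_SIZE is at least 64 bytes" to "PGD_SIZE is a multiple of 64 bytes", which is the property that actually guarantees 64-byte alignment for objects carved out of a PGD_SIZE-aligned cache. A minimal sketch of expressing the same invariant with C11 _Static_assert, using made-up values for PGD_SIZE:

```c
/* Hypothetical stand-ins for PGD_SIZE and IS_ALIGNED(); the value is for
 * illustration only. */
#define PGD_SIZE		512UL
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* Require a multiple of 64, not merely >= 64: an object aligned to PGD_SIZE
 * is 64-byte aligned only if PGD_SIZE itself is a multiple of 64. */
_Static_assert(IS_ALIGNED(PGD_SIZE, 64),
	       "top-level page table size must be a multiple of 64 bytes");

int main(void)
{
	return 0;
}
```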
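The arch/arm64/mm/pageattr.c hunk stops walking every page of the vmalloc area and instead indexes area->pages[] starting from the page that backs the requested range, visiting only numpages entries. Below is a standalone model of that index calculation with simplified types (no struct page, no KASAN tag handling); names such as struct area and change_range() are invented for illustration.

```c
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct area {
	void *addr;		/* base virtual address of the mapped area */
	unsigned char **pages;	/* one backing "page" per PAGE_SIZE slot */
	unsigned long nr_pages;
};

/* Apply op() only to the pages backing [start, start + numpages * PAGE_SIZE),
 * rather than iterating over every page in the area. */
static void change_range(struct area *area, unsigned long start,
			 unsigned long numpages,
			 void (*op)(unsigned char *page))
{
	unsigned long idx = (start - (unsigned long)area->addr) >> PAGE_SHIFT;

	for (; numpages; idx++, numpages--)
		op(area->pages[idx]);
}

static void mark(unsigned char *page)
{
	page[0] = 1;
}

int main(void)
{
	static unsigned char backing[4][PAGE_SIZE];
	unsigned char *pages[4] = { backing[0], backing[1], backing[2], backing[3] };
	struct area a = { .addr = (void *)0x100000UL, .pages = pages, .nr_pages = 4 };

	/* Change only pages 1 and 2 of the four-page area. */
	change_range(&a, 0x100000UL + PAGE_SIZE, 2, mark);
	printf("%d %d %d %d\n", backing[0][0], backing[1][0],
	       backing[2][0], backing[3][0]);
	return 0;
}
```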
