author		Thomas Gleixner <tglx@linutronix.de>	2020-11-01 17:54:13 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2020-11-01 17:54:13 +0100
commit		17bb415fefedb74d2ff02656cf59e4e3ab92ee20 (patch)
tree		0ada27d370a1e2bfab1b57df9bb02a1a20371662 /arch/arm64
parent		ceb2465c51195967f11f6507538579816ac67cb8 (diff)
parent		d95bdca75b3fb41bf185efe164e05aed820081a5 (diff)
Merge tag 'irqchip-fixes-5.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/urgent
Pull irqchip fixes from Marc Zyngier:
- A couple of fixes following the "IPI as IRQ" patches (Kconfig, bcm2836)
- Two SiFive PLIC fixes (irq_set_affinity, hierarchy handling)
- "Unmapped event" handling for the ti-sci-inta controller
- Tidy-ups for the irq-mst driver (static functions, Kconfig)
- A small cleanup in the Renesas irqpin driver
- The STM32 exti driver can now handle LP timer events
Diffstat (limited to 'arch/arm64')
43 files changed, 431 insertions(+), 228 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6d232837cbee..d0fdbe5fb32f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -106,6 +106,7 @@ config ARM64
 	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_IDLE_POLL_SETUP
+	select GENERIC_IRQ_IPI
 	select GENERIC_IRQ_MULTI_HANDLER
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 55bc8546d9c7..130569f90c54 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -82,8 +82,8 @@ endif
 # compiler to generate them and consequently to break the single image contract
 # we pass it only to the assembler. This option is utilized only in case of non
 # integrated assemblers.
-ifneq ($(CONFIG_AS_HAS_ARMV8_4), y)
-branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
+ifeq ($(CONFIG_AS_HAS_PAC), y)
+asm-arch := armv8.3-a
 endif
 endif
 
@@ -91,7 +91,12 @@ KBUILD_CFLAGS += $(branch-prot-flags-y)
 
 ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
 # make sure to pass the newest target architecture to -march.
-KBUILD_CFLAGS	+= -Wa,-march=armv8.4-a
+asm-arch := armv8.4-a
+endif
+
+ifdef asm-arch
+KBUILD_CFLAGS	+= -Wa,-march=$(asm-arch) \
+		   -DARM64_ASM_ARCH='"$(asm-arch)"'
 endif
 
 ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
@@ -165,6 +170,8 @@ zinstall install:
 PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+	$(if $(CONFIG_COMPAT_VDSO), \
+		$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
 
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 1a39e0ef776b..5b9ec032ce8d 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -686,6 +686,8 @@
 		clocks = <&pericfg CLK_PERI_MSDC30_0_PD>,
 			 <&topckgen CLK_TOP_MSDC50_0_SEL>;
 		clock-names = "source", "hclk";
+		resets = <&pericfg MT7622_PERI_MSDC0_SW_RST>;
+		reset-names = "hrst";
 		status = "disabled";
 	};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 34d249d85da7..8eb61dd9921e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -337,8 +337,9 @@
 		compatible = "nvidia,tegra186-sdhci";
 		reg = <0x0 0x03400000 0x0 0x10000>;
 		interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&bpmp TEGRA186_CLK_SDMMC1>;
-		clock-names = "sdhci";
+		clocks = <&bpmp TEGRA186_CLK_SDMMC1>,
+			 <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+		clock-names = "sdhci", "tmclk";
 		resets = <&bpmp TEGRA186_RESET_SDMMC1>;
 		reset-names = "sdhci";
 		interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRA &emc>,
@@ -366,8 +367,9 @@
 		compatible = "nvidia,tegra186-sdhci";
 		reg = <0x0 0x03420000 0x0 0x10000>;
 		interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&bpmp TEGRA186_CLK_SDMMC2>;
-		clock-names = "sdhci";
+		clocks = <&bpmp TEGRA186_CLK_SDMMC2>,
+			 <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+		clock-names = "sdhci", "tmclk";
 		resets = <&bpmp TEGRA186_RESET_SDMMC2>;
 		reset-names = "sdhci";
 		interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRAA &emc>,
@@ -390,8 +392,9 @@
 		compatible = "nvidia,tegra186-sdhci";
 		reg = <0x0 0x03440000 0x0 0x10000>;
 		interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&bpmp TEGRA186_CLK_SDMMC3>;
-		clock-names = "sdhci";
+		clocks = <&bpmp TEGRA186_CLK_SDMMC3>,
+			 <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+		clock-names = "sdhci", "tmclk";
 		resets = <&bpmp TEGRA186_RESET_SDMMC3>;
 		reset-names = "sdhci";
 		interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCR &emc>,
@@ -416,8 +419,9 @@
 		compatible = "nvidia,tegra186-sdhci";
 		reg = <0x0 0x03460000 0x0 0x10000>;
 		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&bpmp TEGRA186_CLK_SDMMC4>;
-		clock-names = "sdhci";
+		clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
+			 <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+		clock-names = "sdhci", "tmclk";
 		assigned-clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
 				  <&bpmp TEGRA186_CLK_PLLC4_VCO>;
 		assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLC4_VCO>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index 48160f48003a..ca5cb6aef5ee 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -460,8 +460,9 @@
 		compatible = "nvidia,tegra194-sdhci";
 		reg = <0x03400000 0x10000>;
 		interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&bpmp TEGRA194_CLK_SDMMC1>;
-		clock-names = "sdhci";
+		clocks = <&bpmp TEGRA194_CLK_SDMMC1>,
+			 <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+		clock-names = "sdhci", "tmclk";
 		resets = <&bpmp TEGRA194_RESET_SDMMC1>;
 		reset-names = "sdhci";
 		interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCRA &emc>,
@@ -485,8 +486,9 @@
 		compatible = "nvidia,tegra194-sdhci";
 		reg = <0x03440000 0x10000>;
 		interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&bpmp TEGRA194_CLK_SDMMC3>;
-		clock-names = "sdhci";
+		clocks = <&bpmp TEGRA194_CLK_SDMMC3>,
+			 <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+		clock-names = "sdhci", "tmclk";
 		resets = <&bpmp TEGRA194_RESET_SDMMC3>;
 		reset-names = "sdhci";
 		interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCR &emc>,
@@ -511,8 +513,9 @@
 		compatible = "nvidia,tegra194-sdhci";
 		reg = <0x03460000 0x10000>;
 		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&bpmp TEGRA194_CLK_SDMMC4>;
-		clock-names = "sdhci";
+		clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
+			 <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+		clock-names = "sdhci", "tmclk";
 		assigned-clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
 				  <&bpmp TEGRA194_CLK_PLLC4>;
 		assigned-clock-parents =
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 829f786af133..8cca2166a446 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -1194,8 +1194,9 @@
 		compatible = "nvidia,tegra210-sdhci";
 		reg = <0x0 0x700b0000 0x0 0x200>;
 		interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&tegra_car TEGRA210_CLK_SDMMC1>;
-		clock-names = "sdhci";
+		clocks = <&tegra_car TEGRA210_CLK_SDMMC1>,
+			 <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+		clock-names = "sdhci", "tmclk";
 		resets = <&tegra_car 14>;
 		reset-names = "sdhci";
 		pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
@@ -1222,8 +1223,9 @@
 		compatible = "nvidia,tegra210-sdhci";
 		reg = <0x0 0x700b0200 0x0 0x200>;
 		interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&tegra_car TEGRA210_CLK_SDMMC2>;
-		clock-names = "sdhci";
+		clocks = <&tegra_car TEGRA210_CLK_SDMMC2>,
+			 <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+		clock-names = "sdhci", "tmclk";
 		resets = <&tegra_car 9>;
 		reset-names = "sdhci";
 		pinctrl-names = "sdmmc-1v8-drv";
@@ -1239,8 +1241,9 @@
 		compatible = "nvidia,tegra210-sdhci";
 		reg = <0x0 0x700b0400 0x0 0x200>;
 		interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&tegra_car TEGRA210_CLK_SDMMC3>;
-		clock-names = "sdhci";
+		clocks = <&tegra_car TEGRA210_CLK_SDMMC3>,
+			 <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+		clock-names = "sdhci", "tmclk";
 		resets = <&tegra_car 69>;
 		reset-names = "sdhci";
 		pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
@@ -1262,8 +1265,9 @@
 		compatible = "nvidia,tegra210-sdhci";
 		reg = <0x0 0x700b0600 0x0 0x200>;
 		interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&tegra_car TEGRA210_CLK_SDMMC4>;
-		clock-names = "sdhci";
+		clocks = <&tegra_car TEGRA210_CLK_SDMMC4>,
+			 <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+		clock-names = "sdhci", "tmclk";
 		resets = <&tegra_car 15>;
 		reset-names = "sdhci";
 		pinctrl-names = "sdmmc-3v3-drv", "sdmmc-1v8-drv";
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 6647ae4f0231..880b9054d75c 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -153,7 +153,7 @@ static inline bool gic_prio_masking_enabled(void)
 
 static inline void gic_pmr_mask_irqs(void)
 {
-	BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
+	BUILD_BUG_ON(GICD_INT_DEF_PRI < (__GIC_PRIO_IRQOFF |
 					 GIC_PRIO_PSR_I_SET));
 	BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
 	/*
@@ -162,6 +162,12 @@ static inline void gic_pmr_mask_irqs(void)
 	 * are applied to IRQ priorities
 	 */
 	BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
+	/*
+	 * Same situation as above, but now we make sure that we can mask
+	 * regular interrupts.
+	 */
+	BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) < (__GIC_PRIO_IRQOFF_NS |
+							 GIC_PRIO_PSR_I_SET));
 	gic_write_pmr(GIC_PRIO_IRQOFF);
 }
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 51a7ce87cdfe..6fb2e6bcc392 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -2,6 +2,12 @@
 #ifndef __ASM_COMPILER_H
 #define __ASM_COMPILER_H
 
+#ifdef ARM64_ASM_ARCH
+#define ARM64_ASM_PREAMBLE ".arch " ARM64_ASM_ARCH "\n"
+#else
+#define ARM64_ASM_PREAMBLE
+#endif
+
 /*
  * The EL0/EL1 pointer bits used by a pointer authentication code.
  * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 985493af704b..5ffa4bacdad3 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -13,21 +13,12 @@
 #include <asm/kvm_arm.h>
 #include <asm/sysreg.h>
 
-#define NR_IPI	7
-
 typedef struct {
 	unsigned int __softirq_pending;
-	unsigned int ipi_irqs[NR_IPI];
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
-#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
-
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#define arch_irq_stat_cpu	smp_irq_stat_cpu
-
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
 
 struct nmi_ctx {
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index 8a1ef1907760..a1020285ea75 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -2,11 +2,9 @@
 #ifndef __ASM_IRQ_WORK_H
 #define __ASM_IRQ_WORK_H
 
-#include <asm/smp.h>
-
 static inline bool arch_irq_work_has_interrupt(void)
 {
-	return !!__smp_cross_call;
+	return true;
 }
 
 #endif /* __ASM_IRQ_WORK_H */
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa4b6521ef14..ff328e5bbb75 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -95,6 +95,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 	return res;
 }
 
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 51c1d9918999..1da8e3dc4455 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -71,11 +71,12 @@
  * IMO:		Override CPSR.I and enable signaling with VI
  * FMO:		Override CPSR.F and enable signaling with VF
  * SWIO:	Turn set/way invalidates into set/way clean+invalidate
+ * PTW:		Take a stage2 fault if a stage1 walk steps in device memory
  */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
			 HCR_BSU_IS | HCR_FB | HCR_TAC | \
			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
-			 HCR_FMO | HCR_IMO)
+			 HCR_FMO | HCR_IMO | HCR_PTW )
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index fb1a922b31ba..6f98fbd0ac81 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -169,6 +169,34 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
		*__hyp_this_cpu_ptr(sym);				\
	 })
 
+#define __KVM_EXTABLE(from, to)						\
+	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
+	"	.align		3\n"					\
+	"	.long		(" #from " - .), (" #to " - .)\n"	\
+	"	.popsection\n"
+
+
+#define __kvm_at(at_op, addr)						\
+( {									\
+	int __kvm_at_err = 0;						\
+	u64 spsr, elr;							\
+	asm volatile(							\
+	"	mrs	%1, spsr_el2\n"					\
+	"	mrs	%2, elr_el2\n"					\
+	"1:	at	"at_op", %3\n"					\
+	"	isb\n"							\
+	"	b	9f\n"						\
+	"2:	msr	spsr_el2, %1\n"					\
+	"	msr	elr_el2, %2\n"					\
+	"	mov	%w0, %4\n"					\
+	"9:\n"								\
+	__KVM_EXTABLE(1b, 2b)						\
+	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
+	: "r" (addr), "i" (-EFAULT));					\
+	__kvm_at_err;							\
+} )
+
+
 #else /* __ASSEMBLY__ */
 
 .macro hyp_adr_this_cpu reg, sym, tmp
@@ -193,6 +221,21 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
 
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format as _asm_extable, but output to a different section so
+ * that it can be mapped to EL2. The KVM version is not sorted. The caller
+ * must ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the
+ * fixup.
+ */
+.macro	_kvm_extable, from, to
+	.pushsection	__kvm_ex_table, "a"
+	.align		3
+	.long		(\from - .), (\to - .)
+	.popsection
+.endm
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
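The __kvm_ex_table entries emitted above store both addresses as offsets relative to the entry itself, which keeps the table valid even though the hypervisor text is mapped at a different virtual address at EL2. A minimal sketch of how such self-relative entries decode back to absolute addresses (the struct layout follows the diff; the helper names are illustrative):

	struct exception_table_entry {
		int insn;	/* self-relative offset to the faulting instruction */
		int fixup;	/* self-relative offset to the fixup code */
	};

	/* Decoding is just "address of the field plus its value", so the
	 * table needs no relocation wherever it ends up mapped. */
	static unsigned long extable_insn(const struct exception_table_entry *e)
	{
		return (unsigned long)&e->insn + e->insn;
	}

	static unsigned long extable_fixup(const struct exception_table_entry *e)
	{
		return (unsigned long)&e->fixup + e->fixup;
	}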
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 65568b23868a..e52c927aade5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -473,7 +473,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end);
+			unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 966ed30ed5f7..997cf8c8cd52 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -31,9 +31,21 @@
  * interrupt disabling temporarily does not rely on IRQ priorities.
  */
 #define GIC_PRIO_IRQON			0xe0
-#define GIC_PRIO_IRQOFF			(GIC_PRIO_IRQON & ~0x80)
+#define __GIC_PRIO_IRQOFF		(GIC_PRIO_IRQON & ~0x80)
+#define __GIC_PRIO_IRQOFF_NS		0xa0
 #define GIC_PRIO_PSR_I_SET		(1 << 4)
 
+#define GIC_PRIO_IRQOFF							\
+	({								\
+		extern struct static_key_false gic_nonsecure_priorities;\
+		u8 __prio = __GIC_PRIO_IRQOFF;				\
+									\
+		if (static_branch_unlikely(&gic_nonsecure_priorities))	\
+			__prio = __GIC_PRIO_IRQOFF_NS;			\
+									\
+		__prio;							\
+	})
+
 /* Additional SPSR bits not exposed in the UABI */
 #define PSR_MODE_THREAD_BIT	(1 << 0)
 #define PSR_IL_BIT		(1 << 20)
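GIC_PRIO_IRQOFF is now a GNU statement expression rather than a plain constant: every use site selects the secure (0x60) or non-secure (0xa0) masking priority at run time through the gic_nonsecure_priorities static key. A self-contained model of the pattern, with an illustrative key and values rather than the kernel's symbols:

	#include <linux/jump_label.h>
	#include <linux/types.h>

	DEFINE_STATIC_KEY_FALSE(example_nonsecure_prios);

	/* A statement expression evaluates to a value, so callers keep
	 * treating the macro like a constant while a static branch picks
	 * the variant with near-zero cost on the common path. */
	#define EXAMPLE_PRIO_IRQOFF					\
		({							\
			u8 __prio = 0x60;				\
			if (static_branch_unlikely(&example_nonsecure_prios)) \
				__prio = 0xa0;				\
			__prio;						\
		})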
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 0eadbf933e35..2e7f529ec5a6 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -56,27 +56,15 @@ static inline void set_cpu_logical_map(int cpu, u64 hwid)
 struct seq_file;
 
 /*
- * generate IPI list text
- */
-extern void show_ipi_list(struct seq_file *p, int prec);
-
-/*
- * Called from C code, this handles an IPI.
- */
-extern void handle_IPI(int ipinr, struct pt_regs *regs);
-
-/*
  * Discover the set of possible CPUs and determine their
  * SMP operations.
  */
 extern void smp_init_cpus(void);
 
 /*
- * Provide a function to raise an IPI cross call on CPUs in callmap.
+ * Register IPI interrupts with the arch SMP code
  */
-extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
-
-extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
 
 /*
  * Called from the secondary holding pen, this is the secondary CPU entry point.
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index d493174415db..cc3f5a33ff9c 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -28,14 +28,16 @@
  * not. The macros handles invoking the asm with or without the
  * register argument as appropriate.
  */
-#define __TLBI_0(op, arg)	asm ("tlbi " #op "\n"			       \
+#define __TLBI_0(op, arg)	asm (ARM64_ASM_PREAMBLE			       \
+				     "tlbi " #op "\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : )
 
-#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0\n"		       \
+#define __TLBI_1(op, arg)	asm (ARM64_ASM_PREAMBLE			       \
+				     "tlbi " #op ", %0\n"		       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : "r" (arg))
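The thread running through the Makefile, compiler.h and tlbflush.h changes: the build records the assembler target once as the ARM64_ASM_ARCH string, compiler.h turns it into ARM64_ASM_PREAMBLE, and inline-asm blocks prepend that preamble so a statement can use instructions newer than the compile-time baseline (here, the ARMv8.4 TLBI range forms). A rough standalone illustration, assuming the macro arrives via -DARM64_ASM_ARCH='"armv8.4-a"'; the tlbi operand is only a stand-in:

	#ifdef ARM64_ASM_ARCH
	#define ASM_PREAMBLE	".arch " ARM64_ASM_ARCH "\n"
	#else
	#define ASM_PREAMBLE
	#endif

	/* Each asm block that may emit a newer instruction carries the
	 * directive itself, so correctness does not depend on the order in
	 * which the compiler lays the blocks out. */
	static inline void example_tlbi(unsigned long arg)
	{
		asm volatile(ASM_PREAMBLE
			     "tlbi vae1is, %0\n"
			     : : "r" (arg) : "memory");
	}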
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 455966401102..a85174d05473 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -322,7 +322,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
			 */
			if (memblock_is_map_memory(phys))
				return (void __iomem *)__phys_to_virt(phys);
-			/* fall through */
+			fallthrough;
 
		default:
			if (region->attribute & EFI_MEMORY_WB)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 6bd1d3ad037a..c332d49780dc 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -910,6 +910,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+		.type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
+			 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
	},
 #endif
 #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a389b999482e..6424584be01e 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -686,7 +686,7 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
	case FTR_HIGHER_OR_ZERO_SAFE:
		if (!cur || !new)
			break;
-		/* Fallthrough */
+		fallthrough;
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 393c6fb1f1cb..d0076c2159e6 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -327,7 +327,6 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
		set_bit(ICACHEF_VPIPT, &__icache_flags);
		break;
	default:
-		/* Fallthrough */
	case ICACHE_POLICY_VIPT:
		/* Assume aliasing */
		set_bit(ICACHEF_ALIASING, &__icache_flags);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 2646178c8329..55af8b504b65 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -170,19 +170,6 @@ alternative_cb_end
	stp	x28, x29, [sp, #16 * 14]
 
	.if	\el == 0
-	.if	\regsize == 32
-	/*
-	 * If we're returning from a 32-bit task on a system affected by
-	 * 1418040 then re-enable userspace access to the virtual counter.
-	 */
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
-	mrs	x0, cntkctl_el1
-	orr	x0, x0, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
-	msr	cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
-	.endif
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
@@ -294,14 +281,6 @@ alternative_else_nop_endif
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f
 
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
-	mrs	x0, cntkctl_el1
-	bic	x0, x0, #2		// ARCH_TIMER_USR_VCT_ACCESS_EN
-	msr	cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
-
 #ifdef CONFIG_ARM64_ERRATUM_845719
 alternative_if ARM64_WORKAROUND_845719
 #ifdef CONFIG_PID_IN_CONTEXTIDR
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index af234a1e08b7..712e97c03e54 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -257,7 +257,7 @@ static int hw_breakpoint_control(struct perf_event *bp,
		 * level.
		 */
		enable_debug_monitors(dbg_el);
-		/* Fall through */
+		fallthrough;
	case HW_BREAKPOINT_RESTORE:
		/* Setup the address register. */
		write_wb_reg(val_reg, i, info->address);
@@ -541,13 +541,13 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
 
-		/* Fallthrough */
+		fallthrough;
	case 3:
		/* Allow single byte watchpoint. */
		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
 
-		/* Fallthrough */
+		fallthrough;
	default:
		return -EINVAL;
	}
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 9e897c500237..98a5215c1598 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -101,8 +101,14 @@ KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
 /* Static key checked in pmr_sync(). */
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 KVM_NVHE_ALIAS(gic_pmr_sync);
+/* Static key checked in GIC_PRIO_IRQOFF. */
+KVM_NVHE_ALIAS(gic_nonsecure_priorities);
 #endif
 
+/* EL2 exception handling */
+KVM_NVHE_ALIAS(__start___kvm_ex_table);
+KVM_NVHE_ALIAS(__stop___kvm_ex_table);
+
 #endif /* CONFIG_KVM */
 
 #endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 04a327ccf84d..9cf2fb87584a 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -10,10 +10,10 @@
  * Copyright (C) 2012 ARM Ltd.
  */
 
-#include <linux/kernel_stat.h>
 #include <linux/irq.h>
 #include <linux/memory.h>
 #include <linux/smp.h>
+#include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/irqchip.h>
 #include <linux/kprobes.h>
@@ -22,20 +22,11 @@
 #include <asm/daifflags.h>
 #include <asm/vmap_stack.h>
 
-unsigned long irq_err_count;
-
 /* Only access this in an NMI enter/exit */
 DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
 
 DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
 
-int arch_show_interrupts(struct seq_file *p, int prec)
-{
-	show_ipi_list(p, prec);
-	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
-	return 0;
-}
-
 #ifdef CONFIG_VMAP_STACK
 static void init_irq_stacks(void)
 {
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 0ce3a28e3347..2e224435c024 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -305,8 +305,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			mod->arch.core.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
			mod->arch.init.plt_shndx = i;
-		else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
-			 !strcmp(secstrings + sechdrs[i].sh_name,
+		else if (!strcmp(secstrings + sechdrs[i].sh_name,
				 ".text.ftrace_trampoline"))
			tramp = sechdrs + i;
		else if (sechdrs[i].sh_type == SHT_SYMTAB)
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 1cd1a4d0ed30..2a1ad95d9b2c 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -315,21 +315,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
-			/* Fall through */
+			fallthrough;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
-			/* Fall through */
+			fallthrough;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
-			/* Fall through */
+			fallthrough;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
@@ -397,7 +397,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
-			/* Fall through */
+			fallthrough;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
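A large share of the hunks in this merge replace `/* Fall through */` comments with the `fallthrough` pseudo-keyword. Roughly how that keyword works, as a simplified model of the definition in include/linux/compiler_attributes.h:

	/* When the compiler supports the attribute, -Wimplicit-fallthrough
	 * can flag any unannotated fall-through as a likely bug; on older
	 * compilers it degrades to an empty statement. */
	#if defined(__has_attribute) && __has_attribute(__fallthrough__)
	# define fallthrough	__attribute__((__fallthrough__))
	#else
	# define fallthrough	do {} while (0)	/* fallthrough */
	#endif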
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 84ec630b8ab5..f1804496b935 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -123,10 +123,8 @@ void arch_cpu_idle(void)
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();
	local_irq_enable();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -516,6 +514,39 @@ static void entry_task_switch(struct task_struct *next)
 }
 
 /*
+ * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
+ *
+ * Assuming the virtual counter is enabled at the beginning of times:
+ *
+ * - disable access when switching from a 64bit task to a 32bit task
+ * - enable access when switching from a 32bit task to a 64bit task
+ */
+static void erratum_1418040_thread_switch(struct task_struct *prev,
+					  struct task_struct *next)
+{
+	bool prev32, next32;
+	u64 val;
+
+	if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
+	      cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
+		return;
+
+	prev32 = is_compat_thread(task_thread_info(prev));
+	next32 = is_compat_thread(task_thread_info(next));
+
+	if (prev32 == next32)
+		return;
+
+	val = read_sysreg(cntkctl_el1);
+
+	if (!next32)
+		val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+	else
+		val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+	write_sysreg(val, cntkctl_el1);
+}
+
+/*
  * Thread switching.
  */
 __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
@@ -530,6 +561,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
	entry_task_switch(next);
	uao_thread_switch(next);
	ssbs_thread_switch(next);
+	erratum_1418040_thread_switch(prev, next);
 
	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 77c4c9bad1b8..53acbeca4f57 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -280,7 +280,6 @@ u64 cpu_logical_map(int cpu)
 {
	return __cpu_logical_map[cpu];
 }
-EXPORT_SYMBOL_GPL(cpu_logical_map);
 
 void __init __no_sanitize_address setup_arch(char **cmdline_p)
 {
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 03957a1ae6c0..82e75fc2c903 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -30,6 +30,7 @@
 #include <linux/completion.h>
 #include <linux/of.h>
 #include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
 #include <linux/kexec.h>
 #include <linux/kvm_host.h>
 
@@ -72,10 +73,18 @@ enum ipi_msg_type {
	IPI_CPU_CRASH_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
-	IPI_WAKEUP
+	IPI_WAKEUP,
+	NR_IPI
 };
 
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+
 #ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu);
 static int op_cpu_kill(unsigned int cpu);
 #else
 static inline int op_cpu_kill(unsigned int cpu)
@@ -151,7 +160,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
			break;
		}
		pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
-		/* Fall through */
+		fallthrough;
	case CPU_STUCK_IN_KERNEL:
		pr_crit("CPU%u: is stuck in kernel\n", cpu);
		if (status & CPU_STUCK_REASON_52_BIT_VA)
@@ -237,6 +246,8 @@ asmlinkage notrace void secondary_start_kernel(void)
	 */
	notify_cpu_starting(cpu);
 
+	ipi_setup(cpu);
+
	store_cpu_topology(cpu);
	numa_add_cpu(cpu);
 
@@ -302,6 +313,7 @@ int __cpu_disable(void)
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);
+	ipi_teardown(cpu);
 
	/*
	 * OK - migrate IRQs away from this CPU
@@ -772,13 +784,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
	}
 }
 
-void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
-	__smp_cross_call = fn;
-}
-
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
@@ -790,35 +795,25 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
 };
 
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
-	trace_ipi_raise(target, ipi_types[ipinr]);
-	__smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
-void show_ipi_list(struct seq_file *p, int prec)
+unsigned long irq_err_count;
+
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
	unsigned int cpu, i;
 
	for (i = 0; i < NR_IPI; i++) {
+		unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ",
-				   __get_irq_stat(cpu, ipi_irqs[i]));
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
		seq_printf(p, "      %s\n", ipi_types[i]);
	}
-}
-
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = 0;
-	int i;
 
-	for (i = 0; i < NR_IPI; i++)
-		sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
-	return sum;
+	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+	return 0;
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -841,8 +836,7 @@ void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
-	if (__smp_cross_call)
-		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+	smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
 }
 #endif
 
@@ -890,15 +884,12 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 /*
  * Main handler for inter-processor interrupts
  */
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
 {
	unsigned int cpu = smp_processor_id();
-	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if ((unsigned)ipinr < NR_IPI) {
+	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
-		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
-	}
 
	switch (ipinr) {
	case IPI_RESCHEDULE:
@@ -906,21 +897,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
		break;
 
	case IPI_CALL_FUNC:
-		irq_enter();
		generic_smp_call_function_interrupt();
-		irq_exit();
		break;
 
	case IPI_CPU_STOP:
-		irq_enter();
		local_cpu_stop();
-		irq_exit();
		break;
 
	case IPI_CPU_CRASH_STOP:
		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
-			irq_enter();
-			ipi_cpu_crash_stop(cpu, regs);
+			ipi_cpu_crash_stop(cpu, get_irq_regs());
 
			unreachable();
		}
@@ -928,17 +914,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
-		irq_enter();
		tick_receive_broadcast();
-		irq_exit();
		break;
 #endif
 
 #ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
-		irq_enter();
		irq_work_run();
-		irq_exit();
		break;
 #endif
 
@@ -957,7 +939,66 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
-	set_irq_regs(old_regs);
+}
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	do_handle_IPI(irq - ipi_irq_base);
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_irq_base + i);
+}
+#endif
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+	int i;
+
+	WARN_ON(n < NR_IPI);
+	nr_ipi = min(n, NR_IPI);
+
+	for (i = 0; i < nr_ipi; i++) {
+		int err;
+
+		err = request_percpu_irq(ipi_base + i, ipi_handler,
+					 "IPI", &cpu_number);
+		WARN_ON(err);
+
+		ipi_desc[i] = irq_to_desc(ipi_base + i);
+		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+	}
+
+	ipi_irq_base = ipi_base;
+
+	/* Setup the boot CPU immediately */
+	ipi_setup(smp_processor_id());
 }
 
 void smp_send_reschedule(int cpu)
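With IPIs converted to ordinary per-CPU interrupts, the arch code no longer registers a cross-call hook. Instead, the irqchip driver allocates a block of per-CPU IRQs and passes the range to set_smp_ipi_range(), which requests one handler per IPI, marks them IRQ_HIDDEN, and enables them as each CPU comes online. A sketch of the driver side, loosely following the GICv3 pattern; the domain setup and allocation arguments are elided and illustrative:

	#define EXAMPLE_NR_IPIS	8	/* must cover the arch's NR_IPI */

	static void example_smp_init(struct irq_domain *domain)
	{
		int base_ipi;

		/* Reserve a contiguous range of per-CPU interrupts for IPIs. */
		base_ipi = irq_domain_alloc_irqs(domain, EXAMPLE_NR_IPIS,
						 NUMA_NO_NODE, NULL);
		if (WARN_ON(base_ipi <= 0))
			return;

		/* Hand the range to the arch code: it requests a per-CPU
		 * handler for each vector and enables them on the boot CPU
		 * immediately. */
		set_smp_ipi_range(base_ipi, EXAMPLE_NR_IPIS);
	}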
" " : ""); for_each_online_cpu(cpu) - seq_printf(p, "%10u ", - __get_irq_stat(cpu, ipi_irqs[i])); + seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu)); seq_printf(p, " %s\n", ipi_types[i]); } -} - -u64 smp_irq_stat_cpu(unsigned int cpu) -{ - u64 sum = 0; - int i; - for (i = 0; i < NR_IPI; i++) - sum += __get_irq_stat(cpu, ipi_irqs[i]); - - return sum; + seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); + return 0; } void arch_send_call_function_ipi_mask(const struct cpumask *mask) @@ -841,8 +836,7 @@ void arch_send_wakeup_ipi_mask(const struct cpumask *mask) #ifdef CONFIG_IRQ_WORK void arch_irq_work_raise(void) { - if (__smp_cross_call) - smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); + smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); } #endif @@ -890,15 +884,12 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) /* * Main handler for inter-processor interrupts */ -void handle_IPI(int ipinr, struct pt_regs *regs) +static void do_handle_IPI(int ipinr) { unsigned int cpu = smp_processor_id(); - struct pt_regs *old_regs = set_irq_regs(regs); - if ((unsigned)ipinr < NR_IPI) { + if ((unsigned)ipinr < NR_IPI) trace_ipi_entry_rcuidle(ipi_types[ipinr]); - __inc_irq_stat(cpu, ipi_irqs[ipinr]); - } switch (ipinr) { case IPI_RESCHEDULE: @@ -906,21 +897,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs) break; case IPI_CALL_FUNC: - irq_enter(); generic_smp_call_function_interrupt(); - irq_exit(); break; case IPI_CPU_STOP: - irq_enter(); local_cpu_stop(); - irq_exit(); break; case IPI_CPU_CRASH_STOP: if (IS_ENABLED(CONFIG_KEXEC_CORE)) { - irq_enter(); - ipi_cpu_crash_stop(cpu, regs); + ipi_cpu_crash_stop(cpu, get_irq_regs()); unreachable(); } @@ -928,17 +914,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs) #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST case IPI_TIMER: - irq_enter(); tick_receive_broadcast(); - irq_exit(); break; #endif #ifdef CONFIG_IRQ_WORK case IPI_IRQ_WORK: - irq_enter(); irq_work_run(); - irq_exit(); break; #endif @@ -957,7 +939,66 @@ void handle_IPI(int ipinr, struct pt_regs *regs) if ((unsigned)ipinr < NR_IPI) trace_ipi_exit_rcuidle(ipi_types[ipinr]); - set_irq_regs(old_regs); +} + +static irqreturn_t ipi_handler(int irq, void *data) +{ + do_handle_IPI(irq - ipi_irq_base); + return IRQ_HANDLED; +} + +static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) +{ + trace_ipi_raise(target, ipi_types[ipinr]); + __ipi_send_mask(ipi_desc[ipinr], target); +} + +static void ipi_setup(int cpu) +{ + int i; + + if (WARN_ON_ONCE(!ipi_irq_base)) + return; + + for (i = 0; i < nr_ipi; i++) + enable_percpu_irq(ipi_irq_base + i, 0); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void ipi_teardown(int cpu) +{ + int i; + + if (WARN_ON_ONCE(!ipi_irq_base)) + return; + + for (i = 0; i < nr_ipi; i++) + disable_percpu_irq(ipi_irq_base + i); +} +#endif + +void __init set_smp_ipi_range(int ipi_base, int n) +{ + int i; + + WARN_ON(n < NR_IPI); + nr_ipi = min(n, NR_IPI); + + for (i = 0; i < nr_ipi; i++) { + int err; + + err = request_percpu_irq(ipi_base + i, ipi_handler, + "IPI", &cpu_number); + WARN_ON(err); + + ipi_desc[i] = irq_to_desc(ipi_base + i); + irq_set_status_flags(ipi_base + i, IRQ_HIDDEN); + } + + ipi_irq_base = ipi_base; + + /* Setup the boot CPU immediately */ + ipi_setup(smp_processor_id()); } void smp_send_reschedule(int cpu) diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 5139a5f19256..d6adb4677c25 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 691d21e4c717..46dc3d75cf13 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1640,6 +1640,10 @@ int kvm_arch_init(void *opaque)
		return -ENODEV;
	}
 
+	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
+		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
+			 "Only trusted guests should be used on this system.\n");
+
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
		if (ret < 0) {
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index fe6c7d79309d..5d690d60ccad 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -128,7 +128,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
-		/* fall through */
+		fallthrough;
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_BKPT32:
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index ee32a7743389..76e7eaf4675e 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -196,20 +196,23 @@ alternative_endif
	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
-	.global	abort_guest_exit_start
 abort_guest_exit_start:
 
	isb
 
-	.global	abort_guest_exit_end
 abort_guest_exit_end:
 
	msr	daifset, #4	// Mask aborts
+	ret
+
+	_kvm_extable	abort_guest_exit_start, 9997f
+	_kvm_extable	abort_guest_exit_end, 9997f
+9997:
+	msr	daifset, #4	// Mask aborts
+	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 
-	// If the exception took place, restore the EL1 exception
-	// context so that we can report some information.
-	// Merge the exception code with the SError pending bit.
-	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+	// restore the EL1 exception context so that we can report some
+	// information. Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 689fccbc9de7..46b4dab933d0 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,30 @@
 #include <asm/kvm_mmu.h>
 #include <asm/mmu.h>
 
+.macro save_caller_saved_regs_vect
+	/* x0 and x1 were saved in the vector entry */
+	stp	x2, x3,   [sp, #-16]!
+	stp	x4, x5,   [sp, #-16]!
+	stp	x6, x7,   [sp, #-16]!
+	stp	x8, x9,   [sp, #-16]!
+	stp	x10, x11, [sp, #-16]!
+	stp	x12, x13, [sp, #-16]!
+	stp	x14, x15, [sp, #-16]!
+	stp	x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+	ldp	x16, x17, [sp], #16
+	ldp	x14, x15, [sp], #16
+	ldp	x12, x13, [sp], #16
+	ldp	x10, x11, [sp], #16
+	ldp	x8, x9,   [sp], #16
+	ldp	x6, x7,   [sp], #16
+	ldp	x4, x5,   [sp], #16
+	ldp	x2, x3,   [sp], #16
+	ldp	x0, x1,   [sp], #16
+.endm
+
 .text
 
 .macro do_el2_call
@@ -143,13 +167,19 @@ el1_error:
	b	__guest_exit
 
 el2_sync:
-	/* Check for illegal exception return, otherwise panic */
+	/* Check for illegal exception return */
	mrs	x0, spsr_el2
+	tbnz	x0, #20, 1f
 
-	/* if this was something else, then panic! */
-	tst	x0, #PSR_IL_BIT
-	b.eq	__hyp_panic
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+	bl	kvm_unexpected_el2_exception
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
 
+	eret
+
+1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
@@ -157,27 +187,14 @@ el2_sync:
 
 el2_error:
-	ldp	x0, x1, [sp], #16
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+
+	bl	kvm_unexpected_el2_exception
+
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
 
-	/*
-	 * Only two possibilities:
-	 * 1) Either we come from the exit path, having just unmasked
-	 *    PSTATE.A: change the return code to an EL2 fault, and
-	 *    carry on, as we're already in a sane state to handle it.
-	 * 2) Or we come from anywhere else, and that's a bug: we panic.
-	 *
-	 * For (1), x0 contains the original return code and x1 doesn't
-	 * contain anything meaningful at that stage. We can reuse them
-	 * as temp registers.
-	 * For (2), who cares?
-	 */
-	mrs	x0, elr_el2
-	adr	x1, abort_guest_exit_start
-	cmp	x0, x1
-	adr	x1, abort_guest_exit_end
-	ccmp	x0, x1, #4, ne
-	b.ne	__hyp_panic
-	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret
	sb
diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
index 0297dc63988c..5e28ea6aa097 100644
--- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -21,70 +21,70 @@
 #define save_debug(ptr,reg,nr)						\
	switch (nr) {							\
	case 15:	ptr[15] = read_debug(reg, 15);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 14:	ptr[14] = read_debug(reg, 14);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 13:	ptr[13] = read_debug(reg, 13);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 12:	ptr[12] = read_debug(reg, 12);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 11:	ptr[11] = read_debug(reg, 11);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 10:	ptr[10] = read_debug(reg, 10);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 9:		ptr[9] = read_debug(reg, 9);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 8:		ptr[8] = read_debug(reg, 8);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 7:		ptr[7] = read_debug(reg, 7);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 6:		ptr[6] = read_debug(reg, 6);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 5:		ptr[5] = read_debug(reg, 5);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 4:		ptr[4] = read_debug(reg, 4);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 3:		ptr[3] = read_debug(reg, 3);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 2:		ptr[2] = read_debug(reg, 2);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 1:		ptr[1] = read_debug(reg, 1);			\
-			/* Fall through */				\
+			fallthrough;					\
	default:	ptr[0] = read_debug(reg, 0);			\
	}
 
 #define restore_debug(ptr,reg,nr)					\
	switch (nr) {							\
	case 15:	write_debug(ptr[15], reg, 15);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 14:	write_debug(ptr[14], reg, 14);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 13:	write_debug(ptr[13], reg, 13);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 12:	write_debug(ptr[12], reg, 12);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 11:	write_debug(ptr[11], reg, 11);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 10:	write_debug(ptr[10], reg, 10);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 9:		write_debug(ptr[9], reg, 9);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 8:		write_debug(ptr[8], reg, 8);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 7:		write_debug(ptr[7], reg, 7);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 6:		write_debug(ptr[6], reg, 6);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 5:		write_debug(ptr[5], reg, 5);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 4:		write_debug(ptr[4], reg, 4);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 3:		write_debug(ptr[3], reg, 3);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 2:		write_debug(ptr[2], reg, 2);			\
-			/* Fall through */				\
+			fallthrough;					\
	case 1:		write_debug(ptr[1], reg, 1);			\
-			/* Fall through */				\
+			fallthrough;					\
	default:	write_debug(ptr[0], reg, 0);			\
	}
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 426ef65601dd..5b6b8fa00f0a 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -17,6 +17,7 @@
 
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
+#include <asm/extable.h>
 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -29,6 +30,9 @@
 
 extern const char __hyp_panic_string[];
 
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
 {
@@ -142,10 +146,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
-	asm volatile("at s1e1r, %0" : : "r" (far));
-	isb();
-
-	tmp = read_sysreg(par_el1);
+	if (!__kvm_at("s1e1r", far))
+		tmp = read_sysreg(par_el1);
+	else
+		tmp = SYS_PAR_EL1_F; /* back to the guest */
	write_sysreg(par, par_el1);
 
	if (unlikely(tmp & SYS_PAR_EL1_F))
@@ -508,4 +512,31 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline void __kvm_unexpected_el2_exception(void)
+{
+	unsigned long addr, fixup;
+	struct kvm_cpu_context *host_ctxt;
+	struct exception_table_entry *entry, *end;
+	unsigned long elr_el2 = read_sysreg(elr_el2);
+
+	entry = hyp_symbol_addr(__start___kvm_ex_table);
+	end = hyp_symbol_addr(__stop___kvm_ex_table);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+	while (entry < end) {
+		addr = (unsigned long)&entry->insn + entry->insn;
+		fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+		if (addr != elr_el2) {
+			entry++;
+			continue;
+		}
+
+		write_sysreg(fixup, elr_el2);
+		return;
+	}
+
+	hyp_panic(host_ctxt);
+}
+
 #endif /* __ARM64_KVM_HYP_SWITCH_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 341be2f2f312..0970442d2dbc 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -270,3 +270,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
		       read_sysreg(hpfar_el2), par, vcpu);
	unreachable();
 }
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 5a0073511efb..452f4cacd674 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -340,10 +340,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
-		/* Fall through */
+		fallthrough;
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
-		/* Fall through */
+		fallthrough;
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}
@@ -352,10 +352,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
-		/* Fall through */
+		fallthrough;
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
-		/* Fall through */
+		fallthrough;
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
@@ -373,10 +373,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
-		/* Fall through */
+		fallthrough;
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
-		/* Fall through */
+		fallthrough;
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}
@@ -385,10 +385,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
-		/* Fall through */
+		fallthrough;
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
-		/* Fall through */
+		fallthrough;
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index c52d714e0d75..c1da4f86ccac 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -217,3 +217,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
	__hyp_call_panic(spsr, elr, par, host_ctxt);
	unreachable();
 }
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0121ef2c7c8d..ba00bcc0c884 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -343,7 +343,8 @@ static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
  * destroying the VM), otherwise another faulting VCPU may come in and mess
  * with things behind our backs.
  */
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
+				 bool may_block)
 {
	struct kvm *kvm = mmu->kvm;
	pgd_t *pgd;
@@ -369,11 +370,16 @@ static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 si
		 * If the range is too large, release the kvm->mmu_lock
		 * to prevent starvation and lockup detector warnings.
		 */
-		if (next != end)
+		if (may_block && next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (pgd++, addr = next, addr != end);
 }
 
+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+{
+	__unmap_stage2_range(mmu, start, size, true);
+}
+
 static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
 {
@@ -2208,18 +2214,21 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 
 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+	unsigned flags = *(unsigned *)data;
+	bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+
+	__unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
	return 0;
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end)
+			unsigned long start, unsigned long end, unsigned flags)
 {
	if (!kvm->arch.mmu.pgd)
		return 0;
 
	trace_kvm_unmap_hva_range(start, end);
-	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
	return 0;
 }
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index a206655a39a5..9b11c096a042 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -45,7 +45,7 @@ static u32 get_cpu_asid_bits(void)
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
-		/* Fallthrough */
+		fallthrough;
	case 0:
		asid = 8;
		break;