Diffstat (limited to 'arch')
-rw-r--r-- arch/alpha/kernel/module.c | 2
-rw-r--r-- arch/alpha/kernel/signal.c | 2
-rw-r--r-- arch/alpha/kernel/traps.c | 4
-rw-r--r-- arch/arc/boot/dts/hsdk.dts | 6
-rw-r--r-- arch/arc/include/asm/pgalloc.h | 4
-rw-r--r-- arch/arc/kernel/disasm.c | 2
-rw-r--r-- arch/arc/kernel/perf_event.c | 14
-rw-r--r-- arch/arc/kernel/signal.c | 2
-rw-r--r-- arch/arc/kernel/troubleshoot.c | 77
-rw-r--r-- arch/arc/kernel/unwind.c | 6
-rw-r--r-- arch/arc/mm/init.c | 27
-rw-r--r-- arch/arc/plat-eznps/include/plat/ctop.h | 1
-rw-r--r-- arch/arm/boot/dts/at91-sama5d2_icp.dts | 2
-rw-r--r-- arch/arm/boot/dts/bcm-hr2.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/bcm-nsp.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/bcm2835-rpi.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/bcm5301x.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6q-logicpd.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx6q-prtwd2.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw51xx.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6sx-pinfunc.h | 2
-rw-r--r-- arch/arm/boot/dts/imx7d-zii-rmu2.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx7ulp.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi | 29
-rw-r--r-- arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/ls1021a.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/omap5.dtsi | 20
-rw-r--r-- arch/arm/boot/dts/socfpga_arria10.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/vfxxx.dtsi | 2
-rw-r--r-- arch/arm/configs/integrator_defconfig | 16
-rw-r--r-- arch/arm/kernel/hw_breakpoint.c | 10
-rw-r--r-- arch/arm/kernel/signal.c | 2
-rw-r--r-- arch/arm/mach-ep93xx/crunch.c | 2
-rw-r--r-- arch/arm/mach-imx/cpuidle-imx6q.c | 4
-rw-r--r-- arch/arm/mach-mmp/pm-mmp2.c | 8
-rw-r--r-- arch/arm/mach-mmp/pm-pxa910.c | 10
-rw-r--r-- arch/arm/mach-omap2/id.c | 8
-rw-r--r-- arch/arm/mach-omap2/omap-iommu.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap_device.c | 2
-rw-r--r-- arch/arm/mach-omap2/pm34xx.c | 4
-rw-r--r-- arch/arm/mach-orion5x/dns323-setup.c | 2
-rw-r--r-- arch/arm/mach-rpc/riscpc.c | 2
-rw-r--r-- arch/arm/mach-tegra/reset.c | 2
-rw-r--r-- arch/arm/mm/alignment.c | 4
-rw-r--r-- arch/arm/mm/proc-v7-bugs.c | 2
-rw-r--r-- arch/arm/plat-omap/dma.c | 6
-rw-r--r-- arch/arm/probes/decode.c | 2
-rw-r--r-- arch/arm/probes/kprobes/core.c | 2
-rw-r--r-- arch/arm/xen/enlighten.c | 2
-rw-r--r-- arch/arm64/Kconfig | 72
-rw-r--r-- arch/arm64/Makefile | 19
-rw-r--r-- arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/freescale/Makefile | 1
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mp.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mq.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt7622.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/nvidia/tegra186.dtsi | 20
-rw-r--r-- arch/arm64/boot/dts/nvidia/tegra194.dtsi | 15
-rw-r--r-- arch/arm64/boot/dts/nvidia/tegra210.dtsi | 20
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-main.dtsi | 36
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi | 12
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am654-base-board.dts | 4
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts | 10
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j721e-main.dtsi | 43
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi | 12
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 12
-rw-r--r-- arch/arm64/configs/defconfig | 12
-rw-r--r-- arch/arm64/crypto/aes-neonbs-core.S | 4
-rw-r--r-- arch/arm64/include/asm/archrandom.h | 5
-rw-r--r-- arch/arm64/include/asm/boot.h | 3
-rw-r--r-- arch/arm64/include/asm/compiler.h | 6
-rw-r--r-- arch/arm64/include/asm/cpu_ops.h | 2
-rw-r--r-- arch/arm64/include/asm/cpucaps.h | 7
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 32
-rw-r--r-- arch/arm64/include/asm/esr.h | 4
-rw-r--r-- arch/arm64/include/asm/exception.h | 1
-rw-r--r-- arch/arm64/include/asm/extable.h | 9
-rw-r--r-- arch/arm64/include/asm/fpsimd.h | 3
-rw-r--r-- arch/arm64/include/asm/fpsimdmacros.h | 48
-rw-r--r-- arch/arm64/include/asm/hwcap.h | 11
-rw-r--r-- arch/arm64/include/asm/insn.h | 4
-rw-r--r-- arch/arm64/include/asm/irqflags.h | 5
-rw-r--r-- arch/arm64/include/asm/kernel-pgtable.h | 2
-rw-r--r-- arch/arm64/include/asm/kvm_arm.h | 8
-rw-r--r-- arch/arm64/include/asm/kvm_asm.h | 48
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 28
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 44
-rw-r--r-- arch/arm64/include/asm/kvm_mmu.h | 90
-rw-r--r-- arch/arm64/include/asm/memory.h | 19
-rw-r--r-- arch/arm64/include/asm/mman.h | 56
-rw-r--r-- arch/arm64/include/asm/mmu.h | 14
-rw-r--r-- arch/arm64/include/asm/mmu_context.h | 11
-rw-r--r-- arch/arm64/include/asm/mte.h | 86
-rw-r--r-- arch/arm64/include/asm/numa.h | 3
-rw-r--r-- arch/arm64/include/asm/page-def.h | 5
-rw-r--r-- arch/arm64/include/asm/page.h | 19
-rw-r--r-- arch/arm64/include/asm/pci.h | 1
-rw-r--r-- arch/arm64/include/asm/perf_event.h | 3
-rw-r--r-- arch/arm64/include/asm/pgtable-hwdef.h | 16
-rw-r--r-- arch/arm64/include/asm/pgtable-prot.h | 9
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 110
-rw-r--r-- arch/arm64/include/asm/processor.h | 56
-rw-r--r-- arch/arm64/include/asm/spectre.h | 32
-rw-r--r-- arch/arm64/include/asm/stacktrace.h | 2
-rw-r--r-- arch/arm64/include/asm/sysreg.h | 87
-rw-r--r-- arch/arm64/include/asm/thread_info.h | 4
-rw-r--r-- arch/arm64/include/asm/tlbflush.h | 6
-rw-r--r-- arch/arm64/include/asm/traps.h | 2
-rw-r--r-- arch/arm64/include/uapi/asm/hwcap.h | 2
-rw-r--r-- arch/arm64/include/uapi/asm/kvm.h | 9
-rw-r--r-- arch/arm64/include/uapi/asm/mman.h | 1
-rw-r--r-- arch/arm64/include/uapi/asm/ptrace.h | 4
-rw-r--r-- arch/arm64/kernel/Makefile | 6
-rw-r--r-- arch/arm64/kernel/acpi.c | 24
-rw-r--r-- arch/arm64/kernel/cpu-reset.S | 4
-rw-r--r-- arch/arm64/kernel/cpu_errata.c | 493
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 134
-rw-r--r-- arch/arm64/kernel/cpuinfo.c | 179
-rw-r--r-- arch/arm64/kernel/debug-monitors.c | 2
-rw-r--r-- arch/arm64/kernel/entry-common.c | 21
-rw-r--r-- arch/arm64/kernel/entry-fpsimd.S | 25
-rw-r--r-- arch/arm64/kernel/entry.S | 66
-rw-r--r-- arch/arm64/kernel/fpsimd.c | 8
-rw-r--r-- arch/arm64/kernel/head.S | 16
-rw-r--r-- arch/arm64/kernel/hibernate.c | 125
-rw-r--r-- arch/arm64/kernel/hw_breakpoint.c | 6
-rw-r--r-- arch/arm64/kernel/image-vars.h | 6
-rw-r--r-- arch/arm64/kernel/image.h | 1
-rw-r--r-- arch/arm64/kernel/insn.c | 11
-rw-r--r-- arch/arm64/kernel/module-plts.c | 3
-rw-r--r-- arch/arm64/kernel/module.c | 8
-rw-r--r-- arch/arm64/kernel/mte.c | 336
-rw-r--r-- arch/arm64/kernel/paravirt.c | 26
-rw-r--r-- arch/arm64/kernel/perf_callchain.c | 6
-rw-r--r-- arch/arm64/kernel/perf_event.c | 272
-rw-r--r-- arch/arm64/kernel/perf_regs.c | 2
-rw-r--r-- arch/arm64/kernel/probes/decode-insn.c | 9
-rw-r--r-- arch/arm64/kernel/process.c | 107
-rw-r--r-- arch/arm64/kernel/proton-pack.c | 792
-rw-r--r-- arch/arm64/kernel/ptrace.c | 51
-rw-r--r-- arch/arm64/kernel/relocate_kernel.S | 12
-rw-r--r-- arch/arm64/kernel/return_address.c | 8
-rw-r--r-- arch/arm64/kernel/setup.c | 1
-rw-r--r-- arch/arm64/kernel/signal.c | 12
-rw-r--r-- arch/arm64/kernel/smp.c | 2
-rw-r--r-- arch/arm64/kernel/smp_spin_table.c | 4
-rw-r--r-- arch/arm64/kernel/ssbd.c | 129
-rw-r--r-- arch/arm64/kernel/stacktrace.c | 117
-rw-r--r-- arch/arm64/kernel/suspend.c | 7
-rw-r--r-- arch/arm64/kernel/syscall.c | 10
-rw-r--r-- arch/arm64/kernel/topology.c | 32
-rw-r--r-- arch/arm64/kernel/traps.c | 132
-rw-r--r-- arch/arm64/kernel/vdso.c | 51
-rw-r--r-- arch/arm64/kernel/vdso32/Makefile | 2
-rw-r--r-- arch/arm64/kernel/vmlinux.lds.S | 12
-rw-r--r-- arch/arm64/kvm/Kconfig | 3
-rw-r--r-- arch/arm64/kvm/arm.c | 47
-rw-r--r-- arch/arm64/kvm/handle_exit.c | 2
-rw-r--r-- arch/arm64/kvm/hyp/Makefile | 3
-rw-r--r-- arch/arm64/kvm/hyp/entry.S | 15
-rw-r--r-- arch/arm64/kvm/hyp/hyp-entry.S | 96
-rw-r--r-- arch/arm64/kvm/hyp/include/hyp/debug-sr.h | 60
-rw-r--r-- arch/arm64/kvm/hyp/include/hyp/switch.h | 64
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/switch.c | 9
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/tlb.c | 7
-rw-r--r-- arch/arm64/kvm/hyp/vgic-v3-sr.c | 16
-rw-r--r-- arch/arm64/kvm/hyp/vhe/switch.c | 9
-rw-r--r-- arch/arm64/kvm/hypercalls.c | 33
-rw-r--r-- arch/arm64/kvm/mmu.c | 31
-rw-r--r-- arch/arm64/kvm/pmu-emul.c | 26
-rw-r--r-- arch/arm64/kvm/psci.c | 74
-rw-r--r-- arch/arm64/kvm/pvtime.c | 29
-rw-r--r-- arch/arm64/kvm/reset.c | 4
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 19
-rw-r--r-- arch/arm64/kvm/trace_arm.h | 16
-rw-r--r-- arch/arm64/kvm/trace_handle_exit.h | 6
-rw-r--r-- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 4
-rw-r--r-- arch/arm64/lib/Makefile | 2
-rw-r--r-- arch/arm64/lib/mte.S | 151
-rw-r--r-- arch/arm64/mm/Makefile | 3
-rw-r--r-- arch/arm64/mm/context.c | 107
-rw-r--r-- arch/arm64/mm/copypage.c | 25
-rw-r--r-- arch/arm64/mm/extable.c | 4
-rw-r--r-- arch/arm64/mm/fault.c | 13
-rw-r--r-- arch/arm64/mm/mmu.c | 20
-rw-r--r-- arch/arm64/mm/mteswap.c | 83
-rw-r--r-- arch/arm64/mm/numa.c | 8
-rw-r--r-- arch/arm64/mm/pageattr.c | 1
-rw-r--r-- arch/arm64/mm/proc.S | 32
-rw-r--r-- arch/arm64/mm/ptdump.c (renamed from arch/arm64/mm/dump.c) | 6
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 43
-rw-r--r-- arch/c6x/kernel/signal.c | 4
-rw-r--r-- arch/csky/kernel/signal.c | 2
-rw-r--r-- arch/h8300/kernel/signal.c | 2
-rw-r--r-- arch/hexagon/kernel/module.c | 2
-rw-r--r-- arch/hexagon/kernel/signal.c | 2
-rw-r--r-- arch/ia64/include/asm/acpi.h | 2
-rw-r--r-- arch/ia64/include/asm/pgtable.h | 9
-rw-r--r-- arch/ia64/kernel/crash.c | 2
-rw-r--r-- arch/ia64/kernel/ia64_ksyms.c | 2
-rw-r--r-- arch/ia64/kernel/module.c | 2
-rw-r--r-- arch/ia64/kernel/perfmon.c | 2
-rw-r--r-- arch/ia64/kernel/signal.c | 2
-rw-r--r-- arch/ia64/kernel/unaligned.c | 6
-rw-r--r-- arch/ia64/kernel/unwind.c | 2
-rw-r--r-- arch/ia64/mm/init.c | 6
-rw-r--r-- arch/m68k/Kconfig | 24
-rw-r--r-- arch/m68k/amiga/config.c | 118
-rw-r--r-- arch/m68k/atari/atakeyb.c | 2
-rw-r--r-- arch/m68k/configs/mac_defconfig | 1
-rw-r--r-- arch/m68k/configs/multi_defconfig | 1
-rw-r--r-- arch/m68k/include/asm/thread_info.h | 8
-rw-r--r-- arch/m68k/kernel/head.S | 16
-rw-r--r-- arch/m68k/kernel/signal.c | 2
-rw-r--r-- arch/m68k/kernel/traps.c | 5
-rw-r--r-- arch/m68k/mac/config.c | 43
-rw-r--r-- arch/m68k/mac/macboing.c | 2
-rw-r--r-- arch/m68k/mac/via.c | 2
-rw-r--r-- arch/m68k/mm/fault.c | 2
-rw-r--r-- arch/m68k/mm/mcfmmu.c | 2
-rw-r--r-- arch/m68k/mm/motorola.c | 9
-rw-r--r-- arch/microblaze/include/asm/Kbuild | 1
-rw-r--r-- arch/microblaze/kernel/signal.c | 2
-rw-r--r-- arch/microblaze/mm/init.c | 3
-rw-r--r-- arch/mips/Kconfig | 1
-rw-r--r-- arch/mips/bcm47xx/setup.c | 2
-rw-r--r-- arch/mips/include/asm/cpu-type.h | 1
-rw-r--r-- arch/mips/include/asm/irqflags.h | 5
-rw-r--r-- arch/mips/include/asm/kvm_host.h | 2
-rw-r--r-- arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h | 2
-rw-r--r-- arch/mips/include/asm/mach-loongson64/irq.h | 2
-rw-r--r-- arch/mips/include/asm/mach-loongson64/mmzone.h | 1
-rw-r--r-- arch/mips/include/asm/unroll.h | 64
-rw-r--r-- arch/mips/kernel/perf_event_mipsxx.c | 4
-rw-r--r-- arch/mips/kernel/smp-bmips.c | 2
-rw-r--r-- arch/mips/kernel/traps.c | 12
-rw-r--r-- arch/mips/kvm/mips.c | 2
-rw-r--r-- arch/mips/kvm/mmu.c | 3
-rw-r--r-- arch/mips/loongson2ef/Platform | 4
-rw-r--r-- arch/mips/loongson64/cop2-ex.c | 24
-rw-r--r-- arch/mips/mm/c-r4k.c | 4
-rw-r--r-- arch/mips/oprofile/op_model_mipsxx.c | 4
-rw-r--r-- arch/mips/sni/a20r.c | 13
-rw-r--r-- arch/nds32/include/asm/irqflags.h | 5
-rw-r--r-- arch/nds32/kernel/fpu.c | 12
-rw-r--r-- arch/nds32/kernel/signal.c | 4
-rw-r--r-- arch/openrisc/include/asm/uaccess.h | 33
-rw-r--r-- arch/openrisc/kernel/setup.c | 10
-rw-r--r-- arch/openrisc/kernel/signal.c | 2
-rw-r--r-- arch/openrisc/mm/cache.c | 2
-rw-r--r-- arch/parisc/kernel/signal.c | 2
-rw-r--r-- arch/parisc/kernel/traps.c | 11
-rw-r--r-- arch/parisc/mm/fault.c | 4
-rw-r--r-- arch/powerpc/Kconfig | 15
-rw-r--r-- arch/powerpc/configs/pasemi_defconfig | 1
-rw-r--r-- arch/powerpc/configs/ppc6xx_defconfig | 1
-rw-r--r-- arch/powerpc/include/asm/book3s/64/mmu.h | 10
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable.h | 8
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 15
-rw-r--r-- arch/powerpc/include/asm/fixmap.h | 2
-rw-r--r-- arch/powerpc/include/asm/hw_irq.h | 11
-rw-r--r-- arch/powerpc/include/asm/kasan.h | 9
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 3
-rw-r--r-- arch/powerpc/include/asm/mce.h | 7
-rw-r--r-- arch/powerpc/include/asm/mman.h | 31
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgtable.h | 2
-rw-r--r-- arch/powerpc/include/asm/perf_event.h | 3
-rw-r--r-- arch/powerpc/include/asm/perf_event_server.h | 5
-rw-r--r-- arch/powerpc/include/asm/string.h | 2
-rw-r--r-- arch/powerpc/include/asm/uaccess.h | 40
-rw-r--r-- arch/powerpc/include/uapi/asm/mman.h | 2
-rw-r--r-- arch/powerpc/include/uapi/asm/perf_regs.h | 20
-rw-r--r-- arch/powerpc/kernel/cputable.c | 22
-rw-r--r-- arch/powerpc/kernel/dma-iommu.c | 3
-rw-r--r-- arch/powerpc/kernel/dt_cpu_ftrs.c | 6
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 4
-rw-r--r-- arch/powerpc/kernel/process.c | 12
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 1
-rw-r--r-- arch/powerpc/kernel/vdso32/Makefile | 2
-rw-r--r-- arch/powerpc/kernel/vdso32/vdso32.lds.S | 1
-rw-r--r-- arch/powerpc/kernel/vdso64/Makefile | 2
-rw-r--r-- arch/powerpc/kernel/vdso64/vdso64.lds.S | 3
-rw-r--r-- arch/powerpc/kvm/book3s.c | 3
-rw-r--r-- arch/powerpc/kvm/e500_mmu_host.c | 3
-rw-r--r-- arch/powerpc/lib/Makefile | 2
-rw-r--r-- arch/powerpc/lib/copy_mc_64.S (renamed from arch/powerpc/lib/memcpy_mcsafe_64.S) | 4
-rw-r--r-- arch/powerpc/mm/book3s32/mmu.c | 9
-rw-r--r-- arch/powerpc/mm/book3s64/hash_utils.c | 6
-rw-r--r-- arch/powerpc/mm/book3s64/radix_pgtable.c | 15
-rw-r--r-- arch/powerpc/mm/init_64.c | 11
-rw-r--r-- arch/powerpc/net/bpf_jit_comp.c | 1
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 24
-rw-r--r-- arch/powerpc/perf/hv-24x7.c | 11
-rw-r--r-- arch/powerpc/perf/imc-pmu.c | 4
-rw-r--r-- arch/powerpc/perf/perf_regs.c | 44
-rw-r--r-- arch/powerpc/perf/power10-pmu.c | 6
-rw-r--r-- arch/powerpc/perf/power9-pmu.c | 6
-rw-r--r-- arch/powerpc/platforms/Kconfig.cputype | 2
-rw-r--r-- arch/powerpc/platforms/powernv/idle.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/hotplug-cpu.c | 18
-rw-r--r-- arch/powerpc/platforms/pseries/papr_scm.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/ras.c | 1
-rw-r--r-- arch/riscv/Kconfig | 3
-rw-r--r-- arch/riscv/Kconfig.socs | 2
-rw-r--r-- arch/riscv/boot/dts/kendryte/k210.dtsi | 6
-rw-r--r-- arch/riscv/configs/nommu_virt_defconfig | 7
-rw-r--r-- arch/riscv/configs/rv32_defconfig | 5
-rw-r--r-- arch/riscv/include/asm/clint.h | 57
-rw-r--r-- arch/riscv/include/asm/ftrace.h | 7
-rw-r--r-- arch/riscv/include/asm/smp.h | 19
-rw-r--r-- arch/riscv/include/asm/stackprotector.h | 4
-rw-r--r-- arch/riscv/include/asm/timex.h | 62
-rw-r--r-- arch/riscv/kernel/Makefile | 2
-rw-r--r-- arch/riscv/kernel/clint.c | 44
-rw-r--r-- arch/riscv/kernel/ftrace.c | 19
-rw-r--r-- arch/riscv/kernel/sbi.c | 14
-rw-r--r-- arch/riscv/kernel/setup.c | 2
-rw-r--r-- arch/riscv/kernel/signal.c | 2
-rw-r--r-- arch/riscv/kernel/smp.c | 44
-rw-r--r-- arch/riscv/kernel/smpboot.c | 4
-rw-r--r-- arch/riscv/kernel/vmlinux.lds.S | 5
-rw-r--r-- arch/riscv/mm/init.c | 8
-rw-r--r-- arch/riscv/net/bpf_jit_comp32.c | 4
-rw-r--r-- arch/s390/Kconfig | 2
-rw-r--r-- arch/s390/configs/debug_defconfig | 4
-rw-r--r-- arch/s390/configs/defconfig | 3
-rw-r--r-- arch/s390/configs/zfcpdump_defconfig | 1
-rw-r--r-- arch/s390/include/asm/percpu.h | 28
-rw-r--r-- arch/s390/include/asm/pgtable.h | 42
-rw-r--r-- arch/s390/kernel/entry.h | 1
-rw-r--r-- arch/s390/kernel/idle.c | 6
-rw-r--r-- arch/s390/kernel/pgm_check.S | 2
-rw-r--r-- arch/s390/kernel/ptrace.c | 7
-rw-r--r-- arch/s390/kernel/runtime_instr.c | 2
-rw-r--r-- arch/s390/kernel/setup.c | 6
-rw-r--r-- arch/s390/kernel/stacktrace.c | 4
-rw-r--r-- arch/s390/mm/fault.c | 20
-rw-r--r-- arch/s390/mm/vmem.c | 1
-rw-r--r-- arch/s390/pci/pci.c | 26
-rw-r--r-- arch/s390/pci/pci_bus.c | 52
-rw-r--r-- arch/s390/pci/pci_bus.h | 13
-rw-r--r-- arch/s390/pci/pci_event.c | 9
-rw-r--r-- arch/sh/drivers/platform_early.c | 2
-rw-r--r-- arch/sh/include/asm/smp.h | 1
-rw-r--r-- arch/sh/kernel/disassemble.c | 4
-rw-r--r-- arch/sh/kernel/entry-common.S | 1
-rw-r--r-- arch/sh/kernel/kgdb.c | 2
-rw-r--r-- arch/sh/kernel/ptrace_32.c | 15
-rw-r--r-- arch/sh/kernel/signal_32.c | 2
-rw-r--r-- arch/sparc/kernel/auxio_64.c | 1
-rw-r--r-- arch/sparc/kernel/central.c | 2
-rw-r--r-- arch/sparc/kernel/kgdb_32.c | 2
-rw-r--r-- arch/sparc/kernel/kgdb_64.c | 2
-rw-r--r-- arch/sparc/kernel/pcr.c | 2
-rw-r--r-- arch/sparc/kernel/prom_32.c | 2
-rw-r--r-- arch/sparc/kernel/signal32.c | 4
-rw-r--r-- arch/sparc/kernel/signal_32.c | 4
-rw-r--r-- arch/sparc/kernel/signal_64.c | 4
-rw-r--r-- arch/sparc/math-emu/math_32.c | 8
-rw-r--r-- arch/sparc/net/bpf_jit_comp_32.c | 2
-rw-r--r-- arch/um/kernel/signal.c | 2
-rw-r--r-- arch/x86/Kconfig | 2
-rw-r--r-- arch/x86/Kconfig.debug | 2
-rw-r--r-- arch/x86/boot/cmdline.c | 4
-rw-r--r-- arch/x86/boot/compressed/Makefile | 2
-rw-r--r-- arch/x86/boot/compressed/kaslr.c | 2
-rw-r--r-- arch/x86/boot/compressed/misc.c | 7
-rw-r--r-- arch/x86/boot/string.h | 5
-rw-r--r-- arch/x86/configs/i386_defconfig | 2
-rw-r--r-- arch/x86/configs/x86_64_defconfig | 1
-rw-r--r-- arch/x86/entry/calling.h | 11
-rw-r--r-- arch/x86/entry/common.c | 31
-rw-r--r-- arch/x86/entry/entry_64.S | 7
-rw-r--r-- arch/x86/entry/thunk_32.S | 5
-rw-r--r-- arch/x86/events/intel/core.c | 6
-rw-r--r-- arch/x86/events/intel/lbr.c | 2
-rw-r--r-- arch/x86/events/intel/uncore_snb.c | 52
-rw-r--r-- arch/x86/include/asm/acpi.h | 2
-rw-r--r-- arch/x86/include/asm/asm-prototypes.h | 1
-rw-r--r-- arch/x86/include/asm/asm.h | 6
-rw-r--r-- arch/x86/include/asm/copy_mc_test.h | 75
-rw-r--r-- arch/x86/include/asm/cpufeatures.h | 4
-rw-r--r-- arch/x86/include/asm/disabled-features.h | 9
-rw-r--r-- arch/x86/include/asm/efi.h | 10
-rw-r--r-- arch/x86/include/asm/entry-common.h | 12
-rw-r--r-- arch/x86/include/asm/extable.h | 9
-rw-r--r-- arch/x86/include/asm/fpu/api.h | 12
-rw-r--r-- arch/x86/include/asm/fpu/internal.h | 14
-rw-r--r-- arch/x86/include/asm/fpu/types.h | 11
-rw-r--r-- arch/x86/include/asm/fpu/xstate.h | 2
-rw-r--r-- arch/x86/include/asm/frame.h | 19
-rw-r--r-- arch/x86/include/asm/fsgsbase.h | 4
-rw-r--r-- arch/x86/include/asm/idtentry.h | 6
-rw-r--r-- arch/x86/include/asm/io.h | 17
-rw-r--r-- arch/x86/include/asm/irq_stack.h | 69
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 3
-rw-r--r-- arch/x86/include/asm/mce.h | 33
-rw-r--r-- arch/x86/include/asm/mcsafe_test.h | 75
-rw-r--r-- arch/x86/include/asm/mmu.h | 1
-rw-r--r-- arch/x86/include/asm/msr-index.h | 3
-rw-r--r-- arch/x86/include/asm/ptrace.h | 2
-rw-r--r-- arch/x86/include/asm/special_insns.h | 70
-rw-r--r-- arch/x86/include/asm/string_64.h | 32
-rw-r--r-- arch/x86/include/asm/sync_core.h | 34
-rw-r--r-- arch/x86/include/asm/traps.h | 2
-rw-r--r-- arch/x86/include/asm/uaccess.h | 9
-rw-r--r-- arch/x86/include/asm/uaccess_64.h | 20
-rw-r--r-- arch/x86/include/asm/uv/bios.h | 17
-rw-r--r-- arch/x86/include/asm/uv/uv.h | 4
-rw-r--r-- arch/x86/include/asm/uv/uv_bau.h | 755
-rw-r--r-- arch/x86/include/asm/uv/uv_hub.h | 165
-rw-r--r-- arch/x86/include/asm/uv/uv_mmrs.h | 6992
-rw-r--r-- arch/x86/kernel/alternative.c | 2
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 5
-rw-r--r-- arch/x86/kernel/apic/probe_32.c | 2
-rw-r--r-- arch/x86/kernel/apic/vector.c | 16
-rw-r--r-- arch/x86/kernel/apic/x2apic_uv_x.c | 822
-rw-r--r-- arch/x86/kernel/cpu/cacheinfo.c | 2
-rw-r--r-- arch/x86/kernel/cpu/centaur.c | 27
-rw-r--r-- arch/x86/kernel/cpu/common.c | 55
-rw-r--r-- arch/x86/kernel/cpu/cpuid-deps.c | 1
-rw-r--r-- arch/x86/kernel/cpu/mce/amd.c | 44
-rw-r--r-- arch/x86/kernel/cpu/mce/core.c | 188
-rw-r--r-- arch/x86/kernel/cpu/mce/dev-mcelog.c | 4
-rw-r--r-- arch/x86/kernel/cpu/mce/inject.c | 2
-rw-r--r-- arch/x86/kernel/cpu/mce/intel.c | 2
-rw-r--r-- arch/x86/kernel/cpu/mce/internal.h | 13
-rw-r--r-- arch/x86/kernel/cpu/mce/severity.c | 102
-rw-r--r-- arch/x86/kernel/cpu/mtrr/cyrix.c | 2
-rw-r--r-- arch/x86/kernel/cpu/resctrl/core.c | 4
-rw-r--r-- arch/x86/kernel/cpu/scattered.c | 1
-rw-r--r-- arch/x86/kernel/fpu/init.c | 41
-rw-r--r-- arch/x86/kernel/fpu/xstate.c | 63
-rw-r--r-- arch/x86/kernel/hw_breakpoint.c | 2
-rw-r--r-- arch/x86/kernel/idt.c | 3
-rw-r--r-- arch/x86/kernel/irq.c | 2
-rw-r--r-- arch/x86/kernel/irq_64.c | 2
-rw-r--r-- arch/x86/kernel/kgdb.c | 4
-rw-r--r-- arch/x86/kernel/kvm.c | 4
-rw-r--r-- arch/x86/kernel/mpparse.c | 5
-rw-r--r-- arch/x86/kernel/msr.c | 18
-rw-r--r-- arch/x86/kernel/nmi.c | 5
-rw-r--r-- arch/x86/kernel/process.c | 7
-rw-r--r-- arch/x86/kernel/process_64.c | 8
-rw-r--r-- arch/x86/kernel/ptrace.c | 2
-rw-r--r-- arch/x86/kernel/quirks.c | 10
-rw-r--r-- arch/x86/kernel/reboot.c | 2
-rw-r--r-- arch/x86/kernel/signal.c | 2
-rw-r--r-- arch/x86/kernel/signal_compat.c | 2
-rw-r--r-- arch/x86/kernel/smpboot.c | 26
-rw-r--r-- arch/x86/kernel/stacktrace.c | 10
-rw-r--r-- arch/x86/kernel/traps.c | 65
-rw-r--r-- arch/x86/kernel/umip.c | 40
-rw-r--r-- arch/x86/kernel/uprobes.c | 4
-rw-r--r-- arch/x86/kvm/cpuid.c | 2
-rw-r--r-- arch/x86/kvm/emulate.c | 24
-rw-r--r-- arch/x86/kvm/hyperv.c | 2
-rw-r--r-- arch/x86/kvm/irq_comm.c | 2
-rw-r--r-- arch/x86/kvm/lapic.c | 6
-rw-r--r-- arch/x86/kvm/mmu/mmu.c | 7
-rw-r--r-- arch/x86/kvm/svm/nested.c | 7
-rw-r--r-- arch/x86/kvm/svm/sev.c | 4
-rw-r--r-- arch/x86/kvm/svm/svm.c | 46
-rw-r--r-- arch/x86/kvm/vmx/nested.c | 10
-rw-r--r-- arch/x86/kvm/vmx/vmx.c | 54
-rw-r--r-- arch/x86/kvm/vmx/vmx.h | 6
-rw-r--r-- arch/x86/kvm/x86.c | 44
-rw-r--r-- arch/x86/lib/Makefile | 3
-rw-r--r-- arch/x86/lib/cmdline.c | 8
-rw-r--r-- arch/x86/lib/copy_mc.c | 96
-rw-r--r-- arch/x86/lib/copy_mc_64.S | 163
-rw-r--r-- arch/x86/lib/copy_user_64.S | 111
-rw-r--r-- arch/x86/lib/insn-eval.c | 6
-rw-r--r-- arch/x86/lib/memcpy_64.S | 115
-rw-r--r-- arch/x86/lib/usercopy_64.c | 23
-rw-r--r-- arch/x86/math-emu/errors.c | 2
-rw-r--r-- arch/x86/math-emu/fpu_trig.c | 2
-rw-r--r-- arch/x86/mm/extable.c | 24
-rw-r--r-- arch/x86/mm/fault.c | 80
-rw-r--r-- arch/x86/mm/ioremap.c | 2
-rw-r--r-- arch/x86/mm/numa_emulation.c | 2
-rw-r--r-- arch/x86/mm/pat/set_memory.c | 2
-rw-r--r-- arch/x86/mm/tlb.c | 37
-rw-r--r-- arch/x86/pci/xen.c | 1
-rw-r--r-- arch/x86/platform/efi/efi.c | 69
-rw-r--r-- arch/x86/platform/efi/efi_32.c | 44
-rw-r--r-- arch/x86/platform/efi/efi_64.c | 2
-rw-r--r-- arch/x86/platform/uv/Makefile | 2
-rw-r--r-- arch/x86/platform/uv/bios_uv.c | 28
-rw-r--r-- arch/x86/platform/uv/tlb_uv.c | 2097
-rw-r--r-- arch/x86/platform/uv/uv_nmi.c | 65
-rw-r--r-- arch/x86/platform/uv/uv_time.c | 18
-rw-r--r-- arch/xtensa/kernel/signal.c | 2
495 files changed, 10842 insertions(+), 10084 deletions(-)
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index ac110ae8f978..5b60c248de9e 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -212,7 +212,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
STO_ALPHA_STD_GPLOAD)
/* Omit the prologue. */
value += 8;
- /* FALLTHRU */
+ fallthrough;
case R_ALPHA_BRADDR:
value -= (u64)location + 4;
if (value & 3)
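
The hunk above is the first of many identical conversions in this diff: free-form /* FALLTHRU */ and /* fall through */ comments become the kernel's fallthrough pseudo-keyword, which the compiler can enforce under -Wimplicit-fallthrough. The sketch below paraphrases how include/linux/compiler_attributes.h wires the keyword up (the exact guards in the kernel differ slightly), and the switch statement is a hypothetical example, not kernel code:

/*
 * Sketch, not the verbatim kernel definition: map fallthrough to the
 * compiler attribute when available, otherwise to a harmless no-op
 * statement, so "fallthrough;" is always valid C.
 */
#if defined(__has_attribute) && __has_attribute(__fallthrough__)
# define fallthrough __attribute__((__fallthrough__))
#else
# define fallthrough do {} while (0) /* fallthrough */
#endif

void setup(void);
void run(void);

void dispatch(int op)
{
	switch (op) {
	case 0:
		setup();
		fallthrough;	/* deliberate: case 0 also runs case 1 */
	case 1:
		run();
		break;
	}
}
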
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index a813020d2f11..15bc9d1e79f4 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -453,7 +453,7 @@ syscall_restart(unsigned long r0, unsigned long r19,
regs->r0 = EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->r0 = r0; /* reset v0 and a3 and replay syscall */
regs->r19 = r19;
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 49754e07e04f..921d4b6e4d95 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -883,7 +883,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
case 0x26: /* sts */
fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
- /* FALLTHRU */
+ fallthrough;
case 0x2c: /* stl */
__asm__ __volatile__(
@@ -911,7 +911,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
case 0x27: /* stt */
fake_reg = alpha_read_fp_reg(reg);
- /* FALLTHRU */
+ fallthrough;
case 0x2d: /* stq */
__asm__ __volatile__(
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 9acbeba832c0..dcaa44e408ac 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -88,6 +88,8 @@
arcpct: pct {
compatible = "snps,archs-pct";
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <20>;
};
/* TIMER0 with interrupt for clockevent */
@@ -208,7 +210,7 @@
reg = <0x8000 0x2000>;
interrupts = <10>;
interrupt-names = "macirq";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
snps,pbl = <32>;
snps,multicast-filter-bins = <256>;
clocks = <&gmacclk>;
@@ -226,7 +228,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,dwmac-mdio";
- phy0: ethernet-phy@0 {
+ phy0: ethernet-phy@0 { /* Micrel KSZ9031 */
reg = <0>;
};
};
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index b747f2ec2928..6147db925248 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -18,10 +18,10 @@
* vineetg: April 2010
* -Switched pgtable_t from being struct page * to unsigned long
* =Needed so that Page Table allocator (pte_alloc_one) is not forced to
- * to deal with struct page. Thay way in future we can make it allocate
+ * deal with struct page. That way in future we can make it allocate
* multiple PG Tbls in one Page Frame
* =sweet side effect is avoiding calls to ugly page_address( ) from the
- * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate
+ * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate)
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
index d04837d91b40..03f8b1be0c3a 100644
--- a/arch/arc/kernel/disasm.c
+++ b/arch/arc/kernel/disasm.c
@@ -339,7 +339,7 @@ void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
case op_LDWX_S: /* LDWX_S c, [b, u6] */
state->x = 1;
- /* intentional fall-through */
+ fallthrough;
case op_LDW_S: /* LDW_S c, [b, u6] */
state->zz = 2;
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 661fd842ea97..79849f37e782 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -562,7 +562,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
{
struct arc_reg_pct_build pct_bcr;
struct arc_reg_cc_build cc_bcr;
- int i, has_interrupts;
+ int i, has_interrupts, irq;
int counter_size; /* in bits */
union cc_name {
@@ -637,13 +637,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
.attr_groups = arc_pmu->attr_groups,
};
- if (has_interrupts) {
- int irq = platform_get_irq(pdev, 0);
-
- if (irq < 0) {
- pr_err("Cannot get IRQ number for the platform\n");
- return -ENODEV;
- }
+ if (has_interrupts && (irq = platform_get_irq(pdev, 0)) >= 0) {
arc_pmu->irq = irq;
@@ -652,9 +646,9 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
this_cpu_ptr(&arc_pmu_cpu));
on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
-
- } else
+ } else {
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ }
/*
* perf parser doesn't really like '-' symbol in events name, so let's
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 3d57ed0d8535..8222f8c54690 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -321,7 +321,7 @@ static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
regs->r0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
/*
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 28e8bf04b253..a331bb5d8319 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,44 +18,37 @@
#define ARC_PATH_MAX 256
-/*
- * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
- * -Prints 3 regs per line and a CR.
- * -To continue, callee regs right after scratch, special handling of CR
- */
-static noinline void print_reg_file(long *reg_rev, int start_num)
+static noinline void print_regs_scratch(struct pt_regs *regs)
{
- unsigned int i;
- char buf[512];
- int n = 0, len = sizeof(buf);
-
- for (i = start_num; i < start_num + 13; i++) {
- n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
- i, (unsigned long)*reg_rev);
-
- if (((i + 1) % 3) == 0)
- n += scnprintf(buf + n, len - n, "\n");
-
- /* because pt_regs has regs reversed: r12..r0, r25..r13 */
- if (is_isa_arcv2() && start_num == 0)
- reg_rev++;
- else
- reg_rev--;
- }
-
- if (start_num != 0)
- n += scnprintf(buf + n, len - n, "\n\n");
+ pr_cont("BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
+ regs->bta, regs->sp, regs->fp, (void *)regs->blink);
+ pr_cont("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+ regs->lp_start, regs->lp_end, regs->lp_count);
- /* To continue printing callee regs on same line as scratch regs */
- if (start_num == 0)
- pr_info("%s", buf);
- else
- pr_cont("%s\n", buf);
+ pr_info("r00: 0x%08lx\tr01: 0x%08lx\tr02: 0x%08lx\n" \
+ "r03: 0x%08lx\tr04: 0x%08lx\tr05: 0x%08lx\n" \
+ "r06: 0x%08lx\tr07: 0x%08lx\tr08: 0x%08lx\n" \
+ "r09: 0x%08lx\tr10: 0x%08lx\tr11: 0x%08lx\n" \
+ "r12: 0x%08lx\t",
+ regs->r0, regs->r1, regs->r2,
+ regs->r3, regs->r4, regs->r5,
+ regs->r6, regs->r7, regs->r8,
+ regs->r9, regs->r10, regs->r11,
+ regs->r12);
}
-static void show_callee_regs(struct callee_regs *cregs)
+static void print_regs_callee(struct callee_regs *regs)
{
- print_reg_file(&(cregs->r13), 13);
+ pr_cont("r13: 0x%08lx\tr14: 0x%08lx\n" \
+ "r15: 0x%08lx\tr16: 0x%08lx\tr17: 0x%08lx\n" \
+ "r18: 0x%08lx\tr19: 0x%08lx\tr20: 0x%08lx\n" \
+ "r21: 0x%08lx\tr22: 0x%08lx\tr23: 0x%08lx\n" \
+ "r24: 0x%08lx\tr25: 0x%08lx\n",
+ regs->r13, regs->r14,
+ regs->r15, regs->r16, regs->r17,
+ regs->r18, regs->r19, regs->r20,
+ regs->r21, regs->r22, regs->r23,
+ regs->r24, regs->r25);
}
static void print_task_path_n_nm(struct task_struct *tsk)
@@ -175,7 +168,7 @@ static void show_ecr_verbose(struct pt_regs *regs)
void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- struct callee_regs *cregs;
+ struct callee_regs *cregs = (struct callee_regs *)tsk->thread.callee_reg;
/*
* generic code calls us with preemption disabled, but some calls
@@ -204,25 +197,15 @@ void show_regs(struct pt_regs *regs)
STS_BIT(regs, A2), STS_BIT(regs, A1),
STS_BIT(regs, E2), STS_BIT(regs, E1));
#else
- pr_cont(" [%2s%2s%2s%2s]",
+ pr_cont(" [%2s%2s%2s%2s] ",
STS_BIT(regs, IE),
(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
STS_BIT(regs, DE), STS_BIT(regs, AE));
#endif
- pr_cont(" BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
- regs->bta, regs->sp, regs->fp, (void *)regs->blink);
- pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
- regs->lp_start, regs->lp_end, regs->lp_count);
-
- /* print regs->r0 thru regs->r12
- * Sequential printing was generating horrible code
- */
- print_reg_file(&(regs->r0), 0);
- /* If Callee regs were saved, display them too */
- cregs = (struct callee_regs *)current->thread.callee_reg;
+ print_regs_scratch(regs);
if (cregs)
- show_callee_regs(cregs);
+ print_regs_callee(cregs);
preempt_disable();
}
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index f87758a6851b..74ad4256022e 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -572,7 +572,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
#else
BUILD_BUG_ON(sizeof(u32) != sizeof(value));
#endif
- /* Fall through */
+ fallthrough;
case DW_EH_PE_native:
if (end < (const void *)(ptr.pul + 1))
return 0;
@@ -827,7 +827,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
case DW_CFA_def_cfa:
state->cfa.reg = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
- /* fall through */
+ fallthrough;
case DW_CFA_def_cfa_offset:
state->cfa.offs = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa_offset: 0x%lx ",
@@ -835,7 +835,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
break;
case DW_CFA_def_cfa_sf:
state->cfa.reg = get_uleb128(&ptr.p8, end);
- /* fall through */
+ fallthrough;
case DW_CFA_def_cfa_offset_sf:
state->cfa.offs = get_sleb128(&ptr.p8, end)
* state->dataAlign;
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index f886ac69d8ad..3a35b82a718e 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -26,8 +26,8 @@ static unsigned long low_mem_sz;
#ifdef CONFIG_HIGHMEM
static unsigned long min_high_pfn, max_high_pfn;
-static u64 high_mem_start;
-static u64 high_mem_sz;
+static phys_addr_t high_mem_start;
+static phys_addr_t high_mem_sz;
#endif
#ifdef CONFIG_DISCONTIGMEM
@@ -69,6 +69,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
high_mem_sz = size;
in_use = 1;
memblock_add_node(base, size, 1);
+ memblock_reserve(base, size);
#endif
}
@@ -157,7 +158,7 @@ void __init setup_arch_memory(void)
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
- max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
+ max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
kmap_init();
@@ -166,22 +167,26 @@ void __init setup_arch_memory(void)
free_area_init(max_zone_pfn);
}
-/*
- * mem_init - initializes memory
- *
- * Frees up bootmem
- * Calculates and displays memory available/used
- */
-void __init mem_init(void)
+static void __init highmem_init(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
- reset_all_zones_managed_pages();
+ memblock_free(high_mem_start, high_mem_sz);
for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
free_highmem_page(pfn_to_page(tmp));
#endif
+}
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
memblock_free_all();
+ highmem_init();
mem_init_print_info(NULL);
}
diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
index a4a61531c7fb..77712c5ffe84 100644
--- a/arch/arc/plat-eznps/include/plat/ctop.h
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -33,7 +33,6 @@
#define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C)
#define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030)
#define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080)
-#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088)
#define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C)
#define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300)
diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
index 8d19925fc09e..6783cf16ff81 100644
--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
@@ -116,7 +116,6 @@
switch0: ksz8563@0 {
compatible = "microchip,ksz8563";
reg = <0>;
- phy-mode = "mii";
reset-gpios = <&pioA PIN_PD4 GPIO_ACTIVE_LOW>;
spi-max-frequency = <500000>;
@@ -140,6 +139,7 @@
reg = <2>;
label = "cpu";
ethernet = <&macb0>;
+ phy-mode = "mii";
fixed-link {
speed = <100>;
full-duplex;
diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi
index cbebed5f050e..e8df458aad39 100644
--- a/arch/arm/boot/dts/bcm-hr2.dtsi
+++ b/arch/arm/boot/dts/bcm-hr2.dtsi
@@ -217,7 +217,7 @@
};
qspi: spi@27200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 0346ea621f0f..c846fa3c244d 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -284,7 +284,7 @@
};
qspi: spi@27200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index f7ae5a4530b8..d94357b21f7e 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -13,7 +13,7 @@
soc {
firmware: firmware {
- compatible = "raspberrypi,bcm2835-firmware", "simple-bus";
+ compatible = "raspberrypi,bcm2835-firmware", "simple-mfd";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 2d9b4dd05830..0016720ce530 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -488,7 +488,7 @@
};
spi@18029200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x18029200 0x184>,
<0x18029000 0x124>,
<0x1811b408 0x004>,
diff --git a/arch/arm/boot/dts/imx6q-logicpd.dts b/arch/arm/boot/dts/imx6q-logicpd.dts
index 7a3d1d3e54a9..8f94364ba484 100644
--- a/arch/arm/boot/dts/imx6q-logicpd.dts
+++ b/arch/arm/boot/dts/imx6q-logicpd.dts
@@ -13,7 +13,7 @@
backlight: backlight-lvds {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 20000>;
+ pwms = <&pwm3 0 20000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
power-supply = <&reg_lcd>;
diff --git a/arch/arm/boot/dts/imx6q-prtwd2.dts b/arch/arm/boot/dts/imx6q-prtwd2.dts
index dffafbcaa7af..349959d38020 100644
--- a/arch/arm/boot/dts/imx6q-prtwd2.dts
+++ b/arch/arm/boot/dts/imx6q-prtwd2.dts
@@ -30,7 +30,7 @@
};
/* PRTWD2 rev 1 bitbang I2C for Ethernet Switch */
- i2c@4 {
+ i2c {
compatible = "i2c-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c4>;
diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
index 7705285d9e3c..4d01c3300b97 100644
--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
@@ -22,8 +22,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6sx-pinfunc.h b/arch/arm/boot/dts/imx6sx-pinfunc.h
index 0b02c7e60c17..f4dc46207954 100644
--- a/arch/arm/boot/dts/imx6sx-pinfunc.h
+++ b/arch/arm/boot/dts/imx6sx-pinfunc.h
@@ -1026,7 +1026,7 @@
#define MX6SX_PAD_QSPI1B_DQS__SIM_M_HADDR_15 0x01B0 0x04F8 0x0000 0x7 0x0
#define MX6SX_PAD_QSPI1B_SCLK__QSPI1_B_SCLK 0x01B4 0x04FC 0x0000 0x0 0x0
#define MX6SX_PAD_QSPI1B_SCLK__UART3_DCE_RX 0x01B4 0x04FC 0x0840 0x1 0x4
-#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x0 0x0
+#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x1 0x0
#define MX6SX_PAD_QSPI1B_SCLK__ECSPI3_SCLK 0x01B4 0x04FC 0x0730 0x2 0x1
#define MX6SX_PAD_QSPI1B_SCLK__ESAI_RX_HF_CLK 0x01B4 0x04FC 0x0780 0x3 0x2
#define MX6SX_PAD_QSPI1B_SCLK__CSI1_DATA_16 0x01B4 0x04FC 0x06DC 0x4 0x1
diff --git a/arch/arm/boot/dts/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/imx7d-zii-rmu2.dts
index e5e20b07f184..7cb6153fc650 100644
--- a/arch/arm/boot/dts/imx7d-zii-rmu2.dts
+++ b/arch/arm/boot/dts/imx7d-zii-rmu2.dts
@@ -58,7 +58,7 @@
<&clks IMX7D_ENET1_TIME_ROOT_CLK>;
assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
assigned-clock-rates = <0>, <100000000>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&fec1_phy>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index 367439639da9..b7ea37ad4e55 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -394,7 +394,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLC>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 0 32>;
+ gpio-ranges = <&iomuxc1 0 0 20>;
};
gpio_ptd: gpio@40af0000 {
@@ -408,7 +408,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLD>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 32 32>;
+ gpio-ranges = <&iomuxc1 0 32 12>;
};
gpio_pte: gpio@40b00000 {
@@ -422,7 +422,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLE>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 64 32>;
+ gpio-ranges = <&iomuxc1 0 64 16>;
};
gpio_ptf: gpio@40b10000 {
@@ -436,7 +436,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLF>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 96 32>;
+ gpio-ranges = <&iomuxc1 0 96 20>;
};
};
diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
index 100396f6c2fe..395e05f10d36 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
@@ -51,6 +51,8 @@
&mcbsp2 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
};
&charger {
@@ -102,35 +104,18 @@
regulator-max-microvolt = <3300000>;
};
- lcd0: display@0 {
- compatible = "panel-dpi";
- label = "28";
- status = "okay";
- /* default-on; */
+ lcd0: display {
+ /* This isn't the exact LCD, but the timings meet spec */
+ compatible = "logicpd,type28";
pinctrl-names = "default";
pinctrl-0 = <&lcd_enable_pin>;
- enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */
+ backlight = <&bl>;
+ enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;
port {
lcd_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
-
- panel-timing {
- clock-frequency = <9000000>;
- hactive = <480>;
- vactive = <272>;
- hfront-porch = <3>;
- hback-porch = <2>;
- hsync-len = <42>;
- vback-porch = <3>;
- vfront-porch = <2>;
- vsync-len = <11>;
- hsync-active = <1>;
- vsync-active = <1>;
- de-active = <1>;
- pixelclk-active = <0>;
- };
};
bl: backlight {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
index 381f0e82bb70..b0f6613e6d54 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
@@ -81,6 +81,8 @@
};
&mcbsp2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 069af9a19bb6..827373ef1a54 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -182,7 +182,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x1550000 0x0 0x10000>,
- <0x0 0x40000000 0x0 0x40000000>;
+ <0x0 0x40000000 0x0 0x20000000>;
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 5da9cff7a53c..a82c96258a93 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -488,11 +488,11 @@
};
};
- target-module@5000 {
+ target-module@4000 {
compatible = "ti,sysc-omap2", "ti,sysc";
- reg = <0x5000 0x4>,
- <0x5010 0x4>,
- <0x5014 0x4>;
+ reg = <0x4000 0x4>,
+ <0x4010 0x4>,
+ <0x4014 0x4>;
reg-names = "rev", "sysc", "syss";
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
@@ -504,7 +504,7 @@
ti,syss-mask = <1>;
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0 0x5000 0x1000>;
+ ranges = <0 0x4000 0x1000>;
dsi1: encoder@0 {
compatible = "ti,omap5-dsi";
@@ -514,8 +514,9 @@
reg-names = "proto", "phy", "pll";
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
};
};
@@ -545,8 +546,9 @@
reg-names = "proto", "phy", "pll";
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
};
};
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index fc4abef143a0..0013ec3463c4 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -821,7 +821,7 @@
timer3: timer3@ffd00100 {
compatible = "snps,dw-apb-timer";
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xffd01000 0x100>;
+ reg = <0xffd00100 0x100>;
clocks = <&l4_sys_free_clk>;
clock-names = "timer";
resets = <&rst L4SYSTIMER1_RESET>;
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 0fe03aa0367f..2259d11af721 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -495,7 +495,7 @@
};
ocotp: ocotp@400a5000 {
- compatible = "fsl,vf610-ocotp";
+ compatible = "fsl,vf610-ocotp", "syscon";
reg = <0x400a5000 0x1000>;
clocks = <&clks VF610_CLK_OCOTP>;
};
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index a9755c501bec..b06e537d5149 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -1,13 +1,11 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_MULTI_V4T=y
CONFIG_ARCH_MULTI_V5=y
# CONFIG_ARCH_MULTI_V7 is not set
@@ -15,19 +13,17 @@ CONFIG_ARCH_INTEGRATOR=y
CONFIG_ARCH_INTEGRATOR_AP=y
CONFIG_INTEGRATOR_IMPD1=y
CONFIG_ARCH_INTEGRATOR_CP=y
-CONFIG_PCI=y
-CONFIG_PREEMPT=y
CONFIG_AEABI=y
# CONFIG_ATAGS is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="console=ttyAM0,38400n8 root=/dev/nfs ip=bootp"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPUFREQ_DT=y
-CONFIG_CMA=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -37,6 +33,7 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
+CONFIG_PCI=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_AFS_PARTS=y
@@ -52,9 +49,12 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_NETDEVICES=y
CONFIG_E100=y
CONFIG_SMC91X=y
+CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_DRM=y
+CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7fff88e61252..7a4853b1213a 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -547,7 +547,7 @@ static int arch_build_bp_info(struct perf_event *bp,
if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
&& max_watchpoint_len >= 8)
break;
- /* Else, fall through */
+ fallthrough;
default:
return -EINVAL;
}
@@ -612,12 +612,12 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
/* Allow halfword watchpoints and breakpoints. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
- /* Else, fall through */
+ fallthrough;
case 3:
/* Allow single byte watchpoint. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
- /* Else, fall through */
+ fallthrough;
default:
ret = -EINVAL;
goto out;
@@ -884,7 +884,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
break;
case ARM_ENTRY_ASYNC_WATCHPOINT:
WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
- /* Fall through */
+ fallthrough;
case ARM_ENTRY_SYNC_WATCHPOINT:
watchpoint_handler(addr, fsr, regs);
break;
@@ -933,7 +933,7 @@ static bool core_has_os_save_restore(void)
ARM_DBG_READ(c1, c1, 4, oslsr);
if (oslsr & ARM_OSLSR_OSLM0)
return true;
- /* Else, fall through */
+ fallthrough;
default:
return false;
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index c9dc912b83f0..c1892f733f20 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -596,7 +596,7 @@ static int do_signal(struct pt_regs *regs, int syscall)
switch (retval) {
case -ERESTART_RESTARTBLOCK:
restart -= 2;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
diff --git a/arch/arm/mach-ep93xx/crunch.c b/arch/arm/mach-ep93xx/crunch.c
index 1c05c5bf7e5c..757032d82f63 100644
--- a/arch/arm/mach-ep93xx/crunch.c
+++ b/arch/arm/mach-ep93xx/crunch.c
@@ -49,7 +49,7 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
* FALLTHROUGH: Ensure we don't try to overwrite our newly
* initialised state information on the first fault.
*/
- /* Fall through */
+ fallthrough;
case THREAD_NOTIFY_EXIT:
crunch_task_release(thread);
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index 24dd5bbe60e4..094337dc1bc7 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -24,7 +24,9 @@ static int imx6q_enter_wait(struct cpuidle_device *dev,
imx6_set_lpm(WAIT_UNCLOCKED);
raw_spin_unlock(&cpuidle_lock);
+ rcu_idle_enter();
cpu_do_idle();
+ rcu_idle_exit();
raw_spin_lock(&cpuidle_lock);
if (num_idle_cpus-- == num_online_cpus())
@@ -44,7 +46,7 @@ static struct cpuidle_driver imx6q_cpuidle_driver = {
{
.exit_latency = 50,
.target_residency = 75,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_RCU_IDLE,
.enter = imx6q_enter_wait,
.name = "WAIT",
.desc = "Clock off",
diff --git a/arch/arm/mach-mmp/pm-mmp2.c b/arch/arm/mach-mmp/pm-mmp2.c
index 2d86381e152d..7a6f74c32d42 100644
--- a/arch/arm/mach-mmp/pm-mmp2.c
+++ b/arch/arm/mach-mmp/pm-mmp2.c
@@ -123,19 +123,19 @@ void mmp2_pm_enter_lowpower_mode(int state)
case POWER_MODE_SYS_SLEEP:
apcr |= MPMU_PCR_PJ_SLPEN; /* set the SLPEN bit */
apcr |= MPMU_PCR_PJ_VCTCXOSD; /* set VCTCXOSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_CHIP_SLEEP:
apcr |= MPMU_PCR_PJ_SLPEN;
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_SLEEP:
apcr |= MPMU_PCR_PJ_APBSD; /* set APBSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_IDLE:
apcr |= MPMU_PCR_PJ_AXISD; /* set AXISDD bit */
apcr |= MPMU_PCR_PJ_DDRCORSD; /* set DDRCORSD bit */
idle_cfg |= APMU_PJ_IDLE_CFG_PJ_PWRDWN; /* PJ power down */
apcr |= MPMU_PCR_PJ_SPSD;
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_EXTIDLE:
idle_cfg |= APMU_PJ_IDLE_CFG_PJ_IDLE; /* set the IDLE bit */
idle_cfg &= ~APMU_PJ_IDLE_CFG_ISO_MODE_CNTRL_MASK;
diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c
index 69ebe18ff209..1d71d73c1862 100644
--- a/arch/arm/mach-mmp/pm-pxa910.c
+++ b/arch/arm/mach-mmp/pm-pxa910.c
@@ -145,23 +145,23 @@ void pxa910_pm_enter_lowpower_mode(int state)
case POWER_MODE_UDR:
/* only shutdown APB in UDR */
apcr |= MPMU_APCR_STBYEN | MPMU_APCR_APBSD;
- /* fall through */
+ fallthrough;
case POWER_MODE_SYS_SLEEP:
apcr |= MPMU_APCR_SLPEN; /* set the SLPEN bit */
apcr |= MPMU_APCR_VCTCXOSD; /* set VCTCXOSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_SLEEP:
apcr |= MPMU_APCR_DDRCORSD; /* set DDRCORSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_IDLE:
apcr |= MPMU_APCR_AXISD; /* set AXISDD bit */
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_EXTIDLE:
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_IDLE;
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWRDWN;
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWR_SW(3)
| APMU_MOH_IDLE_CFG_MOH_L2_PWR_SW(3);
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_INTIDLE:
break;
}
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 1d119b974f5f..59755b5a1ad7 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -396,7 +396,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "3.1";
break;
case 7:
- /* FALLTHROUGH */
default:
/* Use the latest known revision as default */
omap_revision = OMAP3430_REV_ES3_1_2;
@@ -416,7 +415,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "1.0";
break;
case 1:
- /* FALLTHROUGH */
default:
omap_revision = AM35XX_REV_ES1_1;
cpu_rev = "1.1";
@@ -435,7 +433,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "1.1";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = OMAP3630_REV_ES1_2;
cpu_rev = "1.2";
@@ -456,7 +453,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "2.0";
break;
case 3:
- /* FALLTHROUGH */
default:
omap_revision = TI8168_REV_ES2_1;
cpu_rev = "2.1";
@@ -473,7 +469,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "2.0";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = AM335X_REV_ES2_1;
cpu_rev = "2.1";
@@ -491,7 +486,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "1.1";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = AM437X_REV_ES1_2;
cpu_rev = "1.2";
@@ -502,7 +496,6 @@ void __init omap3xxx_check_revision(void)
case 0xb968:
switch (rev) {
case 0:
- /* FALLTHROUGH */
case 1:
omap_revision = TI8148_REV_ES1_0;
cpu_rev = "1.0";
@@ -512,7 +505,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "2.0";
break;
case 3:
- /* FALLTHROUGH */
default:
omap_revision = TI8148_REV_ES2_1;
cpu_rev = "2.1";
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index 54aff33e55e6..bfa5e1b8dba7 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -74,7 +74,7 @@ static struct powerdomain *_get_pwrdm(struct device *dev)
return pwrdm;
clk = of_clk_get(dev->of_node->parent, 0);
- if (!clk) {
+ if (IS_ERR(clk)) {
dev_err(dev, "no fck found\n");
return NULL;
}
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 6b4548f3b57f..fc7bb2ca1672 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -240,7 +240,7 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);
omap_auxdata_legacy_init(dev);
- /* fall through */
+ fallthrough;
default:
od = to_omap_device(pdev);
if (od)
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 6df395fff971..f5dfddf492e2 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -298,11 +298,7 @@ static void omap3_pm_idle(void)
if (omap_irq_pending())
return;
- trace_cpu_idle_rcuidle(1, smp_processor_id());
-
omap_sram_idle();
-
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_SUSPEND
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index d13344b2ddcd..87cb47220e82 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -624,7 +624,7 @@ static void __init dns323_init(void)
dns323ab_leds[0].active_low = 1;
gpio_request(DNS323_GPIO_LED_POWER1, "Power Led Enable");
gpio_direction_output(DNS323_GPIO_LED_POWER1, 0);
- /* Fall through */
+ fallthrough;
case DNS323_REV_B1:
i2c_register_board_info(0, dns323ab_i2c_devices,
ARRAY_SIZE(dns323ab_i2c_devices));
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index ea2c84214bac..d23970bd638d 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -46,7 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
switch (tag->u.acorn.vram_pages) {
case 512:
vram_size += PAGE_SIZE * 256;
- /* Fall through - ??? */
+ fallthrough; /* ??? */
case 256:
vram_size += PAGE_SIZE * 256;
default:
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
index 76a65df42d10..d5c805adf7a8 100644
--- a/arch/arm/mach-tegra/reset.c
+++ b/arch/arm/mach-tegra/reset.c
@@ -70,7 +70,7 @@ static void __init tegra_cpu_reset_handler_enable(void)
switch (err) {
case -ENOSYS:
tegra_cpu_reset_handler_set(reset_address);
- /* fall through */
+ fallthrough;
case 0:
is_enabled = true;
break;
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index f4bfc1cac91a..ea81e89e7740 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -694,7 +694,7 @@ thumb2arm(u16 tinstr)
return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
(tinstr & 255); /* register_list */
}
- /* Else, fall through - for illegal instruction case */
+ fallthrough; /* for illegal instruction case */
default:
return BAD_INSTR;
@@ -750,7 +750,7 @@ do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
case 0xe8e0:
case 0xe9e0:
poffset->un = (tinst2 & 0xff) << 2;
- /* Fall through */
+ fallthrough;
case 0xe940:
case 0xe9c0:
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index c0fbfca5da8b..114c05ab4dd9 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -71,7 +71,7 @@ static void cpu_v7_spectre_init(void)
/* Other ARM CPUs require no workaround */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
break;
- /* fallthrough */
+ fallthrough;
/* Cortex A57/A72 require firmware workaround */
case ARM_CPU_PART_CORTEX_A57:
case ARM_CPU_PART_CORTEX_A72: {
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index b2e9e822426f..1eb59003bdec 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -309,14 +309,14 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
* not supported by current hardware on OMAP1
* w |= (0x03 << 7);
*/
- /* fall through */
+ fallthrough;
case OMAP_DMA_DATA_BURST_16:
if (dma_omap2plus()) {
burst = 0x3;
break;
}
/* OMAP1 don't support burst 16 */
- /* fall through */
+ fallthrough;
default:
BUG();
}
@@ -393,7 +393,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
break;
}
/* OMAP1 don't support burst 16 */
- /* fall through */
+ fallthrough;
default:
printk(KERN_ERR "Invalid DMA burst mode\n");
BUG();
diff --git a/arch/arm/probes/decode.c b/arch/arm/probes/decode.c
index fe81a9c21f2d..c84053a81358 100644
--- a/arch/arm/probes/decode.c
+++ b/arch/arm/probes/decode.c
@@ -307,7 +307,7 @@ static bool __kprobes decode_regs(probes_opcode_t *pinsn, u32 regs, bool modify)
case REG_TYPE_NOPCWB:
if (!is_writeback(insn))
break; /* No writeback, so any register is OK */
- /* fall through... */
+ fallthrough;
case REG_TYPE_NOPC:
case REG_TYPE_NOPCX:
/* Reject PC (R15) */
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index 90b5bc723c83..feefa2055eba 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -280,7 +280,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
/* A nested probe was hit in FIQ, it is a BUG */
pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
- /* fall through */
+ fallthrough;
default:
/* impossible cases */
BUG();
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index e93145d72c26..a6ab3689b2f4 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -150,7 +150,7 @@ static int xen_starting_cpu(unsigned int cpu)
pr_info("Xen: initializing cpu%d\n", cpu);
vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
- info.mfn = virt_to_gfn(vcpup);
+ info.mfn = percpu_to_gfn(vcpup);
info.offset = xen_offset_in_page(vcpup);
err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6d232837cbee..43091f439e4e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -29,6 +29,7 @@ config ARM64
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_SET_MEMORY
+ select ARCH_STACKWALK
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -211,12 +212,18 @@ config ARM64_PAGE_SHIFT
default 14 if ARM64_16K_PAGES
default 12
-config ARM64_CONT_SHIFT
+config ARM64_CONT_PTE_SHIFT
int
default 5 if ARM64_64K_PAGES
default 7 if ARM64_16K_PAGES
default 4
+config ARM64_CONT_PMD_SHIFT
+ int
+ default 5 if ARM64_64K_PAGES
+ default 5 if ARM64_16K_PAGES
+ default 4
+
config ARCH_MMAP_RND_BITS_MIN
default 14 if ARM64_64K_PAGES
default 16 if ARM64_16K_PAGES
@@ -1165,32 +1172,6 @@ config UNMAP_KERNEL_AT_EL0
If unsure, say Y.
-config HARDEN_BRANCH_PREDICTOR
- bool "Harden the branch predictor against aliasing attacks" if EXPERT
- default y
- help
- Speculation attacks against some high-performance processors rely on
- being able to manipulate the branch predictor for a victim context by
- executing aliasing branches in the attacker context. Such attacks
- can be partially mitigated against by clearing internal branch
- predictor state and limiting the prediction logic in some situations.
-
- This config option will take CPU-specific actions to harden the
- branch predictor against aliasing attacks and may rely on specific
- instruction sequences or control bits being set by the system
- firmware.
-
- If unsure, say Y.
-
-config ARM64_SSBD
- bool "Speculative Store Bypass Disable" if EXPERT
- default y
- help
- This enables mitigation of the bypassing of previous stores
- by speculative loads.
-
- If unsure, say Y.
-
config RODATA_FULL_DEFAULT_ENABLED
bool "Apply r/o permissions of VM areas also to their linear aliases"
default y
@@ -1664,6 +1645,39 @@ config ARCH_RANDOM
provides a high bandwidth, cryptographically secure
hardware random number generator.
+config ARM64_AS_HAS_MTE
+ # Initial support for MTE went in binutils 2.32.0, checked with
+ # ".arch armv8.5-a+memtag" below. However, this was incomplete
+ # as a late addition to the final architecture spec (LDGM/STGM)
+ # is only supported in the newer 2.32.x and 2.33 binutils
+ # versions, hence the extra "stgm" instruction check below.
+ def_bool $(as-instr,.arch armv8.5-a+memtag\nstgm xzr$(comma)[x0])
+
+config ARM64_MTE
+ bool "Memory Tagging Extension support"
+ default y
+ depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
+ select ARCH_USES_HIGH_VMA_FLAGS
+ help
+ Memory Tagging (part of the ARMv8.5 Extensions) provides
+ architectural support for run-time, always-on detection of
+ various classes of memory error, aiding software debugging
+ and helping eliminate vulnerabilities arising from
+ memory-unsafe languages.
+
+ This option enables the support for the Memory Tagging
+ Extension at EL0 (i.e. for userspace).
+
+ Selecting this option allows the feature to be detected at
+ runtime. Any secondary CPU not implementing this feature will
+ not be allowed a late bring-up.
+
+ Userspace binaries that want to use this feature must
+ explicitly opt in; a minimal opt-in sketch follows these
+ Kconfig hunks. The mechanism is described in:
+
+ Documentation/arm64/memory-tagging-extension.rst.
+
endmenu
config ARM64_SVE
@@ -1876,6 +1890,10 @@ config ARCH_ENABLE_HUGEPAGE_MIGRATION
def_bool y
depends on HUGETLB_PAGE && MIGRATION
+config ARCH_ENABLE_THP_MIGRATION
+ def_bool y
+ depends on TRANSPARENT_HUGEPAGE
+
menu "Power management options"
source "kernel/power/Kconfig"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 55bc8546d9c7..0fd4c1be4f64 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -11,7 +11,6 @@
# Copyright (C) 1995-2001 by Russell King
LDFLAGS_vmlinux :=--no-undefined -X
-CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
@@ -82,8 +81,8 @@ endif
# compiler to generate them and consequently to break the single image contract
# we pass it only to the assembler. This option is used only with
# non-integrated assemblers.
-ifneq ($(CONFIG_AS_HAS_ARMV8_4), y)
-branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
+ifeq ($(CONFIG_AS_HAS_PAC), y)
+asm-arch := armv8.3-a
endif
endif
@@ -91,7 +90,12 @@ KBUILD_CFLAGS += $(branch-prot-flags-y)
ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
# make sure to pass the newest target architecture to -march.
-KBUILD_CFLAGS += -Wa,-march=armv8.4-a
+asm-arch := armv8.4-a
+endif
+
+ifdef asm-arch
+KBUILD_CFLAGS += -Wa,-march=$(asm-arch) \
+ -DARM64_ASM_ARCH='"$(asm-arch)"'
endif
ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
@@ -127,9 +131,6 @@ endif
# Default value
head-y := arch/arm64/kernel/head.o
-# The byte offset of the kernel image in RAM from the start of RAM.
-TEXT_OFFSET := 0x0
-
ifeq ($(CONFIG_KASAN_SW_TAGS), y)
KASAN_SHADOW_SCALE_SHIFT := 4
else
@@ -140,8 +141,6 @@ KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
-export TEXT_OFFSET
-
core-y += arch/arm64/
libs-y := arch/arm64/lib/ $(libs-y)
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
@@ -165,6 +164,8 @@ zinstall install:
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+ $(if $(CONFIG_COMPAT_VDSO), \
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index 15f7b0ed3836..39802066232e 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -745,7 +745,7 @@
};
qspi: spi@66470200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
+ compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
reg = <0x66470200 0x184>,
<0x66470000 0x124>,
<0x67017408 0x004>,
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index a39f0a1723e0..903c0eb61290 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -28,6 +28,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-honeycomb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-ddr4-evk.dtb
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index 9de2aa1c573c..a5154f13a18e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -702,7 +702,7 @@
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_SDMA1_ROOT>,
- <&clk IMX8MP_CLK_SDMA1_ROOT>;
+ <&clk IMX8MP_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index f70435cf9ad5..561fa792fe5a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -423,7 +423,7 @@
tmu: tmu@30260000 {
compatible = "fsl,imx8mq-tmu";
reg = <0x30260000 0x10000>;
- interrupt = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MQ_CLK_TMU_ROOT>;
little-endian;
fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 1a39e0ef776b..5b9ec032ce8d 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -686,6 +686,8 @@
clocks = <&pericfg CLK_PERI_MSDC30_0_PD>,
<&topckgen CLK_TOP_MSDC50_0_SEL>;
clock-names = "source", "hclk";
+ resets = <&pericfg MT7622_PERI_MSDC0_SW_RST>;
+ reset-names = "hrst";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 34d249d85da7..8eb61dd9921e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -337,8 +337,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03400000 0x0 0x10000>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC1>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC1>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRA &emc>,
@@ -366,8 +367,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03420000 0x0 0x10000>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC2>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC2>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC2>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRAA &emc>,
@@ -390,8 +392,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03440000 0x0 0x10000>;
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC3>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC3>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCR &emc>,
@@ -416,8 +419,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03460000 0x0 0x10000>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
assigned-clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
<&bpmp TEGRA186_CLK_PLLC4_VCO>;
assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLC4_VCO>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index 48160f48003a..ca5cb6aef5ee 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -460,8 +460,9 @@
compatible = "nvidia,tegra194-sdhci";
reg = <0x03400000 0x10000>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC1>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA194_RESET_SDMMC1>;
reset-names = "sdhci";
interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCRA &emc>,
@@ -485,8 +486,9 @@
compatible = "nvidia,tegra194-sdhci";
reg = <0x03440000 0x10000>;
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC3>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA194_RESET_SDMMC3>;
reset-names = "sdhci";
interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCR &emc>,
@@ -511,8 +513,9 @@
compatible = "nvidia,tegra194-sdhci";
reg = <0x03460000 0x10000>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
assigned-clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
<&bpmp TEGRA194_CLK_PLLC4>;
assigned-clock-parents =
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 829f786af133..8cca2166a446 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -1194,8 +1194,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0000 0x0 0x200>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC1>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 14>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
@@ -1222,8 +1223,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0200 0x0 0x200>;
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC2>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC2>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 9>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-1v8-drv";
@@ -1239,8 +1241,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0400 0x0 0x200>;
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC3>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 69>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
@@ -1262,8 +1265,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0600 0x0 0x200>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC4>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 15>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3-drv", "sdmmc-1v8-drv";
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index 9edfae5944f7..24ef18fe77df 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -417,10 +417,10 @@
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <56>;
- ti,sci-rm-range-girq = <0x1>;
+ ti,sci-dev-id = <100>;
+ ti,interrupt-ranges = <0 392 32>;
};
main_navss {
@@ -438,10 +438,11 @@
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <56>;
- ti,sci-rm-range-girq = <0x0>, <0x2>;
+ ti,sci-dev-id = <182>;
+ ti,interrupt-ranges = <0 64 64>,
+ <64 448 64>;
};
inta_main_udmass: interrupt-controller@33d00000 {
@@ -452,8 +453,7 @@
msi-controller;
ti,sci = <&dmsc>;
ti,sci-dev-id = <179>;
- ti,sci-rm-range-vint = <0x0>;
- ti,sci-rm-range-global-event = <0x1>;
+ ti,interrupt-ranges = <0 0 256>;
};
secure_proxy_main: mailbox@32c00000 {
@@ -589,7 +589,7 @@
<0x0 0x33000000 0x0 0x40000>;
reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
ti,num-rings = <818>;
- ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+ ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */
ti,dma-ring-reset-quirk;
ti,sci = <&dmsc>;
ti,sci-dev-id = <187>;
@@ -609,11 +609,11 @@
ti,sci-dev-id = <188>;
ti,ringacc = <&ringacc>;
- ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
- <0x2>; /* TX_CHAN */
- ti,sci-rm-range-rchan = <0x4>, /* RX_HCHAN */
- <0x5>; /* RX_CHAN */
- ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */
+ ti,sci-rm-range-tchan = <0xf>, /* TX_HCHAN */
+ <0xd>; /* TX_CHAN */
+ ti,sci-rm-range-rchan = <0xb>, /* RX_HCHAN */
+ <0xa>; /* RX_CHAN */
+ ti,sci-rm-range-rflow = <0x0>; /* GP RFLOW */
};
cpts@310d0000 {
@@ -622,7 +622,7 @@
reg-names = "cpts";
clocks = <&main_cpts_mux>;
clock-names = "cpts";
- interrupts-extended = <&intr_main_navss 163 0>;
+ interrupts-extended = <&intr_main_navss 391>;
interrupt-names = "cpts";
ti,cpts-periodic-outputs = <6>;
ti,cpts-ext-ts-inputs = <8>;
@@ -645,8 +645,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&intr_main_gpio>;
- interrupts = <57 256>, <57 257>, <57 258>, <57 259>, <57 260>,
- <57 261>;
+ interrupts = <192>, <193>, <194>, <195>, <196>, <197>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <96>;
@@ -661,8 +660,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&intr_main_gpio>;
- interrupts = <58 256>, <58 257>, <58 258>, <58 259>, <58 260>,
- <58 261>;
+ interrupts = <200>, <201>, <202>, <203>, <204>, <205>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <90>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 8c1abcfe0860..51ca4b4d4c21 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -134,7 +134,7 @@
<0x0 0x2a500000 0x0 0x40000>;
reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
ti,num-rings = <286>;
- ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+ ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */
ti,dma-ring-reset-quirk;
ti,sci = <&dmsc>;
ti,sci-dev-id = <195>;
@@ -154,11 +154,11 @@
ti,sci-dev-id = <194>;
ti,ringacc = <&mcu_ringacc>;
- ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
- <0x2>; /* TX_CHAN */
- ti,sci-rm-range-rchan = <0x3>, /* RX_HCHAN */
- <0x4>; /* RX_CHAN */
- ti,sci-rm-range-rflow = <0x5>; /* GP RFLOW */
+ ti,sci-rm-range-tchan = <0xf>, /* TX_HCHAN */
+ <0xd>; /* TX_CHAN */
+ ti,sci-rm-range-rchan = <0xb>, /* RX_HCHAN */
+ <0xa>; /* RX_CHAN */
+ ti,sci-rm-range-rflow = <0x0>; /* GP RFLOW */
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index 5f55b9e82cf1..a1ffe88d9664 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -74,10 +74,10 @@
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <56>;
- ti,sci-rm-range-girq = <0x4>;
+ ti,sci-dev-id = <156>;
+ ti,interrupt-ranges = <0 712 16>;
};
wkup_gpio0: wkup_gpio0@42110000 {
@@ -86,7 +86,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&intr_wkup_gpio>;
- interrupts = <59 128>, <59 129>, <59 130>, <59 131>;
+ interrupts = <60>, <61>, <62>, <63>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <56>;
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index 611e66207010..b8a8a0fcb8af 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -384,7 +384,7 @@
};
&mailbox0_cluster0 {
- interrupts = <164 0>;
+ interrupts = <436>;
mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
ti,mbox-tx = <1 0 0>;
@@ -393,7 +393,7 @@
};
&mailbox0_cluster1 {
- interrupts = <165 0>;
+ interrupts = <432>;
mbox_mcu_r5fss0_core1: mbox-mcu-r5fss0-core1 {
ti,mbox-tx = <1 0 0>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
index 8bc1e6ecc50e..e8fc01d97ada 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
@@ -287,7 +287,7 @@
};
&mailbox0_cluster0 {
- interrupts = <214 0>;
+ interrupts = <436>;
mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
ti,mbox-rx = <0 0 0>;
@@ -301,7 +301,7 @@
};
&mailbox0_cluster1 {
- interrupts = <215 0>;
+ interrupts = <432>;
mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 {
ti,mbox-rx = <0 0 0>;
@@ -315,7 +315,7 @@
};
&mailbox0_cluster2 {
- interrupts = <216 0>;
+ interrupts = <428>;
mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 {
ti,mbox-rx = <0 0 0>;
@@ -329,7 +329,7 @@
};
&mailbox0_cluster3 {
- interrupts = <217 0>;
+ interrupts = <424>;
mbox_c66_0: mbox-c66-0 {
ti,mbox-rx = <0 0 0>;
@@ -343,7 +343,7 @@
};
&mailbox0_cluster4 {
- interrupts = <218 0>;
+ interrupts = <420>;
mbox_c71_0: mbox-c71-0 {
ti,mbox-rx = <0 0 0>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index d14060207f00..12ceea9b3c9a 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -80,10 +80,10 @@
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <14>;
- ti,sci-rm-range-girq = <0x1>;
+ ti,sci-dev-id = <131>;
+ ti,interrupt-ranges = <8 392 56>;
};
main_navss {
@@ -101,10 +101,12 @@
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <14>;
- ti,sci-rm-range-girq = <0>, <2>;
+ ti,sci-dev-id = <213>;
+ ti,interrupt-ranges = <0 64 64>,
+ <64 448 64>,
+ <128 672 64>;
};
main_udmass_inta: interrupt-controller@33d00000 {
@@ -115,8 +117,7 @@
msi-controller;
ti,sci = <&dmsc>;
ti,sci-dev-id = <209>;
- ti,sci-rm-range-vint = <0xa>;
- ti,sci-rm-range-global-event = <0xd>;
+ ti,interrupt-ranges = <0 0 256>;
};
secure_proxy_main: mailbox@32c00000 {
@@ -296,7 +297,7 @@
reg-names = "cpts";
clocks = <&k3_clks 201 1>;
clock-names = "cpts";
- interrupts-extended = <&main_navss_intr 201 0>;
+ interrupts-extended = <&main_navss_intr 391>;
interrupt-names = "cpts";
ti,cpts-periodic-outputs = <6>;
ti,cpts-ext-ts-inputs = <8>;
@@ -688,8 +689,8 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <105 0>, <105 1>, <105 2>, <105 3>,
- <105 4>, <105 5>, <105 6>, <105 7>;
+ interrupts = <256>, <257>, <258>, <259>,
+ <260>, <261>, <262>, <263>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
@@ -705,7 +706,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <106 0>, <106 1>, <106 2>;
+ interrupts = <288>, <289>, <290>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
@@ -721,8 +722,8 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <107 0>, <107 1>, <107 2>, <107 3>,
- <107 4>, <107 5>, <107 6>, <107 7>;
+ interrupts = <264>, <265>, <266>, <267>,
+ <268>, <269>, <270>, <271>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
@@ -738,7 +739,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <108 0>, <108 1>, <108 2>;
+ interrupts = <292>, <293>, <294>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
@@ -754,8 +755,8 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <109 0>, <109 1>, <109 2>, <109 3>,
- <109 4>, <109 5>, <109 6>, <109 7>;
+ interrupts = <272>, <273>, <274>, <275>,
+ <276>, <277>, <278>, <279>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
@@ -771,7 +772,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <110 0>, <110 1>, <110 2>;
+ interrupts = <296>, <297>, <298>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
@@ -787,8 +788,8 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <111 0>, <111 1>, <111 2>, <111 3>,
- <111 4>, <111 5>, <111 6>, <111 7>;
+ interrupts = <280>, <281>, <282>, <283>,
+ <284>, <285>, <286>, <287>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
@@ -804,7 +805,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <112 0>, <112 1>, <112 2>;
+ interrupts = <300>, <301>, <302>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index 30a735bcd0c8..c4a48e8d420a 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -101,10 +101,10 @@
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <14>;
- ti,sci-rm-range-girq = <0x5>;
+ ti,sci-dev-id = <137>;
+ ti,interrupt-ranges = <16 960 16>;
};
wkup_gpio0: gpio@42110000 {
@@ -113,8 +113,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&wkup_gpio_intr>;
- interrupts = <113 0>, <113 1>, <113 2>,
- <113 3>, <113 4>, <113 5>;
+ interrupts = <103>, <104>, <105>, <106>, <107>, <108>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <84>;
@@ -130,8 +129,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&wkup_gpio_intr>;
- interrupts = <114 0>, <114 1>, <114 2>,
- <114 3>, <114 4>, <114 5>;
+ interrupts = <112>, <113>, <114>, <115>, <116>, <117>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <84>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 9174ddc76bdc..3ec99f13c259 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -13,6 +13,7 @@
*/
#include <dt-bindings/power/xlnx-zynqmp-power.h>
+#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
/ {
compatible = "xlnx,zynqmp";
@@ -558,6 +559,15 @@
};
};
+ psgtr: phy@fd400000 {
+ compatible = "xlnx,zynqmp-psgtr-v1.1";
+ status = "disabled";
+ reg = <0x0 0xfd400000 0x0 0x40000>,
+ <0x0 0xfd3d0000 0x0 0x1000>;
+ reg-names = "serdes", "siou";
+ #phy-cells = <4>;
+ };
+
rtc: rtc@ffa60000 {
compatible = "xlnx,zynqmp-rtc";
status = "disabled";
@@ -601,7 +611,7 @@
power-domains = <&zynqmp_firmware PD_SD_1>;
};
- smmu: smmu@fd800000 {
+ smmu: iommu@fd800000 {
compatible = "arm,mmu-500";
reg = <0x0 0xfd800000 0x0 0x20000>;
status = "disabled";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index e0f33826819f..6d04b9577b0b 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -724,6 +724,17 @@ CONFIG_USB_GADGET=y
CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_RENESAS_USB3=m
CONFIG_USB_TEGRA_XUDC=m
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_OBEX=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_ECM_SUBSET=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_TYPEC=m
CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_FUSB302=m
@@ -914,6 +925,7 @@ CONFIG_ARCH_TEGRA_194_SOC=y
CONFIG_ARCH_K3_AM6_SOC=y
CONFIG_ARCH_K3_J721E_SOC=y
CONFIG_TI_SCI_PM_DOMAINS=y
+CONFIG_EXTCON_PTN5150=m
CONFIG_EXTCON_USB_GPIO=y
CONFIG_EXTCON_USBC_CROS_EC=y
CONFIG_IIO=y
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index b357164379f6..63a52ad9a75c 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -788,7 +788,7 @@ SYM_FUNC_START_LOCAL(__xts_crypt8)
0: mov bskey, x21
mov rounds, x22
- br x7
+ br x16
SYM_FUNC_END(__xts_crypt8)
.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
@@ -806,7 +806,7 @@ SYM_FUNC_END(__xts_crypt8)
uzp1 v30.4s, v30.4s, v25.4s
ld1 {v25.16b}, [x24]
-99: adr x7, \do8
+99: adr x16, \do8
bl __xts_crypt8
ldp q16, q17, [sp, #.Lframe_local_offset]
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 44209f6146aa..ffb1a40d5475 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -79,10 +79,5 @@ arch_get_random_seed_long_early(unsigned long *v)
}
#define arch_get_random_seed_long_early arch_get_random_seed_long_early
-#else
-
-static inline bool __arm64_rndr(unsigned long *v) { return false; }
-static inline bool __init __early_cpu_has_rndr(void) { return false; }
-
#endif /* CONFIG_ARCH_RANDOM */
#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
index c7f67da13cd9..3e7943fd17a4 100644
--- a/arch/arm64/include/asm/boot.h
+++ b/arch/arm64/include/asm/boot.h
@@ -13,8 +13,7 @@
#define MAX_FDT_SIZE SZ_2M
/*
- * arm64 requires the kernel image to be placed
- * TEXT_OFFSET bytes beyond a 2 MB aligned base
+ * arm64 requires the kernel image to be placed at a 2 MB aligned base address
*/
#define MIN_KIMG_ALIGN SZ_2M
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 51a7ce87cdfe..6fb2e6bcc392 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -2,6 +2,12 @@
#ifndef __ASM_COMPILER_H
#define __ASM_COMPILER_H
+#ifdef ARM64_ASM_ARCH
+#define ARM64_ASM_PREAMBLE ".arch " ARM64_ASM_ARCH "\n"
+#else
+#define ARM64_ASM_PREAMBLE
+#endif
+
/*
* The EL0/EL1 pointer bits used by a pointer authentication code.
* This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
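Together with the asm-arch plumbing in the arm64 Makefile above, ARM64_ASM_PREAMBLE lets C files prefix inline assembly with the newest -march the assembler was probed for. A sketch modelled on the __TLBI_* helpers in asm/tlbflush.h; the macro name here is illustrative:

/* Prefix inline asm with the recorded target architecture so that an
 * integrated assembler accepts e.g. ARMv8.4 range-TLBI mnemonics even
 * when the base -march of the compilation unit is lower. */
#define my_tlbi(op)						\
	asm volatile(ARM64_ASM_PREAMBLE				\
		     "tlbi " #op "\n"				\
		     : : : "memory")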
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index d28e8f37d3b4..e95c4df83911 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -21,7 +21,7 @@
* mechanism for doing so, tests whether it is possible to boot
* the given CPU.
* @cpu_boot: Boots a cpu into the kernel.
- * @cpu_postboot: Optionally, perform any post-boot cleanup or necesary
+ * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
* synchronisation. Called from the cpu being booted.
* @cpu_can_disable: Determines whether a CPU can be disabled based on
* mechanism-specific information.
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 07b643a70710..42868dbd29fd 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -31,13 +31,13 @@
#define ARM64_HAS_DCPOP 21
#define ARM64_SVE 22
#define ARM64_UNMAP_KERNEL_AT_EL0 23
-#define ARM64_HARDEN_BRANCH_PREDICTOR 24
+#define ARM64_SPECTRE_V2 24
#define ARM64_HAS_RAS_EXTN 25
#define ARM64_WORKAROUND_843419 26
#define ARM64_HAS_CACHE_IDC 27
#define ARM64_HAS_CACHE_DIC 28
#define ARM64_HW_DBM 29
-#define ARM64_SSBD 30
+#define ARM64_SPECTRE_V4 30
#define ARM64_MISMATCHED_CACHE_TYPE 31
#define ARM64_HAS_STAGE2_FWB 32
#define ARM64_HAS_CRC32 33
@@ -64,7 +64,8 @@
#define ARM64_BTI 54
#define ARM64_HAS_ARMv8_4_TTL 55
#define ARM64_HAS_TLB_RANGE 56
+#define ARM64_MTE 57
-#define ARM64_NCAPS 57
+#define ARM64_NCAPS 58
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 89b4f0142c28..f7e7144af174 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -358,7 +358,7 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
}
/*
- * Generic helper for handling capabilties with multiple (match,enable) pairs
+ * Generic helper for handling capabilities with multiple (match,enable) pairs
* of call backs, sharing the same capability bit.
* Iterate over each entry to see if at least one matches.
*/
@@ -681,6 +681,12 @@ static __always_inline bool system_uses_irq_prio_masking(void)
cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
}
+static inline bool system_supports_mte(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_MTE) &&
+ cpus_have_const_cap(ARM64_MTE);
+}
+
static inline bool system_has_prio_mask_debugging(void)
{
return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
@@ -698,30 +704,6 @@ static inline bool system_supports_tlb_range(void)
cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
}
-#define ARM64_BP_HARDEN_UNKNOWN -1
-#define ARM64_BP_HARDEN_WA_NEEDED 0
-#define ARM64_BP_HARDEN_NOT_REQUIRED 1
-
-int get_spectre_v2_workaround_state(void);
-
-#define ARM64_SSBD_UNKNOWN -1
-#define ARM64_SSBD_FORCE_DISABLE 0
-#define ARM64_SSBD_KERNEL 1
-#define ARM64_SSBD_FORCE_ENABLE 2
-#define ARM64_SSBD_MITIGATED 3
-
-static inline int arm64_get_ssbd_state(void)
-{
-#ifdef CONFIG_ARM64_SSBD
- extern int ssbd_state;
- return ssbd_state;
-#else
- return ARM64_SSBD_UNKNOWN;
-#endif
-}
-
-void arm64_set_ssbd_mitigation(bool state);
-
extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
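The new system_supports_mte() helper gates MTE-only paths at runtime while letting the compiler discard them entirely when CONFIG_ARM64_MTE is off. A sketch of the guard pattern, modelled on the set_pte_at() hook added elsewhere in this series (pte_tagged() is also introduced by the series):

static void maybe_sync_tags(pte_t *ptep, pte_t pte)
{
	/* only touch tag storage for present, tagged, non-special PTEs */
	if (system_supports_mte() && pte_present(pte) &&
	    pte_tagged(pte) && !pte_special(pte))
		mte_sync_tags(ptep, pte);
}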
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 035003acfa87..22c81f1edda2 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -35,7 +35,9 @@
#define ESR_ELx_EC_SYS64 (0x18)
#define ESR_ELx_EC_SVE (0x19)
#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
-/* Unallocated EC: 0x1b - 0x1E */
+/* Unallocated EC: 0x1B */
+#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
+/* Unallocated EC: 0x1D - 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 7577a754d443..99b9383cd036 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -47,4 +47,5 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
+void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 840a35ed92ec..b15eb4a3e6b2 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -22,6 +22,15 @@ struct exception_table_entry
#define ARCH_HAS_RELATIVE_EXTABLE
+static inline bool in_bpf_jit(struct pt_regs *regs)
+{
+ if (!IS_ENABLED(CONFIG_BPF_JIT))
+ return false;
+
+ return regs->pc >= BPF_JIT_REGION_START &&
+ regs->pc < BPF_JIT_REGION_END;
+}
+
#ifdef CONFIG_BPF_JIT
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
struct pt_regs *regs);
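A sketch of the intended caller of in_bpf_jit(), modelled on fixup_exception() in arch/arm64/mm/extable.c; the surrounding function is reproduced from memory and is an approximation:

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (!fixup)
		return 0;

	/* BPF JIT images carry their own fixup handler */
	if (in_bpf_jit(regs))
		return arm64_bpf_fixup_exception(fixup, regs);

	regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
	return 1;
}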
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 59f10dd13f12..bec5f14b622a 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -69,6 +69,9 @@ static inline void *sve_pffr(struct thread_struct *thread)
extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
unsigned long vq_minus_1);
+extern void sve_flush_live(void);
+extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
+ unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
struct arm64_cpu_capabilities;
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 636e9d9c7929..af43367534c7 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -164,25 +164,59 @@
| ((\np) << 5)
.endm
+/* PFALSE P\np.B */
+.macro _sve_pfalse np
+ _sve_check_preg \np
+ .inst 0x2518e400 \
+ | (\np)
+.endm
+
.macro __for from:req, to:req
.if (\from) == (\to)
- _for__body \from
+ _for__body %\from
.else
- __for \from, (\from) + ((\to) - (\from)) / 2
- __for (\from) + ((\to) - (\from)) / 2 + 1, \to
+ __for %\from, %((\from) + ((\to) - (\from)) / 2)
+ __for %((\from) + ((\to) - (\from)) / 2 + 1), %\to
.endif
.endm
.macro _for var:req, from:req, to:req, insn:vararg
.macro _for__body \var:req
+ .noaltmacro
\insn
+ .altmacro
.endm
+ .altmacro
__for \from, \to
+ .noaltmacro
.purgem _for__body
.endm
+/* Update ZCR_EL1.LEN with the new VQ */
+.macro sve_load_vq xvqminus1, xtmp, xtmp2
+ mrs_s \xtmp, SYS_ZCR_EL1
+ bic \xtmp2, \xtmp, ZCR_ELx_LEN_MASK
+ orr \xtmp2, \xtmp2, \xvqminus1
+ cmp \xtmp2, \xtmp
+ b.eq 921f
+ msr_s SYS_ZCR_EL1, \xtmp2 // self-synchronising
+921:
+.endm
+
+/* Preserve the first 128 bits of Znz and zero the rest. */
+.macro _sve_flush_z nz
+ _sve_check_zreg \nz
+ mov v\nz\().16b, v\nz\().16b
+.endm
+
+.macro sve_flush
+ _for n, 0, 31, _sve_flush_z \n
+ _for n, 0, 15, _sve_pfalse \n
+ _sve_wrffr 0
+.endm
+
.macro sve_save nxbase, xpfpsr, nxtmp
_for n, 0, 31, _sve_str_v \n, \nxbase, \n - 34
_for n, 0, 15, _sve_str_p \n, \nxbase, \n - 16
@@ -197,13 +231,7 @@
.endm
.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
- mrs_s x\nxtmp, SYS_ZCR_EL1
- bic \xtmp2, x\nxtmp, ZCR_ELx_LEN_MASK
- orr \xtmp2, \xtmp2, \xvqminus1
- cmp \xtmp2, x\nxtmp
- b.eq 921f
- msr_s SYS_ZCR_EL1, \xtmp2 // self-synchronising
-921:
+ sve_load_vq \xvqminus1, x\nxtmp, \xtmp2
_for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34
_sve_ldr_p 0, \nxbase
_sve_wrffr 0
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 22f73fe09030..9a5498c2c8ee 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -8,18 +8,27 @@
#include <uapi/asm/hwcap.h>
#include <asm/cpufeature.h>
+#define COMPAT_HWCAP_SWP (1 << 0)
#define COMPAT_HWCAP_HALF (1 << 1)
#define COMPAT_HWCAP_THUMB (1 << 2)
+#define COMPAT_HWCAP_26BIT (1 << 3)
#define COMPAT_HWCAP_FAST_MULT (1 << 4)
+#define COMPAT_HWCAP_FPA (1 << 5)
#define COMPAT_HWCAP_VFP (1 << 6)
#define COMPAT_HWCAP_EDSP (1 << 7)
+#define COMPAT_HWCAP_JAVA (1 << 8)
+#define COMPAT_HWCAP_IWMMXT (1 << 9)
+#define COMPAT_HWCAP_CRUNCH (1 << 10)
+#define COMPAT_HWCAP_THUMBEE (1 << 11)
#define COMPAT_HWCAP_NEON (1 << 12)
#define COMPAT_HWCAP_VFPv3 (1 << 13)
+#define COMPAT_HWCAP_VFPV3D16 (1 << 14)
#define COMPAT_HWCAP_TLS (1 << 15)
#define COMPAT_HWCAP_VFPv4 (1 << 16)
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_VFPD32 (1 << 19)
#define COMPAT_HWCAP_LPAE (1 << 20)
#define COMPAT_HWCAP_EVTSTRM (1 << 21)
@@ -95,7 +104,7 @@
#define KERNEL_HWCAP_DGH __khwcap2_feature(DGH)
#define KERNEL_HWCAP_RNG __khwcap2_feature(RNG)
#define KERNEL_HWCAP_BTI __khwcap2_feature(BTI)
-/* reserved for KERNEL_HWCAP_MTE __khwcap2_feature(MTE) */
+#define KERNEL_HWCAP_MTE __khwcap2_feature(MTE)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 0bc46149e491..4b39293d0f72 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -359,9 +359,13 @@ __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
__AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000)
__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
__AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000)
+__AARCH64_INSN_FUNCS(br_auth, 0xFEFFF800, 0xD61F0800)
__AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000)
+__AARCH64_INSN_FUNCS(blr_auth, 0xFEFFF800, 0xD63F0800)
__AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
+__AARCH64_INSN_FUNCS(ret_auth, 0xFFFFFBFF, 0xD65F0BFF)
__AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0)
+__AARCH64_INSN_FUNCS(eret_auth, 0xFFFFFBFF, 0xD69F0BFF)
__AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000)
__AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F)
__AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa4b6521ef14..ff328e5bbb75 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -95,6 +95,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
return res;
}
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 329fb15f6bac..19ca76ea60d9 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -86,7 +86,7 @@
+ EARLY_PGDS((vstart), (vend)) /* each PGDIR needs a next level page table */ \
+ EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \
+ EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
-#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end))
+#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 51c1d9918999..64ce29378467 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -12,6 +12,7 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
+#define HCR_ATA (UL(1) << 56)
#define HCR_FWB (UL(1) << 46)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
@@ -66,18 +67,19 @@
* TWI: Trap WFI
* TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain
- * FB: Force broadcast of all maintainance operations
+ * FB: Force broadcast of all maintenance operations
* AMO: Override CPSR.A and enable signaling with VA
* IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
+ * PTW: Take a stage2 fault if a stage1 walk steps in device memory
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_BSU_IS | HCR_FB | HCR_TAC | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
- HCR_FMO | HCR_IMO)
+ HCR_FMO | HCR_IMO | HCR_PTW )
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
-#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
/* TCR_EL2 Registers bits */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index fb1a922b31ba..7f7072f6cb45 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -9,9 +9,6 @@
#include <asm/virt.h>
-#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
-#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
-
#define ARM_EXIT_WITH_SERROR_BIT 31
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x) (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
@@ -102,11 +99,9 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector)
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
-#endif
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
@@ -169,6 +164,34 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
*__hyp_this_cpu_ptr(sym); \
})
+#define __KVM_EXTABLE(from, to) \
+ " .pushsection __kvm_ex_table, \"a\"\n" \
+ " .align 3\n" \
+ " .long (" #from " - .), (" #to " - .)\n" \
+ " .popsection\n"
+
+
+#define __kvm_at(at_op, addr) \
+( { \
+ int __kvm_at_err = 0; \
+ u64 spsr, elr; \
+ asm volatile( \
+ " mrs %1, spsr_el2\n" \
+ " mrs %2, elr_el2\n" \
+ "1: at "at_op", %3\n" \
+ " isb\n" \
+ " b 9f\n" \
+ "2: msr spsr_el2, %1\n" \
+ " msr elr_el2, %2\n" \
+ " mov %w0, %4\n" \
+ "9:\n" \
+ __KVM_EXTABLE(1b, 2b) \
+ : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \
+ : "r" (addr), "i" (-EFAULT)); \
+ __kvm_at_err; \
+} )
+
+
#else /* __ASSEMBLY__ */
.macro hyp_adr_this_cpu reg, sym, tmp
@@ -193,6 +216,21 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format as _asm_extable, but output to a different section so
+ * that it can be mapped to EL2. The KVM version is not sorted. The caller
+ * must ensure that x18 holds the hypervisor value (so any Shadow-Call-Stack
+ * instrumented code can write to it) and that SPSR_EL2 and ELR_EL2 are
+ * restored by the fixup.
+ */
+.macro _kvm_extable, from, to
+ .pushsection __kvm_ex_table, "a"
+ .align 3
+ .long (\from - .), (\to - .)
+ .popsection
+.endm
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
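The __kvm_at() wrapper above pairs each AT instruction with a __kvm_ex_table entry, so a fault taken during the walk is caught by the fixup instead of being fatal at EL2. A sketch of its use, modelled on __translate_far_to_hpfar() in the hyp switch code; helper names outside this hunk are assumptions:

static bool translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par;

	/* the AT walk itself may now fault; treat that as a failed walk */
	if (__kvm_at("s1e1r", far))
		return false;

	par = read_sysreg(par_el1);
	if (unlikely(par & SYS_PAR_EL1_F))
		return false;

	*hpfar = PAR_TO_HPFAR(par);
	return true;
}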
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 49a55be2b9a2..5ef2669ccd6c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -298,15 +298,15 @@ static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}
-static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}
+/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
- kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+ return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}
static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
@@ -335,6 +335,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}
+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}
+
static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
@@ -372,6 +377,9 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
+ if (kvm_vcpu_abt_iss1tw(vcpu))
+ return true;
+
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
@@ -383,20 +391,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
- bool flag)
-{
- if (flag)
- vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
- else
- vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
-}
-
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 65568b23868a..bb5e5b88d439 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -368,7 +368,6 @@ struct kvm_vcpu_arch {
/* Guest PV state */
struct {
- u64 steal;
u64 last_steal;
gpa_t base;
} steal;
@@ -473,7 +472,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -544,6 +543,7 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
+bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
@@ -631,46 +631,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif
-#define KVM_BP_HARDEN_UNKNOWN -1
-#define KVM_BP_HARDEN_WA_NEEDED 0
-#define KVM_BP_HARDEN_NOT_REQUIRED 1
-
-static inline int kvm_arm_harden_branch_predictor(void)
-{
- switch (get_spectre_v2_workaround_state()) {
- case ARM64_BP_HARDEN_WA_NEEDED:
- return KVM_BP_HARDEN_WA_NEEDED;
- case ARM64_BP_HARDEN_NOT_REQUIRED:
- return KVM_BP_HARDEN_NOT_REQUIRED;
- case ARM64_BP_HARDEN_UNKNOWN:
- default:
- return KVM_BP_HARDEN_UNKNOWN;
- }
-}
-
-#define KVM_SSBD_UNKNOWN -1
-#define KVM_SSBD_FORCE_DISABLE 0
-#define KVM_SSBD_KERNEL 1
-#define KVM_SSBD_FORCE_ENABLE 2
-#define KVM_SSBD_MITIGATED 3
-
-static inline int kvm_arm_have_ssbd(void)
-{
- switch (arm64_get_ssbd_state()) {
- case ARM64_SSBD_FORCE_DISABLE:
- return KVM_SSBD_FORCE_DISABLE;
- case ARM64_SSBD_KERNEL:
- return KVM_SSBD_KERNEL;
- case ARM64_SSBD_FORCE_ENABLE:
- return KVM_SSBD_FORCE_ENABLE;
- case ARM64_SSBD_MITIGATED:
- return KVM_SSBD_MITIGATED;
- case ARM64_SSBD_UNKNOWN:
- default:
- return KVM_SSBD_UNKNOWN;
- }
-}
-
void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 189839c3706a..cff1cebc7590 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -9,6 +9,7 @@
#include <asm/page.h>
#include <asm/memory.h>
+#include <asm/mmu.h>
#include <asm/cpufeature.h>
/*
@@ -430,19 +431,17 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
return ret;
}
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
* EL2 vectors can be mapped and rerouted in a number of ways,
* depending on the kernel configuration and CPU present:
*
- * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
- * hardening sequence is placed in one of the vector slots, which is
- * executed before jumping to the real vectors.
+ * - If the CPU is affected by Spectre-v2, the hardening sequence is
+ * placed in one of the vector slots, which is executed before jumping
+ * to the real vectors.
*
- * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
- * ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
- * hardening sequence is mapped next to the idmap page, and executed
- * before jumping to the real vectors.
+ * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
+ * containing the hardening sequence is mapped next to the idmap page,
+ * and executed before jumping to the real vectors.
*
* - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
* empty slot is selected, mapped next to the idmap page, and
@@ -452,19 +451,16 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
* VHE, as we don't have hypervisor-specific mappings. If the system
* is VHE and yet selects this capability, it will be ignored.
*/
-#include <asm/mmu.h>
-
extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;
-/* This is called on both VHE and !VHE systems */
static inline void *kvm_get_hyp_vector(void)
{
struct bp_hardening_data *data = arm64_get_bp_hardening_data();
void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
int slot = -1;
- if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+ if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
slot = data->hyp_vectors_slot;
}
@@ -481,76 +477,6 @@ static inline void *kvm_get_hyp_vector(void)
return vect;
}
-/* This is only called on a !VHE system */
-static inline int kvm_map_vectors(void)
-{
- /*
- * HBP = ARM64_HARDEN_BRANCH_PREDICTOR
- * HEL2 = ARM64_HARDEN_EL2_VECTORS
- *
- * !HBP + !HEL2 -> use direct vectors
- * HBP + !HEL2 -> use hardened vectors in place
- * !HBP + HEL2 -> allocate one vector slot and use exec mapping
- * HBP + HEL2 -> use hardened vertors and use exec mapping
- */
- if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
- __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
- __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
- }
-
- if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
- phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
- unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-
- /*
- * Always allocate a spare vector slot, as we don't
- * know yet which CPUs have a BP hardening slot that
- * we can reuse.
- */
- __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
- BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
- return create_hyp_exec_mappings(vect_pa, size,
- &__kvm_bp_vect_base);
- }
-
- return 0;
-}
-#else
-static inline void *kvm_get_hyp_vector(void)
-{
- return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
-}
-
-static inline int kvm_map_vectors(void)
-{
- return 0;
-}
-#endif
-
-#ifdef CONFIG_ARM64_SSBD
-DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-static inline int hyp_map_aux_data(void)
-{
- int cpu, err;
-
- for_each_possible_cpu(cpu) {
- u64 *ptr;
-
- ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
- err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
- if (err)
- return err;
- }
- return 0;
-}
-#else
-static inline int hyp_map_aux_data(void)
-{
- return 0;
-}
-#endif
-
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
/*
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index afa722504bfd..43640d797455 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -126,13 +126,18 @@
/*
* Memory types available.
+ *
+ * IMPORTANT: MT_NORMAL must be index 0 since vm_get_page_prot() may 'or' in
+ * the MT_NORMAL_TAGGED memory type for PROT_MTE mappings. Note
+ * that protection_map[] only contains MT_NORMAL attributes.
*/
-#define MT_DEVICE_nGnRnE 0
-#define MT_DEVICE_nGnRE 1
-#define MT_DEVICE_GRE 2
-#define MT_NORMAL_NC 3
-#define MT_NORMAL 4
-#define MT_NORMAL_WT 5
+#define MT_NORMAL 0
+#define MT_NORMAL_TAGGED 1
+#define MT_NORMAL_NC 2
+#define MT_NORMAL_WT 3
+#define MT_DEVICE_nGnRnE 4
+#define MT_DEVICE_nGnRE 5
+#define MT_DEVICE_GRE 6
/*
* Memory types for Stage-2 translation
@@ -169,7 +174,7 @@ extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
-/* the virtual base of the kernel image (minus TEXT_OFFSET) */
+/* the virtual base of the kernel image */
extern u64 kimage_vaddr;
/* the offset between the kernel virtual and physical mappings */
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 081ec8de9ea6..e3e28f7daf62 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -9,16 +9,53 @@
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
unsigned long pkey __always_unused)
{
+ unsigned long ret = 0;
+
if (system_supports_bti() && (prot & PROT_BTI))
- return VM_ARM64_BTI;
+ ret |= VM_ARM64_BTI;
- return 0;
+ if (system_supports_mte() && (prot & PROT_MTE))
+ ret |= VM_MTE;
+
+ return ret;
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
+static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
+{
+ /*
+ * Only allow MTE on anonymous mappings as these are guaranteed to be
+ * backed by tags-capable memory. The vm_flags may be overridden by a
+ * filesystem supporting MTE (RAM-based).
+ */
+ if (system_supports_mte() && (flags & MAP_ANONYMOUS))
+ return VM_MTE_ALLOWED;
+
+ return 0;
+}
+#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
+
static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
- return (vm_flags & VM_ARM64_BTI) ? __pgprot(PTE_GP) : __pgprot(0);
+ pteval_t prot = 0;
+
+ if (vm_flags & VM_ARM64_BTI)
+ prot |= PTE_GP;
+
+ /*
+ * There are two conditions required for returning a Normal Tagged
+ * memory type: (1) the user requested it via PROT_MTE passed to
+ * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
+ * register (1) as VM_MTE in the vma->vm_flags and (2) as
+ * VM_MTE_ALLOWED. Note that the latter can only be set during the
+ * mmap() call since mprotect() does not accept MAP_* flags.
+ * Checking for VM_MTE only is sufficient since arch_validate_flags()
+ * does not permit (VM_MTE & !VM_MTE_ALLOWED).
+ */
+ if (vm_flags & VM_MTE)
+ prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+
+ return __pgprot(prot);
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
@@ -30,8 +67,21 @@ static inline bool arch_validate_prot(unsigned long prot,
if (system_supports_bti())
supported |= PROT_BTI;
+ if (system_supports_mte())
+ supported |= PROT_MTE;
+
return (prot & ~supported) == 0;
}
#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
+static inline bool arch_validate_flags(unsigned long vm_flags)
+{
+ if (!system_supports_mte())
+ return true;
+
+ /* only allow VM_MTE if VM_MTE_ALLOWED has been set previously */
+ return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED);
+}
+#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
+
#endif /* ! __ASM_MMAN_H__ */
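From the userspace side, the VM_MTE/VM_MTE_ALLOWED rule encoded above means PROT_MTE is honoured on anonymous mappings (which gain VM_MTE_ALLOWED at mmap() time) and rejected elsewhere, e.g. mprotect(PROT_MTE) on a regular file mapping fails with EINVAL. A minimal sketch; the PROT_MTE fallback define is an assumption for headers predating the MTE uapi, and the call only succeeds on an MTE-capable kernel and CPU:

#include <stdio.h>
#include <sys/mman.h>

#ifndef PROT_MTE
#define PROT_MTE 0x20	/* arm64 uapi value */
#endif

int main(void)
{
	/* anonymous mapping: VM_MTE_ALLOWED is set, so PROT_MTE succeeds */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(PROT_MTE)");
		return 1;
	}
	munmap(p, 4096);
	return 0;
}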
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index a7a5ecaa2e83..b2e91c187e2a 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -17,11 +17,14 @@
#ifndef __ASSEMBLY__
+#include <linux/refcount.h>
+
typedef struct {
atomic64_t id;
#ifdef CONFIG_COMPAT
void *sigpage;
#endif
+ refcount_t pinned;
void *vdso;
unsigned long flags;
} mm_context_t;
@@ -45,7 +48,6 @@ struct bp_hardening_data {
bp_hardening_cb_t fn;
};
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
@@ -57,21 +59,13 @@ static inline void arm64_apply_bp_hardening(void)
{
struct bp_hardening_data *d;
- if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+ if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
return;
d = arm64_get_bp_hardening_data();
if (d->fn)
d->fn();
}
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
- return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void) { }
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
extern void arm64_memblock_init(void);
extern void paging_init(void);
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index f2d7537d6f83..0672236e1aea 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -177,7 +177,13 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
#define destroy_context(mm) do { } while(0)
void check_and_switch_context(struct mm_struct *mm);
-#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ atomic64_set(&mm->context.id, 0);
+ refcount_set(&mm->context.pinned, 0);
+ return 0;
+}
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -248,6 +254,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);
+unsigned long arm64_mm_context_get(struct mm_struct *mm);
+void arm64_mm_context_put(struct mm_struct *mm);
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_MMU_CONTEXT_H */
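
A hedged usage sketch of the new ASID pinning interface (the caller and flow
are illustrative; the implementation lives in arch/arm64/mm/context.c,
outside this hunk):

    /* Illustrative only: pin the ASID while a device shares the page tables. */
    static void example_sva_bind(struct mm_struct *mm)
    {
            unsigned long asid = arm64_mm_context_get(mm);

            /* ... program 'asid' into an SMMU for shared-VA DMA ... */

            arm64_mm_context_put(mm); /* release once the device is unbound */
    }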
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
new file mode 100644
index 000000000000..1c99fcadb58c
--- /dev/null
+++ b/arch/arm64/include/asm/mte.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_H
+#define __ASM_MTE_H
+
+#define MTE_GRANULE_SIZE UL(16)
+#define MTE_GRANULE_MASK (~(MTE_GRANULE_SIZE - 1))
+#define MTE_TAG_SHIFT 56
+#define MTE_TAG_SIZE 4
+
+#ifndef __ASSEMBLY__
+
+#include <linux/page-flags.h>
+
+#include <asm/pgtable-types.h>
+
+void mte_clear_page_tags(void *addr);
+unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
+ unsigned long n);
+unsigned long mte_copy_tags_to_user(void __user *to, void *from,
+ unsigned long n);
+int mte_save_tags(struct page *page);
+void mte_save_page_tags(const void *page_addr, void *tag_storage);
+bool mte_restore_tags(swp_entry_t entry, struct page *page);
+void mte_restore_page_tags(void *page_addr, const void *tag_storage);
+void mte_invalidate_tags(int type, pgoff_t offset);
+void mte_invalidate_tags_area(int type);
+void *mte_allocate_tag_storage(void);
+void mte_free_tag_storage(char *storage);
+
+#ifdef CONFIG_ARM64_MTE
+
+/* track which pages have valid allocation tags */
+#define PG_mte_tagged PG_arch_2
+
+void mte_sync_tags(pte_t *ptep, pte_t pte);
+void mte_copy_page_tags(void *kto, const void *kfrom);
+void flush_mte_state(void);
+void mte_thread_switch(struct task_struct *next);
+void mte_suspend_exit(void);
+long set_mte_ctrl(struct task_struct *task, unsigned long arg);
+long get_mte_ctrl(struct task_struct *task);
+int mte_ptrace_copy_tags(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data);
+
+#else
+
+/* unused if !CONFIG_ARM64_MTE, silence the compiler */
+#define PG_mte_tagged 0
+
+static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
+{
+}
+static inline void mte_copy_page_tags(void *kto, const void *kfrom)
+{
+}
+static inline void flush_mte_state(void)
+{
+}
+static inline void mte_thread_switch(struct task_struct *next)
+{
+}
+static inline void mte_suspend_exit(void)
+{
+}
+static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
+{
+ return 0;
+}
+static inline long get_mte_ctrl(struct task_struct *task)
+{
+ return 0;
+}
+static inline int mte_ptrace_copy_tags(struct task_struct *child,
+ long request, unsigned long addr,
+ unsigned long data)
+{
+ return -EIO;
+}
+
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_MTE_H */
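
Given the constants above, each 16-byte granule carries a 4-bit tag, so the
tags of one page pack into PAGE_SIZE / MTE_GRANULE_SIZE nibbles. An
illustrative helper (the in-tree tag-storage code is equivalent but not part
of this hunk):

    /* Bytes needed to stash the allocation tags of one page. */
    #define EXAMPLE_PAGE_TAG_STORAGE \
            (PAGE_SIZE / MTE_GRANULE_SIZE * MTE_TAG_SIZE / 8)
    /* 4K pages: 4096 / 16 = 256 granules, 4 bits each = 128 bytes per page. */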
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index 626ad01e83bf..dd870390d639 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -25,6 +25,9 @@ const struct cpumask *cpumask_of_node(int node);
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
return node_to_cpumask_map[node];
}
#endif
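
The new guard matters for callers that pass a device's node id straight
through; a hedged sketch:

    /* Illustrative caller: dev_to_node() may legitimately return NUMA_NO_NODE
     * for devices with no firmware-assigned proximity domain; the guard above
     * makes that degrade to "any CPU" instead of an out-of-bounds index. */
    static const struct cpumask *example_device_cpus(struct device *dev)
    {
            return cpumask_of_node(dev_to_node(dev));
    }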
diff --git a/arch/arm64/include/asm/page-def.h b/arch/arm64/include/asm/page-def.h
index f99d48ecbeef..2403f7b4cdbf 100644
--- a/arch/arm64/include/asm/page-def.h
+++ b/arch/arm64/include/asm/page-def.h
@@ -11,13 +11,8 @@
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
-/* CONT_SHIFT determines the number of pages which can be tracked together */
#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT
-#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
-#define CONT_SIZE (_AC(1, UL) << (CONT_SHIFT + PAGE_SHIFT))
-#define CONT_MASK (~(CONT_SIZE-1))
-
#endif /* __ASM_PAGE_DEF_H */
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index c01b52add377..012cffc574e8 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -15,18 +15,25 @@
#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
#include <asm/pgtable-types.h>
-extern void __cpu_clear_user_page(void *p, unsigned long user);
-extern void __cpu_copy_user_page(void *to, const void *from,
- unsigned long user);
+struct page;
+struct vm_area_struct;
+
extern void copy_page(void *to, const void *from);
extern void clear_page(void *to);
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_highpage(struct page *to, struct page *from);
+#define __HAVE_ARCH_COPY_HIGHPAGE
+
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr)
-#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
typedef struct page *pgtable_t;
@@ -36,7 +43,7 @@ extern int pfn_valid(unsigned long);
#endif /* !__ASSEMBLY__ */
-#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
+#define VM_DATA_DEFAULT_FLAGS (VM_DATA_FLAGS_TSK_EXEC | VM_MTE_ALLOWED)
#include <asm-generic/getorder.h>
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index 70b323cf8300..b33ca260e3c9 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -17,6 +17,7 @@
#define pcibios_assign_all_busses() \
(pci_has_flag(PCI_REASSIGN_ALL_BUS))
+#define arch_can_pci_mmap_wc() 1
#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
extern int isa_dma_bridge_buggy;
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 2c2d7dbe8a02..60731f602d3e 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -236,6 +236,9 @@
#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+/* PMMIR_EL1.SLOTS mask */
+#define ARMV8_PMU_SLOTS_MASK 0xff
+
#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
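
A hedged sketch of how the new field would be consumed (the real reader is
the arm64 PMU driver, not shown in this hunk):

    /* Illustrative only: PMMIR_EL1 exists from ARMv8.4-PMU onwards. */
    static unsigned int example_pmu_slots(void)
    {
            return read_sysreg_s(SYS_PMMIR_EL1) & ARMV8_PMU_SLOTS_MASK;
    }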
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index d400a4d9aee2..94b3f2ac2e9d 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -81,25 +81,15 @@
/*
* Contiguous page definitions.
*/
-#ifdef CONFIG_ARM64_64K_PAGES
-#define CONT_PTE_SHIFT (5 + PAGE_SHIFT)
-#define CONT_PMD_SHIFT (5 + PMD_SHIFT)
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define CONT_PTE_SHIFT (7 + PAGE_SHIFT)
-#define CONT_PMD_SHIFT (5 + PMD_SHIFT)
-#else
-#define CONT_PTE_SHIFT (4 + PAGE_SHIFT)
-#define CONT_PMD_SHIFT (4 + PMD_SHIFT)
-#endif
-
+#define CONT_PTE_SHIFT (CONFIG_ARM64_CONT_PTE_SHIFT + PAGE_SHIFT)
#define CONT_PTES (1 << (CONT_PTE_SHIFT - PAGE_SHIFT))
#define CONT_PTE_SIZE (CONT_PTES * PAGE_SIZE)
#define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1))
+
+#define CONT_PMD_SHIFT (CONFIG_ARM64_CONT_PMD_SHIFT + PMD_SHIFT)
#define CONT_PMDS (1 << (CONT_PMD_SHIFT - PMD_SHIFT))
#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1))
-/* the numerical offset of the PTE within a range of CONT_PTES */
-#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))
/*
* Hardware page table definitions.
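
Worked example of the new derivation, assuming 4K pages and the Kconfig
defaults CONFIG_ARM64_CONT_PTE_SHIFT=4 and CONFIG_ARM64_CONT_PMD_SHIFT=4:

    /* PAGE_SHIFT = 12:
     *   CONT_PTE_SHIFT = 4 + 12 = 16
     *   CONT_PTES      = 1 << (16 - 12) = 16 contiguous PTEs
     *   CONT_PTE_SIZE  = 16 * 4KiB = 64KiB covered by one contiguous hint
     * PMD_SHIFT = 21 (4-level, 4K pages):
     *   CONT_PMD_SHIFT = 4 + 21 = 25
     *   CONT_PMD_SIZE  = 16 * 2MiB = 32MiB
     */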
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 4d867c6446c4..4cd0d6ca8aa1 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -19,6 +19,13 @@
#define PTE_DEVMAP (_AT(pteval_t, 1) << 57)
#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
+/*
+ * This bit indicates that the entry is present, i.e. pmd_page()

+ * still points to a valid huge page in memory even if the pmd
+ * has been invalidated.
+ */
+#define PMD_PRESENT_INVALID (_AT(pteval_t, 1) << 59) /* only when !PMD_SECT_VALID */
+
#ifndef __ASSEMBLY__
#include <asm/cpufeature.h>
@@ -50,6 +57,7 @@ extern bool arm64_use_ng_mappings;
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_NORMAL_TAGGED (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -59,6 +67,7 @@ extern bool arm64_use_ng_mappings;
#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT
#define PAGE_KERNEL __pgprot(PROT_NORMAL)
+#define PAGE_KERNEL_TAGGED __pgprot(PROT_NORMAL_TAGGED)
#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index d5d3fbe73953..a11bf52e0c38 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -9,6 +9,7 @@
#include <asm/proc-fns.h>
#include <asm/memory.h>
+#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>
@@ -35,11 +36,6 @@
extern struct page *vmemmap;
-extern void __pte_error(const char *file, int line, unsigned long val);
-extern void __pmd_error(const char *file, int line, unsigned long val);
-extern void __pud_error(const char *file, int line, unsigned long val);
-extern void __pgd_error(const char *file, int line, unsigned long val);
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
@@ -51,13 +47,22 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
+ * Outside of a few very special situations (e.g. hibernation), we always
+ * use broadcast TLB invalidation instructions, therefore a spurious page
+ * fault on one CPU which has been handled concurrently by another CPU
+ * does not need to perform additional invalidation.
+ */
+#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
+
+/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page))
-#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
/*
* Macros to convert between a physical address and its placement in a
@@ -90,6 +95,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP))
+#define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \
+ PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pte_cont_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \
@@ -145,6 +152,18 @@ static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
return pte;
}
+static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
+{
+ pmd_val(pmd) &= ~pgprot_val(prot);
+ return pmd;
+}
+
+static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
+{
+ pmd_val(pmd) |= pgprot_val(prot);
+ return pmd;
+}
+
static inline pte_t pte_wrprotect(pte_t pte)
{
pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
@@ -284,6 +303,10 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
__sync_icache_dcache(pte);
+ if (system_supports_mte() &&
+ pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
+ mte_sync_tags(ptep, pte);
+
__check_racy_pte_update(mm, ptep, pte);
set_pte(ptep, pte);
@@ -363,15 +386,24 @@ static inline int pmd_protnone(pmd_t pmd)
}
#endif
+#define pmd_present_invalid(pmd) (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
+}
+
/*
* THP definitions.
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#define pmd_present(pmd) pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_valid(pmd) pte_valid(pmd_pte(pmd))
@@ -381,7 +413,14 @@ static inline int pmd_protnone(pmd_t pmd)
#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mkinvalid(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
+
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+ pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
+ pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));
+
+ return pmd;
+}
#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
@@ -541,7 +580,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#if CONFIG_PGTABLE_LEVELS > 2
-#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
@@ -608,7 +648,8 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
#if CONFIG_PGTABLE_LEVELS > 3
-#define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud))
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_none(p4d) (!p4d_val(p4d))
#define p4d_bad(p4d) (!(p4d_val(p4d) & 2))
@@ -667,15 +708,21 @@ static inline unsigned long p4d_page_vaddr(p4d_t p4d)
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
-#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
#define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap() clear_fixmap(FIX_PGD)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
+ /*
+ * Normal and Normal-Tagged are two different memory types and indices
+ * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
+ */
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
- PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP;
+ PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
+ PTE_ATTRINDX_MASK;
/* preserve the hardware dirty information */
if (pte_hw_dirty(pte))
pte = pte_mkdirty(pte);
@@ -847,6 +894,11 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
+#define __swp_entry_to_pmd(swp) __pmd((swp).val)
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
/*
* Ensure that there are not more swap files than can be encoded in the kernel
* PTEs.
@@ -855,6 +907,38 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
extern int kern_addr_valid(unsigned long addr);
+#ifdef CONFIG_ARM64_MTE
+
+#define __HAVE_ARCH_PREPARE_TO_SWAP
+static inline int arch_prepare_to_swap(struct page *page)
+{
+ if (system_supports_mte())
+ return mte_save_tags(page);
+ return 0;
+}
+
+#define __HAVE_ARCH_SWAP_INVALIDATE
+static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
+{
+ if (system_supports_mte())
+ mte_invalidate_tags(type, offset);
+}
+
+static inline void arch_swap_invalidate_area(int type)
+{
+ if (system_supports_mte())
+ mte_invalidate_tags_area(type);
+}
+
+#define __HAVE_ARCH_SWAP_RESTORE
+static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
+{
+ if (system_supports_mte() && mte_restore_tags(entry, page))
+ set_bit(PG_mte_tagged, &page->flags);
+}
+
+#endif /* CONFIG_ARM64_MTE */
+
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
*/
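
Taken together, the swap hooks above bracket the generic swap path; a hedged
sketch of the intended sequence (the mm/ call sites are paraphrased, not
quoted):

    /* swap out:  generic code calls arch_prepare_to_swap(page)
     *            -> mte_save_tags() copies the page's tags aside
     * swap in:   generic code calls arch_swap_restore(entry, page)
     *            -> mte_restore_tags() repopulates them and PG_mte_tagged
     *               is set so set_pte_at() knows the page carries tags
     * slot free: arch_swap_invalidate_page()/_area() drop the saved copies
     */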
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 240fe5e5b720..fce8cbecd6bc 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -38,6 +38,7 @@
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
+#include <asm/spectre.h>
#include <asm/types.h>
/*
@@ -151,6 +152,10 @@ struct thread_struct {
struct ptrauth_keys_user keys_user;
struct ptrauth_keys_kernel keys_kernel;
#endif
+#ifdef CONFIG_ARM64_MTE
+ u64 sctlr_tcf0;
+ u64 gcr_user_incl;
+#endif
};
static inline void arch_thread_struct_whitelist(unsigned long *offset,
@@ -197,40 +202,15 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
regs->pmr_save = GIC_PRIO_IRQON;
}
-static inline void set_ssbs_bit(struct pt_regs *regs)
-{
- regs->pstate |= PSR_SSBS_BIT;
-}
-
-static inline void set_compat_ssbs_bit(struct pt_regs *regs)
-{
- regs->pstate |= PSR_AA32_SSBS_BIT;
-}
-
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
start_thread_common(regs, pc);
regs->pstate = PSR_MODE_EL0t;
-
- if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- set_ssbs_bit(regs);
-
+ spectre_v4_enable_task_mitigation(current);
regs->sp = sp;
}
-static inline bool is_ttbr0_addr(unsigned long addr)
-{
- /* entry assembly clears tags for TTBR0 addrs */
- return addr < TASK_SIZE;
-}
-
-static inline bool is_ttbr1_addr(unsigned long addr)
-{
- /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
- return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
-}
-
#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
@@ -244,13 +224,23 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate |= PSR_AA32_E_BIT;
#endif
- if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- set_compat_ssbs_bit(regs);
-
+ spectre_v4_enable_task_mitigation(current);
regs->compat_sp = sp;
}
#endif
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+ /* entry assembly clears tags for TTBR0 addrs */
+ return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+ /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+ return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
+}
+
/* Forward declaration, a strange C thing */
struct task_struct;
@@ -315,10 +305,10 @@ extern void __init minsigstksz_setup(void);
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
-long set_tagged_addr_ctrl(unsigned long arg);
-long get_tagged_addr_ctrl(void);
-#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(arg)
-#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl()
+long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
+long get_tagged_addr_ctrl(struct task_struct *task);
+#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(current, arg)
+#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current)
#endif
/*
diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
new file mode 100644
index 000000000000..fcdfbce302bd
--- /dev/null
+++ b/arch/arm64/include/asm/spectre.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interface for managing mitigations for Spectre vulnerabilities.
+ *
+ * Copyright (C) 2020 Google LLC
+ * Author: Will Deacon <will@kernel.org>
+ */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+#include <asm/cpufeature.h>
+
+/* Watch out, ordering is important here. */
+enum mitigation_state {
+ SPECTRE_UNAFFECTED,
+ SPECTRE_MITIGATED,
+ SPECTRE_VULNERABLE,
+};
+
+struct task_struct;
+
+enum mitigation_state arm64_get_spectre_v2_state(void);
+bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
+void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+
+enum mitigation_state arm64_get_spectre_v4_state(void);
+bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
+void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
+
+#endif /* __ASM_SPECTRE_H */
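
The "ordering is important" comment is load-bearing: because the enum values
increase with severity, per-CPU states can be folded with a plain max. A
hedged sketch:

    /* Illustrative only: combine two per-CPU mitigation states. */
    static enum mitigation_state example_combine(enum mitigation_state a,
                                                 enum mitigation_state b)
    {
            /* SPECTRE_VULNERABLE > SPECTRE_MITIGATED > SPECTRE_UNAFFECTED */
            return a > b ? a : b;
    }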
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index fc7613023c19..eb29b1fe8255 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -63,7 +63,7 @@ struct stackframe {
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
- int (*fn)(struct stackframe *, void *), void *data);
+ bool (*fn)(void *, unsigned long), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
const char *loglvl);
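
A hedged example of a consumer written against the new callback signature
(returning false terminates the walk):

    /* Illustrative only: record up to 16 return addresses. */
    static bool example_save_frame(void *data, unsigned long pc)
    {
            struct { unsigned long pcs[16]; unsigned int n; } *trace = data;

            trace->pcs[trace->n++] = pc;
            return trace->n < 16;
    }
    /* ... walk_stackframe(tsk, &frame, example_save_frame, &trace); */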
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 554a7e8ecb07..d52c1b3ce589 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -91,10 +91,12 @@
#define PSTATE_PAN pstate_field(0, 4)
#define PSTATE_UAO pstate_field(0, 3)
#define PSTATE_SSBS pstate_field(3, 1)
+#define PSTATE_TCO pstate_field(3, 4)
#define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift))
#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift))
#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift))
#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
@@ -181,6 +183,8 @@
#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0)
#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2)
+#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
+#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
@@ -218,6 +222,8 @@
#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3)
#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0)
#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1)
+#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0)
+#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1)
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
@@ -321,6 +327,8 @@
#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
+#define SYS_PMMIR_EL1 sys_reg(3, 0, 9, 14, 6)
+
#define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0)
#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
@@ -368,6 +376,7 @@
#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
+#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0)
@@ -460,6 +469,7 @@
#define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0)
#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
+#define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0)
#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0)
#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1)
@@ -516,6 +526,7 @@
#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0)
#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
+#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0)
#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0)
#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
@@ -531,6 +542,15 @@
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_DSSBS (BIT(44))
+#define SCTLR_ELx_ATA (BIT(43))
+
+#define SCTLR_ELx_TCF_SHIFT 40
+#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
+
+#define SCTLR_ELx_ITFSB (BIT(37))
#define SCTLR_ELx_ENIA (BIT(31))
#define SCTLR_ELx_ENIB (BIT(30))
#define SCTLR_ELx_ENDA (BIT(27))
@@ -559,6 +579,14 @@
#endif
/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_ATA0 (BIT(42))
+
+#define SCTLR_EL1_TCF0_SHIFT 38
+#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
+
#define SCTLR_EL1_BT1 (BIT(36))
#define SCTLR_EL1_BT0 (BIT(35))
#define SCTLR_EL1_UCI (BIT(26))
@@ -587,6 +615,7 @@
SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
+ SCTLR_ELx_ITFSB | SCTLR_ELx_ATA | SCTLR_EL1_ATA0 |\
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
/* MAIR_ELx memory attributes (used by Linux) */
@@ -595,6 +624,7 @@
#define MAIR_ATTR_DEVICE_GRE UL(0x0c)
#define MAIR_ATTR_NORMAL_NC UL(0x44)
#define MAIR_ATTR_NORMAL_WT UL(0xbb)
+#define MAIR_ATTR_NORMAL_TAGGED UL(0xf0)
#define MAIR_ATTR_NORMAL UL(0xff)
#define MAIR_ATTR_MASK UL(0xff)
@@ -636,14 +666,22 @@
#define ID_AA64ISAR1_APA_SHIFT 4
#define ID_AA64ISAR1_DPB_SHIFT 0
-#define ID_AA64ISAR1_APA_NI 0x0
-#define ID_AA64ISAR1_APA_ARCHITECTED 0x1
-#define ID_AA64ISAR1_API_NI 0x0
-#define ID_AA64ISAR1_API_IMP_DEF 0x1
-#define ID_AA64ISAR1_GPA_NI 0x0
-#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1
-#define ID_AA64ISAR1_GPI_NI 0x0
-#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
+#define ID_AA64ISAR1_APA_NI 0x0
+#define ID_AA64ISAR1_APA_ARCHITECTED 0x1
+#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2
+#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5
+#define ID_AA64ISAR1_API_NI 0x0
+#define ID_AA64ISAR1_API_IMP_DEF 0x1
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5
+#define ID_AA64ISAR1_GPA_NI 0x0
+#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1
+#define ID_AA64ISAR1_GPI_NI 0x0
+#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60
@@ -686,6 +724,10 @@
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
#define ID_AA64PFR1_BT_BTI 0x1
+#define ID_AA64PFR1_MTE_NI 0x0
+#define ID_AA64PFR1_MTE_EL0 0x1
+#define ID_AA64PFR1_MTE 0x2
+
/* id_aa64zfr0 */
#define ID_AA64ZFR0_F64MM_SHIFT 56
#define ID_AA64ZFR0_F32MM_SHIFT 52
@@ -920,6 +962,28 @@
#define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */
#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+/* TCR EL1 Bit Definitions */
+#define SYS_TCR_EL1_TCMA1 (BIT(58))
+#define SYS_TCR_EL1_TCMA0 (BIT(57))
+
+/* GCR_EL1 Definitions */
+#define SYS_GCR_EL1_RRND (BIT(16))
+#define SYS_GCR_EL1_EXCL_MASK 0xffffUL
+
+/* RGSR_EL1 Definitions */
+#define SYS_RGSR_EL1_TAG_MASK 0xfUL
+#define SYS_RGSR_EL1_SEED_SHIFT 8
+#define SYS_RGSR_EL1_SEED_MASK 0xffffUL
+
+/* GMID_EL1 field definitions */
+#define SYS_GMID_EL1_BS_SHIFT 0
+#define SYS_GMID_EL1_BS_SIZE 4
+
+/* TFSR{,E0}_EL1 bit definitions */
+#define SYS_TFSR_EL1_TF0_SHIFT 0
+#define SYS_TFSR_EL1_TF1_SHIFT 1
+#define SYS_TFSR_EL1_TF0 (UL(1) << SYS_TFSR_EL1_TF0_SHIFT)
+#define SYS_TFSR_EL1_TF1 (UL(1) << SYS_TFSR_EL1_TF1_SHIFT)
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (BIT(31))
@@ -1024,6 +1088,13 @@
write_sysreg(__scs_new, sysreg); \
} while (0)
+#define sysreg_clear_set_s(sysreg, clear, set) do { \
+ u64 __scs_val = read_sysreg_s(sysreg); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg_s(__scs_new, sysreg); \
+} while (0)
+
#endif
#endif /* __ASM_SYSREG_H */
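
The new _s variant mirrors sysreg_clear_set() for registers that are only
reachable by encoding; a hedged usage sketch using the TFSR definitions added
above:

    /* Illustrative only: clear any pending EL1 tag check fault flags. */
    sysreg_clear_set_s(SYS_TFSR_EL1,
                       SYS_TFSR_EL1_TF0 | SYS_TFSR_EL1_TF1, 0);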
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 5e784e16ee89..1fbab854a51b 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -67,6 +67,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
+#define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
@@ -96,10 +97,11 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_SVE (1 << TIF_SVE)
+#define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
- _TIF_UPROBE | _TIF_FSCHECK)
+ _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index d493174415db..cc3f5a33ff9c 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -28,14 +28,16 @@
* not. The macros handle invoking the asm with or without the
* register argument as appropriate.
*/
-#define __TLBI_0(op, arg) asm ("tlbi " #op "\n" \
+#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE \
+ "tlbi " #op "\n" \
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op, \
ARM64_WORKAROUND_REPEAT_TLBI, \
CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
: : )
-#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \
+#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE \
+ "tlbi " #op ", %0\n" \
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op ", %0", \
ARM64_WORKAROUND_REPEAT_TLBI, \
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index cee5928e1b7d..d96dc2c7c09d 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -24,7 +24,7 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
-void force_signal_inject(int signal, int code, unsigned long address);
+void force_signal_inject(int signal, int code, unsigned long address, unsigned int err);
void arm64_notify_segfault(unsigned long addr);
void arm64_force_sig_fault(int signo, int code, void __user *addr, const char *str);
void arm64_force_sig_mceerr(int code, void __user *addr, short lsb, const char *str);
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 912162f73529..b8f41aa234ee 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -74,6 +74,6 @@
#define HWCAP2_DGH (1 << 15)
#define HWCAP2_RNG (1 << 16)
#define HWCAP2_BTI (1 << 17)
-/* reserved for HWCAP2_MTE (1 << 18) */
+#define HWCAP2_MTE (1 << 18)
#endif /* _UAPI__ASM_HWCAP_H */
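
Userspace discovers the new capability via the auxiliary vector; a hedged
example (not part of this patch):

    #include <sys/auxv.h>

    #ifndef HWCAP2_MTE
    #define HWCAP2_MTE (1 << 18)
    #endif

    static int cpu_has_mte(void)
    {
            return !!(getauxval(AT_HWCAP2) & HWCAP2_MTE);
    }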
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index ba85bb23f060..7d804fd0a682 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -242,6 +242,15 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
+
+/*
+ * Only two states can be presented by the host kernel:
+ * - NOT_REQUIRED: the guest doesn't need to do anything
+ * - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available)
+ *
+ * All the other values are deprecated. The host still accepts all
+ * values (they are ABI), but will narrow them to the above two.
+ */
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
diff --git a/arch/arm64/include/uapi/asm/mman.h b/arch/arm64/include/uapi/asm/mman.h
index 6fdd71eb644f..1e6482a838e1 100644
--- a/arch/arm64/include/uapi/asm/mman.h
+++ b/arch/arm64/include/uapi/asm/mman.h
@@ -5,5 +5,6 @@
#include <asm-generic/mman.h>
#define PROT_BTI 0x10 /* BTI guarded page */
+#define PROT_MTE 0x20 /* Normal Tagged mapping */
#endif /* ! _UAPI__ASM_MMAN_H */
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 42cbe34d95ce..758ae984ff97 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -51,6 +51,7 @@
#define PSR_PAN_BIT 0x00400000
#define PSR_UAO_BIT 0x00800000
#define PSR_DIT_BIT 0x01000000
+#define PSR_TCO_BIT 0x02000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
@@ -75,6 +76,9 @@
/* syscall emulation path in ptrace */
#define PTRACE_SYSEMU 31
#define PTRACE_SYSEMU_SINGLESTEP 32
+/* MTE allocation tag access */
+#define PTRACE_PEEKMTETAGS 33
+#define PTRACE_POKEMTETAGS 34
#ifndef __ASSEMBLY__
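
A hedged sketch of how a debugger would drive the new requests: addr selects
the (possibly tagged) address in the tracee, data points at an iovec whose
buffer receives one tag per MTE granule:

    /* Illustrative only; error handling elided. */
    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/uio.h>

    #ifndef PTRACE_PEEKMTETAGS
    #define PTRACE_PEEKMTETAGS 33
    #endif

    static long example_peek_tags(pid_t pid, void *addr, uint8_t *tags,
                                  size_t n)
    {
            struct iovec iov = { .iov_base = tags, .iov_len = n };

            return ptrace(PTRACE_PEEKMTETAGS, pid, addr, &iov);
    }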
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index a561cbb91d4d..bbaf0bc4ad60 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -3,8 +3,6 @@
# Makefile for the linux kernel.
#
-CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
-AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_armv8_deprecated.o := -I$(src)
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
@@ -19,7 +17,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
- syscall.o
+ syscall.o proton-pack.o
targets += efi-entry.o
@@ -59,9 +57,9 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_CRASH_CORE) += crash_core.o
obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
+obj-$(CONFIG_ARM64_MTE) += mte.o
obj-y += vdso/ probes/
obj-$(CONFIG_COMPAT_VDSO) += vdso32/
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 455966401102..cada0b816c8a 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -298,8 +298,21 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
- pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
- return NULL;
+ if (memblock_is_map_memory(phys) ||
+ !memblock_is_region_memory(phys, size)) {
+ pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
+ return NULL;
+ }
+ /*
+ * Mapping kernel memory is permitted if the region in
+ * question is covered by a single memblock with the
+ * NOMAP attribute set: this enables the use of ACPI
+ * table overrides passed via initramfs, which are
+ * reserved in memory using arch_reserve_mem_area()
+ * below. As this particular use case only requires
+ * read access, fall through to the R/O mapping case.
+ */
+ fallthrough;
case EFI_RUNTIME_SERVICES_CODE:
/*
@@ -322,7 +335,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
*/
if (memblock_is_map_memory(phys))
return (void __iomem *)__phys_to_virt(phys);
- /* fall through */
+ fallthrough;
default:
if (region->attribute & EFI_MEMORY_WB)
@@ -388,3 +401,8 @@ int apei_claim_sea(struct pt_regs *regs)
return err;
}
+
+void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+ memblock_mark_nomap(addr, size);
+}
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 4a18055b2ff9..37721eb6f9a1 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -35,6 +35,10 @@ SYM_CODE_START(__cpu_soft_restart)
mov_q x13, SCTLR_ELx_FLAGS
bic x12, x12, x13
pre_disable_mmu_workaround
+ /*
+ * Either disable the EL1&0 translation regime or, if HCR_EL2.E2H == 1,
+ * disable the EL2&0 translation regime.
+ */
msr sctlr_el1, x12
isb
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 6bd1d3ad037a..24d75af344b1 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -106,365 +106,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
-atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
-static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
- const char *hyp_vecs_end)
-{
- void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
- int i;
-
- for (i = 0; i < SZ_2K; i += 0x80)
- memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
-
- __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
-}
-
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
- const char *hyp_vecs_start,
- const char *hyp_vecs_end)
-{
- static DEFINE_RAW_SPINLOCK(bp_lock);
- int cpu, slot = -1;
-
- /*
- * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
- * we're a guest. Skip the hyp-vectors work.
- */
- if (!hyp_vecs_start) {
- __this_cpu_write(bp_hardening_data.fn, fn);
- return;
- }
-
- raw_spin_lock(&bp_lock);
- for_each_possible_cpu(cpu) {
- if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
- slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
- break;
- }
- }
-
- if (slot == -1) {
- slot = atomic_inc_return(&arm64_el2_vector_last_slot);
- BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
- __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
- }
-
- __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
- __this_cpu_write(bp_hardening_data.fn, fn);
- raw_spin_unlock(&bp_lock);
-}
-#else
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
- const char *hyp_vecs_start,
- const char *hyp_vecs_end)
-{
- __this_cpu_write(bp_hardening_data.fn, fn);
-}
-#endif /* CONFIG_KVM_INDIRECT_VECTORS */
-
-#include <linux/arm-smccc.h>
-
-static void __maybe_unused call_smc_arch_workaround_1(void)
-{
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void call_hvc_arch_workaround_1(void)
-{
- arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void qcom_link_stack_sanitization(void)
-{
- u64 tmp;
-
- asm volatile("mov %0, x30 \n"
- ".rept 16 \n"
- "bl . + 4 \n"
- ".endr \n"
- "mov x30, %0 \n"
- : "=&r" (tmp));
-}
-
-static bool __nospectre_v2;
-static int __init parse_nospectre_v2(char *str)
-{
- __nospectre_v2 = true;
- return 0;
-}
-early_param("nospectre_v2", parse_nospectre_v2);
-
-/*
- * -1: No workaround
- * 0: No workaround required
- * 1: Workaround installed
- */
-static int detect_harden_bp_fw(void)
-{
- bp_hardening_cb_t cb;
- void *smccc_start, *smccc_end;
- struct arm_smccc_res res;
- u32 midr = read_cpuid_id();
-
- arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-
- switch ((int)res.a0) {
- case 1:
- /* Firmware says we're just fine */
- return 0;
- case 0:
- break;
- default:
- return -1;
- }
-
- switch (arm_smccc_1_1_get_conduit()) {
- case SMCCC_CONDUIT_HVC:
- cb = call_hvc_arch_workaround_1;
- /* This is a guest, no need to patch KVM vectors */
- smccc_start = NULL;
- smccc_end = NULL;
- break;
-
-#if IS_ENABLED(CONFIG_KVM)
- case SMCCC_CONDUIT_SMC:
- cb = call_smc_arch_workaround_1;
- smccc_start = __smccc_workaround_1_smc;
- smccc_end = __smccc_workaround_1_smc +
- __SMCCC_WORKAROUND_1_SMC_SZ;
- break;
-#endif
-
- default:
- return -1;
- }
-
- if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
- ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
- cb = qcom_link_stack_sanitization;
-
- if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
- install_bp_hardening_cb(cb, smccc_start, smccc_end);
-
- return 1;
-}
-
-DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
-static bool __ssb_safe = true;
-
-static const struct ssbd_options {
- const char *str;
- int state;
-} ssbd_options[] = {
- { "force-on", ARM64_SSBD_FORCE_ENABLE, },
- { "force-off", ARM64_SSBD_FORCE_DISABLE, },
- { "kernel", ARM64_SSBD_KERNEL, },
-};
-
-static int __init ssbd_cfg(char *buf)
-{
- int i;
-
- if (!buf || !buf[0])
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
- int len = strlen(ssbd_options[i].str);
-
- if (strncmp(buf, ssbd_options[i].str, len))
- continue;
-
- ssbd_state = ssbd_options[i].state;
- return 0;
- }
-
- return -EINVAL;
-}
-early_param("ssbd", ssbd_cfg);
-
-void __init arm64_update_smccc_conduit(struct alt_instr *alt,
- __le32 *origptr, __le32 *updptr,
- int nr_inst)
-{
- u32 insn;
-
- BUG_ON(nr_inst != 1);
-
- switch (arm_smccc_1_1_get_conduit()) {
- case SMCCC_CONDUIT_HVC:
- insn = aarch64_insn_get_hvc_value();
- break;
- case SMCCC_CONDUIT_SMC:
- insn = aarch64_insn_get_smc_value();
- break;
- default:
- return;
- }
-
- *updptr = cpu_to_le32(insn);
-}
-
-void __init arm64_enable_wa2_handling(struct alt_instr *alt,
- __le32 *origptr, __le32 *updptr,
- int nr_inst)
-{
- BUG_ON(nr_inst != 1);
- /*
- * Only allow mitigation on EL1 entry/exit and guest
- * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
- * be flipped.
- */
- if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
- *updptr = cpu_to_le32(aarch64_insn_gen_nop());
-}
-
-void arm64_set_ssbd_mitigation(bool state)
-{
- int conduit;
-
- if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
- pr_info_once("SSBD disabled by kernel configuration\n");
- return;
- }
-
- if (this_cpu_has_cap(ARM64_SSBS)) {
- if (state)
- asm volatile(SET_PSTATE_SSBS(0));
- else
- asm volatile(SET_PSTATE_SSBS(1));
- return;
- }
-
- conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
- NULL);
-
- WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
-}
-
-static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
- int scope)
-{
- struct arm_smccc_res res;
- bool required = true;
- s32 val;
- bool this_cpu_safe = false;
- int conduit;
-
- WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
- if (cpu_mitigations_off())
- ssbd_state = ARM64_SSBD_FORCE_DISABLE;
-
- /* delay setting __ssb_safe until we get a firmware response */
- if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
- this_cpu_safe = true;
-
- if (this_cpu_has_cap(ARM64_SSBS)) {
- if (!this_cpu_safe)
- __ssb_safe = false;
- required = false;
- goto out_printmsg;
- }
-
- conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_WORKAROUND_2, &res);
-
- if (conduit == SMCCC_CONDUIT_NONE) {
- ssbd_state = ARM64_SSBD_UNKNOWN;
- if (!this_cpu_safe)
- __ssb_safe = false;
- return false;
- }
-
- val = (s32)res.a0;
-
- switch (val) {
- case SMCCC_RET_NOT_SUPPORTED:
- ssbd_state = ARM64_SSBD_UNKNOWN;
- if (!this_cpu_safe)
- __ssb_safe = false;
- return false;
-
- /* machines with mixed mitigation requirements must not return this */
- case SMCCC_RET_NOT_REQUIRED:
- pr_info_once("%s mitigation not required\n", entry->desc);
- ssbd_state = ARM64_SSBD_MITIGATED;
- return false;
-
- case SMCCC_RET_SUCCESS:
- __ssb_safe = false;
- required = true;
- break;
-
- case 1: /* Mitigation not required on this CPU */
- required = false;
- break;
-
- default:
- WARN_ON(1);
- if (!this_cpu_safe)
- __ssb_safe = false;
- return false;
- }
-
- switch (ssbd_state) {
- case ARM64_SSBD_FORCE_DISABLE:
- arm64_set_ssbd_mitigation(false);
- required = false;
- break;
-
- case ARM64_SSBD_KERNEL:
- if (required) {
- __this_cpu_write(arm64_ssbd_callback_required, 1);
- arm64_set_ssbd_mitigation(true);
- }
- break;
-
- case ARM64_SSBD_FORCE_ENABLE:
- arm64_set_ssbd_mitigation(true);
- required = true;
- break;
-
- default:
- WARN_ON(1);
- break;
- }
-
-out_printmsg:
- switch (ssbd_state) {
- case ARM64_SSBD_FORCE_DISABLE:
- pr_info_once("%s disabled from command-line\n", entry->desc);
- break;
-
- case ARM64_SSBD_FORCE_ENABLE:
- pr_info_once("%s forced from command-line\n", entry->desc);
- break;
- }
-
- return required;
-}
-
-/* known invulnerable cores */
-static const struct midr_range arm64_ssb_cpus[] = {
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
- MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
- MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
- MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
- {},
-};
-
#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -519,83 +160,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_RANGE_LIST(midr_list)
-/* Track overall mitigation state. We are only mitigated if all cores are ok */
-static bool __hardenbp_enab = true;
-static bool __spectrev2_safe = true;
-
-int get_spectre_v2_workaround_state(void)
-{
- if (__spectrev2_safe)
- return ARM64_BP_HARDEN_NOT_REQUIRED;
-
- if (!__hardenbp_enab)
- return ARM64_BP_HARDEN_UNKNOWN;
-
- return ARM64_BP_HARDEN_WA_NEEDED;
-}
-
-/*
- * List of CPUs that do not need any Spectre-v2 mitigation at all.
- */
-static const struct midr_range spectre_v2_safe_list[] = {
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
- MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
- MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
- MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
- MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
- { /* sentinel */ }
-};
-
-/*
- * Track overall bp hardening for all heterogeneous cores in the machine.
- * We are only considered "safe" if all booted cores are known safe.
- */
-static bool __maybe_unused
-check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
-{
- int need_wa;
-
- WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
- /* If the CPU has CSV2 set, we're safe */
- if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
- ID_AA64PFR0_CSV2_SHIFT))
- return false;
-
- /* Alternatively, we have a list of unaffected CPUs */
- if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
- return false;
-
- /* Fallback to firmware detection */
- need_wa = detect_harden_bp_fw();
- if (!need_wa)
- return false;
-
- __spectrev2_safe = false;
-
- if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
- pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
- __hardenbp_enab = false;
- return false;
- }
-
- /* forced off */
- if (__nospectre_v2 || cpu_mitigations_off()) {
- pr_info_once("spectrev2 mitigation disabled by command line option\n");
- __hardenbp_enab = false;
- return false;
- }
-
- if (need_wa < 0) {
- pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
- __hardenbp_enab = false;
- }
-
- return (need_wa > 0);
-}
-
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
@@ -887,9 +451,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
#endif
{
- .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ .desc = "Spectre-v2",
+ .capability = ARM64_SPECTRE_V2,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
- .matches = check_branch_predictor,
+ .matches = has_spectre_v2,
+ .cpu_enable = spectre_v2_enable_mitigation,
},
#ifdef CONFIG_RANDOMIZE_BASE
{
@@ -899,17 +465,23 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
#endif
{
- .desc = "Speculative Store Bypass Disable",
- .capability = ARM64_SSBD,
+ .desc = "Spectre-v4",
+ .capability = ARM64_SPECTRE_V4,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
- .matches = has_ssbd_mitigation,
- .midr_range_list = arm64_ssb_cpus,
+ .matches = has_spectre_v4,
+ .cpu_enable = spectre_v4_enable_mitigation,
},
#ifdef CONFIG_ARM64_ERRATUM_1418040
{
.desc = "ARM erratum 1418040",
.capability = ARM64_WORKAROUND_1418040,
ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+ /*
+ * We need to allow affected CPUs to come in late, but
+ * also need the non-affected CPUs to be able to come
+ * in at any point in time. Wonderful.
+ */
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
@@ -954,40 +526,3 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
}
};
-
-ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
-}
-
-ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- switch (get_spectre_v2_workaround_state()) {
- case ARM64_BP_HARDEN_NOT_REQUIRED:
- return sprintf(buf, "Not affected\n");
- case ARM64_BP_HARDEN_WA_NEEDED:
- return sprintf(buf, "Mitigation: Branch predictor hardening\n");
- case ARM64_BP_HARDEN_UNKNOWN:
- default:
- return sprintf(buf, "Vulnerable\n");
- }
-}
-
-ssize_t cpu_show_spec_store_bypass(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- if (__ssb_safe)
- return sprintf(buf, "Not affected\n");
-
- switch (ssbd_state) {
- case ARM64_SSBD_KERNEL:
- case ARM64_SSBD_FORCE_ENABLE:
- if (IS_ENABLED(CONFIG_ARM64_SSBD))
- return sprintf(buf,
- "Mitigation: Speculative Store Bypass disabled via prctl\n");
- }
-
- return sprintf(buf, "Vulnerable\n");
-}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a389b999482e..dcc165b3fc04 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -75,6 +75,7 @@
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
+#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
@@ -197,9 +198,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
- FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
+ FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
- FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
+ FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -227,7 +228,9 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
ARM64_FTR_END,
@@ -487,7 +490,7 @@ static const struct arm64_ftr_bits ftr_id_pfr1[] = {
};
static const struct arm64_ftr_bits ftr_id_pfr2[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -686,7 +689,7 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
case FTR_HIGHER_OR_ZERO_SAFE:
if (!cur || !new)
break;
- /* Fallthrough */
+ fallthrough;
case FTR_HIGHER_SAFE:
ret = new > cur ? new : cur;
break;
@@ -1111,6 +1114,7 @@ u64 read_sanitised_ftr_reg(u32 id)
return 0;
return regp->sys_val;
}
+EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
#define read_sysreg_case(r) \
case r: return read_sysreg_s(r)
@@ -1443,6 +1447,7 @@ static inline void __cpu_enable_hw_dbm(void)
write_sysreg(tcr, tcr_el1);
isb();
+ local_flush_tlb_all();
}
static bool cpu_has_broken_dbm(void)
@@ -1583,48 +1588,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
WARN_ON(val & (7 << 27 | 7 << 21));
}
-#ifdef CONFIG_ARM64_SSBD
-static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
-{
- if (user_mode(regs))
- return 1;
-
- if (instr & BIT(PSTATE_Imm_shift))
- regs->pstate |= PSR_SSBS_BIT;
- else
- regs->pstate &= ~PSR_SSBS_BIT;
-
- arm64_skip_faulting_instruction(regs, 4);
- return 0;
-}
-
-static struct undef_hook ssbs_emulation_hook = {
- .instr_mask = ~(1U << PSTATE_Imm_shift),
- .instr_val = 0xd500401f | PSTATE_SSBS,
- .fn = ssbs_emulation_handler,
-};
-
-static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
-{
- static bool undef_hook_registered = false;
- static DEFINE_RAW_SPINLOCK(hook_lock);
-
- raw_spin_lock(&hook_lock);
- if (!undef_hook_registered) {
- register_undef_hook(&ssbs_emulation_hook);
- undef_hook_registered = true;
- }
- raw_spin_unlock(&hook_lock);
-
- if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
- sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
- arm64_set_ssbd_mitigation(false);
- } else {
- arm64_set_ssbd_mitigation(true);
- }
-}
-#endif /* CONFIG_ARM64_SSBD */
-
#ifdef CONFIG_ARM64_PAN
static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
{
@@ -1648,11 +1611,37 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
#endif /* CONFIG_ARM64_RAS_EXTN */
#ifdef CONFIG_ARM64_PTR_AUTH
-static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
- int __unused)
+static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
{
- return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
- __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
+ int boot_val, sec_val;
+
+ /* We don't expect to be called with SCOPE_SYSTEM */
+ WARN_ON(scope == SCOPE_SYSTEM);
+ /*
+ * The ptr-auth feature levels are not backwards compatible with lower
+ * levels. Hence we must match the ptr-auth feature level of the
+ * secondary CPUs with that of the boot CPU. The level of the boot CPU
+ * is fetched from the sanitised register, whereas a direct register
+ * read is used for the secondary CPUs.
+ * The sanitised feature state is guaranteed to match that of the
+ * boot CPU, as a mismatched secondary CPU is parked before it gets
+ * a chance to update the state with its capability.
+ */
+ boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg),
+ entry->field_pos, entry->sign);
+ if (scope & SCOPE_BOOT_CPU)
+ return boot_val >= entry->min_field_value;
+ /* Now check for the secondary CPUs with SCOPE_LOCAL_CPU scope */
+ sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
+ entry->field_pos, entry->sign);
+ return sec_val == boot_val;
+}
+
+static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) ||
+ has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
}
static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
@@ -1702,6 +1691,22 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
}
#endif /* CONFIG_ARM64_BTI */
+#ifdef CONFIG_ARM64_MTE
+static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+{
+ static bool cleared_zero_page = false;
+
+ /*
+ * Clear the tags in the zero page. This needs to be done via the
+ * linear map which has the Tagged attribute.
+ */
+ if (!cleared_zero_page) {
+ cleared_zero_page = true;
+ mte_clear_page_tags(lm_alias(empty_zero_page));
+ }
+}
+#endif /* CONFIG_ARM64_MTE */
+
/* Internal helper functions to match cpu capability type */
static bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
@@ -1976,19 +1981,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
.min_field_value = 1,
},
-#ifdef CONFIG_ARM64_SSBD
{
.desc = "Speculative Store Bypassing Safe (SSBS)",
.capability = ARM64_SSBS,
- .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR1_EL1,
.field_pos = ID_AA64PFR1_SSBS_SHIFT,
.sign = FTR_UNSIGNED,
.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
- .cpu_enable = cpu_enable_ssbs,
},
-#endif
#ifdef CONFIG_ARM64_CNP
{
.desc = "Common not Private translations",
@@ -2021,7 +2023,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64ISAR1_APA_SHIFT,
.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
- .matches = has_cpuid_feature,
+ .matches = has_address_auth_cpucap,
},
{
.desc = "Address authentication (IMP DEF algorithm)",
@@ -2031,12 +2033,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64ISAR1_API_SHIFT,
.min_field_value = ID_AA64ISAR1_API_IMP_DEF,
- .matches = has_cpuid_feature,
+ .matches = has_address_auth_cpucap,
},
{
.capability = ARM64_HAS_ADDRESS_AUTH,
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
- .matches = has_address_auth,
+ .matches = has_address_auth_metacap,
},
{
.desc = "Generic authentication (architected algorithm)",
@@ -2121,6 +2123,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sign = FTR_UNSIGNED,
},
#endif
+#ifdef CONFIG_ARM64_MTE
+ {
+ .desc = "Memory Tagging Extension",
+ .capability = ARM64_MTE,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .field_pos = ID_AA64PFR1_MTE_SHIFT,
+ .min_field_value = ID_AA64PFR1_MTE,
+ .sign = FTR_UNSIGNED,
+ .cpu_enable = cpu_enable_mte,
+ },
+#endif /* CONFIG_ARM64_MTE */
{},
};
@@ -2237,6 +2252,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
#endif
+#ifdef CONFIG_ARM64_MTE
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
+#endif /* CONFIG_ARM64_MTE */
{},
};
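With KERNEL_HWCAP_MTE wired up above, userspace can detect the feature from the auxiliary vector instead of parsing /proc/cpuinfo. A hedged example; HWCAP2_MTE, HWCAP_PACA and HWCAP_PACG are the uapi names corresponding to the kernel hwcaps listed above:

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>  /* HWCAP_PACA, HWCAP_PACG, HWCAP2_MTE */

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);
            unsigned long hwcap2 = getauxval(AT_HWCAP2);

            printf("paca:%d pacg:%d mte:%d\n",
                   !!(hwcap & HWCAP_PACA), !!(hwcap & HWCAP_PACG),
                   !!(hwcap2 & HWCAP2_MTE));
            return 0;
    }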
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 393c6fb1f1cb..6a7bb3729d60 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -43,94 +43,93 @@ static const char *icache_policy_str[] = {
unsigned long __icache_flags;
static const char *const hwcap_str[] = {
- "fp",
- "asimd",
- "evtstrm",
- "aes",
- "pmull",
- "sha1",
- "sha2",
- "crc32",
- "atomics",
- "fphp",
- "asimdhp",
- "cpuid",
- "asimdrdm",
- "jscvt",
- "fcma",
- "lrcpc",
- "dcpop",
- "sha3",
- "sm3",
- "sm4",
- "asimddp",
- "sha512",
- "sve",
- "asimdfhm",
- "dit",
- "uscat",
- "ilrcpc",
- "flagm",
- "ssbs",
- "sb",
- "paca",
- "pacg",
- "dcpodp",
- "sve2",
- "sveaes",
- "svepmull",
- "svebitperm",
- "svesha3",
- "svesm4",
- "flagm2",
- "frint",
- "svei8mm",
- "svef32mm",
- "svef64mm",
- "svebf16",
- "i8mm",
- "bf16",
- "dgh",
- "rng",
- "bti",
- /* reserved for "mte" */
- NULL
+ [KERNEL_HWCAP_FP] = "fp",
+ [KERNEL_HWCAP_ASIMD] = "asimd",
+ [KERNEL_HWCAP_EVTSTRM] = "evtstrm",
+ [KERNEL_HWCAP_AES] = "aes",
+ [KERNEL_HWCAP_PMULL] = "pmull",
+ [KERNEL_HWCAP_SHA1] = "sha1",
+ [KERNEL_HWCAP_SHA2] = "sha2",
+ [KERNEL_HWCAP_CRC32] = "crc32",
+ [KERNEL_HWCAP_ATOMICS] = "atomics",
+ [KERNEL_HWCAP_FPHP] = "fphp",
+ [KERNEL_HWCAP_ASIMDHP] = "asimdhp",
+ [KERNEL_HWCAP_CPUID] = "cpuid",
+ [KERNEL_HWCAP_ASIMDRDM] = "asimdrdm",
+ [KERNEL_HWCAP_JSCVT] = "jscvt",
+ [KERNEL_HWCAP_FCMA] = "fcma",
+ [KERNEL_HWCAP_LRCPC] = "lrcpc",
+ [KERNEL_HWCAP_DCPOP] = "dcpop",
+ [KERNEL_HWCAP_SHA3] = "sha3",
+ [KERNEL_HWCAP_SM3] = "sm3",
+ [KERNEL_HWCAP_SM4] = "sm4",
+ [KERNEL_HWCAP_ASIMDDP] = "asimddp",
+ [KERNEL_HWCAP_SHA512] = "sha512",
+ [KERNEL_HWCAP_SVE] = "sve",
+ [KERNEL_HWCAP_ASIMDFHM] = "asimdfhm",
+ [KERNEL_HWCAP_DIT] = "dit",
+ [KERNEL_HWCAP_USCAT] = "uscat",
+ [KERNEL_HWCAP_ILRCPC] = "ilrcpc",
+ [KERNEL_HWCAP_FLAGM] = "flagm",
+ [KERNEL_HWCAP_SSBS] = "ssbs",
+ [KERNEL_HWCAP_SB] = "sb",
+ [KERNEL_HWCAP_PACA] = "paca",
+ [KERNEL_HWCAP_PACG] = "pacg",
+ [KERNEL_HWCAP_DCPODP] = "dcpodp",
+ [KERNEL_HWCAP_SVE2] = "sve2",
+ [KERNEL_HWCAP_SVEAES] = "sveaes",
+ [KERNEL_HWCAP_SVEPMULL] = "svepmull",
+ [KERNEL_HWCAP_SVEBITPERM] = "svebitperm",
+ [KERNEL_HWCAP_SVESHA3] = "svesha3",
+ [KERNEL_HWCAP_SVESM4] = "svesm4",
+ [KERNEL_HWCAP_FLAGM2] = "flagm2",
+ [KERNEL_HWCAP_FRINT] = "frint",
+ [KERNEL_HWCAP_SVEI8MM] = "svei8mm",
+ [KERNEL_HWCAP_SVEF32MM] = "svef32mm",
+ [KERNEL_HWCAP_SVEF64MM] = "svef64mm",
+ [KERNEL_HWCAP_SVEBF16] = "svebf16",
+ [KERNEL_HWCAP_I8MM] = "i8mm",
+ [KERNEL_HWCAP_BF16] = "bf16",
+ [KERNEL_HWCAP_DGH] = "dgh",
+ [KERNEL_HWCAP_RNG] = "rng",
+ [KERNEL_HWCAP_BTI] = "bti",
+ [KERNEL_HWCAP_MTE] = "mte",
};
#ifdef CONFIG_COMPAT
+#define COMPAT_KERNEL_HWCAP(x) const_ilog2(COMPAT_HWCAP_ ## x)
static const char *const compat_hwcap_str[] = {
- "swp",
- "half",
- "thumb",
- "26bit",
- "fastmult",
- "fpa",
- "vfp",
- "edsp",
- "java",
- "iwmmxt",
- "crunch",
- "thumbee",
- "neon",
- "vfpv3",
- "vfpv3d16",
- "tls",
- "vfpv4",
- "idiva",
- "idivt",
- "vfpd32",
- "lpae",
- "evtstrm",
- NULL
+ [COMPAT_KERNEL_HWCAP(SWP)] = "swp",
+ [COMPAT_KERNEL_HWCAP(HALF)] = "half",
+ [COMPAT_KERNEL_HWCAP(THUMB)] = "thumb",
+ [COMPAT_KERNEL_HWCAP(26BIT)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(FAST_MULT)] = "fastmult",
+ [COMPAT_KERNEL_HWCAP(FPA)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(VFP)] = "vfp",
+ [COMPAT_KERNEL_HWCAP(EDSP)] = "edsp",
+ [COMPAT_KERNEL_HWCAP(JAVA)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(IWMMXT)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(CRUNCH)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(THUMBEE)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(NEON)] = "neon",
+ [COMPAT_KERNEL_HWCAP(VFPv3)] = "vfpv3",
+ [COMPAT_KERNEL_HWCAP(VFPV3D16)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(TLS)] = "tls",
+ [COMPAT_KERNEL_HWCAP(VFPv4)] = "vfpv4",
+ [COMPAT_KERNEL_HWCAP(IDIVA)] = "idiva",
+ [COMPAT_KERNEL_HWCAP(IDIVT)] = "idivt",
+ [COMPAT_KERNEL_HWCAP(VFPD32)] = NULL, /* Not possible on arm64 */
+ [COMPAT_KERNEL_HWCAP(LPAE)] = "lpae",
+ [COMPAT_KERNEL_HWCAP(EVTSTRM)] = "evtstrm",
};
+#define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x)
static const char *const compat_hwcap2_str[] = {
- "aes",
- "pmull",
- "sha1",
- "sha2",
- "crc32",
- NULL
+ [COMPAT_KERNEL_HWCAP2(AES)] = "aes",
+ [COMPAT_KERNEL_HWCAP2(PMULL)] = "pmull",
+ [COMPAT_KERNEL_HWCAP2(SHA1)] = "sha1",
+ [COMPAT_KERNEL_HWCAP2(SHA2)] = "sha2",
+ [COMPAT_KERNEL_HWCAP2(CRC32)] = "crc32",
};
#endif /* CONFIG_COMPAT */
@@ -166,16 +165,25 @@ static int c_show(struct seq_file *m, void *v)
seq_puts(m, "Features\t:");
if (compat) {
#ifdef CONFIG_COMPAT
- for (j = 0; compat_hwcap_str[j]; j++)
- if (compat_elf_hwcap & (1 << j))
+ for (j = 0; j < ARRAY_SIZE(compat_hwcap_str); j++) {
+ if (compat_elf_hwcap & (1 << j)) {
+ /*
+ * Warn once if a hwcap that cannot exist
+ * on an arm64 platform is reported as set.
+ */
+ if (WARN_ON_ONCE(!compat_hwcap_str[j]))
+ continue;
+
seq_printf(m, " %s", compat_hwcap_str[j]);
+ }
+ }
- for (j = 0; compat_hwcap2_str[j]; j++)
+ for (j = 0; j < ARRAY_SIZE(compat_hwcap2_str); j++)
if (compat_elf_hwcap2 & (1 << j))
seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
} else {
- for (j = 0; hwcap_str[j]; j++)
+ for (j = 0; j < ARRAY_SIZE(hwcap_str); j++)
if (cpu_have_feature(j))
seq_printf(m, " %s", hwcap_str[j]);
}
@@ -327,7 +335,6 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
set_bit(ICACHEF_VPIPT, &__icache_flags);
break;
default:
- /* Fallthrough */
case ICACHE_POLICY_VIPT:
/* Assume aliasing */
set_bit(ICACHEF_ALIASING, &__icache_flags);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 7310a4f7f993..fa76151de6ff 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -384,7 +384,7 @@ void __init debug_traps_init(void)
hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
TRAP_TRACE, "single-step handler");
hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
- TRAP_BRKPT, "ptrace BRK handler");
+ TRAP_BRKPT, "BRK handler");
}
/* Re-enable single step for syscall restarting. */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index d3be9dbf5490..43d4c329775f 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -66,6 +66,13 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
}
NOKPROBE_SYMBOL(el1_dbg);
+static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
+{
+ local_daif_inherit(regs);
+ do_ptrauth_fault(regs, esr);
+}
+NOKPROBE_SYMBOL(el1_fpac);
+
asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -92,6 +99,9 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_BRK64:
el1_dbg(regs, esr);
break;
+ case ESR_ELx_EC_FPAC:
+ el1_fpac(regs, esr);
+ break;
default:
el1_inv(regs, esr);
}
@@ -227,6 +237,14 @@ static void notrace el0_svc(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(el0_svc);
+static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
+{
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ do_ptrauth_fault(regs, esr);
+}
+NOKPROBE_SYMBOL(el0_fpac);
+
asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -272,6 +290,9 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_BRK64:
el0_dbg(regs, esr);
break;
+ case ESR_ELx_EC_FPAC:
+ el0_fpac(regs, esr);
+ break;
default:
el0_inv(regs, esr);
}
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index f880dd63ddc3..2ca395c25448 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -32,6 +32,7 @@ SYM_FUNC_START(fpsimd_load_state)
SYM_FUNC_END(fpsimd_load_state)
#ifdef CONFIG_ARM64_SVE
+
SYM_FUNC_START(sve_save_state)
sve_save 0, x1, 2
ret
@@ -46,4 +47,28 @@ SYM_FUNC_START(sve_get_vl)
_sve_rdvl 0, 1
ret
SYM_FUNC_END(sve_get_vl)
+
+/*
+ * Load SVE state from FPSIMD state.
+ *
+ * x0 = pointer to struct fpsimd_state
+ * x1 = VQ - 1
+ *
+ * Each SVE vector will be loaded with the first 128-bits taken from FPSIMD
+ * and the rest zeroed. All the other SVE registers will be zeroed.
+ */
+SYM_FUNC_START(sve_load_from_fpsimd_state)
+ sve_load_vq x1, x2, x3
+ fpsimd_restore x0, 8
+ _for n, 0, 15, _sve_pfalse \n
+ _sve_wrffr 0
+ ret
+SYM_FUNC_END(sve_load_from_fpsimd_state)
+
+/* Zero all SVE registers but the first 128-bits of each vector */
+SYM_FUNC_START(sve_flush_live)
+ sve_flush
+ ret
+SYM_FUNC_END(sve_flush_live)
+
#endif /* CONFIG_ARM64_SVE */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 2646178c8329..f30007dff35f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -132,9 +132,8 @@ alternative_else_nop_endif
* them if required.
*/
.macro apply_ssbd, state, tmp1, tmp2
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb arm64_enable_wa2_handling
- b .L__asm_ssbd_skip\@
+alternative_cb spectre_v4_patch_fw_mitigation_enable
+ b .L__asm_ssbd_skip\@ // Patched to NOP
alternative_cb_end
ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
cbz \tmp2, .L__asm_ssbd_skip\@
@@ -142,10 +141,35 @@ alternative_cb_end
tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
mov w1, #\state
-alternative_cb arm64_update_smccc_conduit
+alternative_cb spectre_v4_patch_fw_mitigation_conduit
nop // Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
+ .endm
+
+ /* Check for MTE asynchronous tag check faults */
+ .macro check_mte_async_tcf, flgs, tmp
+#ifdef CONFIG_ARM64_MTE
+alternative_if_not ARM64_MTE
+ b 1f
+alternative_else_nop_endif
+ mrs_s \tmp, SYS_TFSRE0_EL1
+ tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
+ /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
+ orr \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
+ str \flgs, [tsk, #TSK_TI_FLAGS]
+ msr_s SYS_TFSRE0_EL1, xzr
+1:
+#endif
+ .endm
+
+ /* Clear the MTE asynchronous tag check faults */
+ .macro clear_mte_async_tcf
+#ifdef CONFIG_ARM64_MTE
+alternative_if ARM64_MTE
+ dsb ish
+ msr_s SYS_TFSRE0_EL1, xzr
+alternative_else_nop_endif
#endif
.endm
@@ -170,19 +194,6 @@ alternative_cb_end
stp x28, x29, [sp, #16 * 14]
.if \el == 0
- .if \regsize == 32
- /*
- * If we're returning from a 32-bit task on a system affected by
- * 1418040 then re-enable userspace access to the virtual counter.
- */
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
- mrs x0, cntkctl_el1
- orr x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
- msr cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
- .endif
clear_gp_regs
mrs x21, sp_el0
ldr_this_cpu tsk, __entry_task, x20
@@ -195,6 +206,8 @@ alternative_else_nop_endif
ldr x19, [tsk, #TSK_TI_FLAGS]
disable_step_tsk x19, x20
+ /* Check for asynchronous tag check faults in user space */
+ check_mte_async_tcf x19, x22
apply_ssbd 1, x22, x23
ptrauth_keys_install_kernel tsk, x20, x22, x23
@@ -246,6 +259,13 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
str x20, [sp, #S_PMR_SAVE]
alternative_else_nop_endif
+ /* Re-enable tag checking (TCO set on exception entry) */
+#ifdef CONFIG_ARM64_MTE
+alternative_if ARM64_MTE
+ SET_PSTATE_TCO(0)
+alternative_else_nop_endif
+#endif
+
/*
* Registers that may be useful after this macro is invoked:
*
@@ -294,14 +314,6 @@ alternative_else_nop_endif
tst x22, #PSR_MODE32_BIT // native task?
b.eq 3f
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
- mrs x0, cntkctl_el1
- bic x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
- msr cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
-
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
@@ -718,11 +730,9 @@ el0_irq_naked:
bl trace_hardirqs_off
#endif
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
1:
-#endif
irq_handler
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -765,6 +775,8 @@ SYM_CODE_START_LOCAL(ret_to_user)
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
finish_ret_to_user:
+ /* Ignore asynchronous tag check faults in the uaccess routines */
+ clear_mte_async_tcf
enable_step_tsk x1, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
bl stackleak_erase
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 55c8f3ec6705..a6d688c10745 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -32,9 +32,11 @@
#include <linux/swab.h>
#include <asm/esr.h>
+#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
+#include <asm/neon.h>
#include <asm/processor.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
@@ -312,7 +314,7 @@ static void fpsimd_save(void)
* re-enter user with corrupt state.
* There's no way to recover, so kill it:
*/
- force_signal_inject(SIGKILL, SI_KERNEL, 0);
+ force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
return;
}
@@ -928,7 +930,7 @@ void fpsimd_release_task(struct task_struct *dead_task)
* the SVE access trap will be disabled the next time this task
* reaches ret_to_user.
*
- * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
+ * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
* would have disabled the SVE access trap for userspace during
* ret_to_user, making an SVE access trap impossible in that case.
*/
@@ -936,7 +938,7 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
/* Even if we chose not to use SVE, the hardware could still trap: */
if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
- force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
return;
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 037421c66b14..d8d9caf02834 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -36,14 +36,10 @@
#include "efi-header.S"
-#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET)
+#define __PHYS_OFFSET KERNEL_START
-#if (TEXT_OFFSET & 0xfff) != 0
-#error TEXT_OFFSET must be at least 4KB aligned
-#elif (PAGE_OFFSET & 0x1fffff) != 0
+#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
-#elif TEXT_OFFSET > 0x1fffff
-#error TEXT_OFFSET must be less than 2MB
#endif
/*
@@ -55,7 +51,7 @@
* x0 = physical address to the FDT blob.
*
* This code is mostly position independent so you call this at
- * __pa(PAGE_OFFSET + TEXT_OFFSET).
+ * __pa(PAGE_OFFSET).
*
* Note that the callee-saved registers are used for storing variables
* that are useful before the MMU is enabled. The allocations are described
@@ -77,7 +73,7 @@ _head:
b primary_entry // branch to kernel start, magic
.long 0 // reserved
#endif
- le64sym _kernel_offset_le // Image load offset from start of RAM, little-endian
+ .quad 0 // Image load offset from start of RAM, little-endian
le64sym _kernel_size_le // Effective size of kernel image, little-endian
le64sym _kernel_flags_le // Informative flags, little-endian
.quad 0 // reserved
@@ -382,7 +378,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
* Map the kernel image (starting with PHYS_OFFSET).
*/
adrp x0, init_pg_dir
- mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text)
+ mov_q x5, KIMAGE_VADDR // compile time __va(_text)
add x5, x5, x23 // add KASLR displacement
mov x4, PTRS_PER_PGD
adrp x6, _end // runtime __pa(_end)
@@ -474,7 +470,7 @@ SYM_FUNC_END(__primary_switched)
.pushsection ".rodata", "a"
SYM_DATA_START(kimage_vaddr)
- .quad _text - TEXT_OFFSET
+ .quad _text
SYM_DATA_END(kimage_vaddr)
EXPORT_SYMBOL(kimage_vaddr)
.popsection
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 68e14152d6e9..42003774d261 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -21,7 +21,6 @@
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
-#include <linux/version.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
@@ -31,6 +30,7 @@
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
+#include <asm/mte.h>
#include <asm/pgalloc.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
@@ -285,6 +285,117 @@ static int create_safe_exec_page(void *src_start, size_t length,
#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
+#ifdef CONFIG_ARM64_MTE
+
+static DEFINE_XARRAY(mte_pages);
+
+static int save_tags(struct page *page, unsigned long pfn)
+{
+ void *tag_storage, *ret;
+
+ tag_storage = mte_allocate_tag_storage();
+ if (!tag_storage)
+ return -ENOMEM;
+
+ mte_save_page_tags(page_address(page), tag_storage);
+
+ ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
+ if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
+ mte_free_tag_storage(tag_storage);
+ return xa_err(ret);
+ } else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
+ mte_free_tag_storage(ret);
+ }
+
+ return 0;
+}
+
+static void swsusp_mte_free_storage(void)
+{
+ XA_STATE(xa_state, &mte_pages, 0);
+ void *tags;
+
+ xa_lock(&mte_pages);
+ xas_for_each(&xa_state, tags, ULONG_MAX) {
+ mte_free_tag_storage(tags);
+ }
+ xa_unlock(&mte_pages);
+
+ xa_destroy(&mte_pages);
+}
+
+static int swsusp_mte_save_tags(void)
+{
+ struct zone *zone;
+ unsigned long pfn, max_zone_pfn;
+ int ret = 0;
+ int n = 0;
+
+ if (!system_supports_mte())
+ return 0;
+
+ for_each_populated_zone(zone) {
+ max_zone_pfn = zone_end_pfn(zone);
+ for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
+ struct page *page = pfn_to_online_page(pfn);
+
+ if (!page)
+ continue;
+
+ if (!test_bit(PG_mte_tagged, &page->flags))
+ continue;
+
+ ret = save_tags(page, pfn);
+ if (ret) {
+ swsusp_mte_free_storage();
+ goto out;
+ }
+
+ n++;
+ }
+ }
+ pr_info("Saved %d MTE pages\n", n);
+
+out:
+ return ret;
+}
+
+static void swsusp_mte_restore_tags(void)
+{
+ XA_STATE(xa_state, &mte_pages, 0);
+ int n = 0;
+ void *tags;
+
+ xa_lock(&mte_pages);
+ xas_for_each(&xa_state, tags, ULONG_MAX) {
+ unsigned long pfn = xa_state.xa_index;
+ struct page *page = pfn_to_online_page(pfn);
+
+ mte_restore_page_tags(page_address(page), tags);
+
+ mte_free_tag_storage(tags);
+ n++;
+ }
+ xa_unlock(&mte_pages);
+
+ pr_info("Restored %d MTE pages\n", n);
+
+ xa_destroy(&mte_pages);
+}
+
+#else /* CONFIG_ARM64_MTE */
+
+static int swsusp_mte_save_tags(void)
+{
+ return 0;
+}
+
+static void swsusp_mte_restore_tags(void)
+{
+}
+
+#endif /* CONFIG_ARM64_MTE */
+
int swsusp_arch_suspend(void)
{
int ret = 0;
@@ -302,6 +413,10 @@ int swsusp_arch_suspend(void)
/* make the crash dump kernel image visible/saveable */
crash_prepare_suspend();
+ ret = swsusp_mte_save_tags();
+ if (ret)
+ return ret;
+
sleep_cpu = smp_processor_id();
ret = swsusp_save();
} else {
@@ -315,6 +430,8 @@ int swsusp_arch_suspend(void)
dcache_clean_range(__hyp_text_start, __hyp_text_end);
}
+ swsusp_mte_restore_tags();
+
/* make the crash dump kernel image protected again */
crash_post_resume();
@@ -332,11 +449,7 @@ int swsusp_arch_suspend(void)
* mitigation off behind our back, let's set the state
* to what we expect it to be.
*/
- switch (arm64_get_ssbd_state()) {
- case ARM64_SSBD_FORCE_ENABLE:
- case ARM64_SSBD_KERNEL:
- arm64_set_ssbd_mitigation(true);
- }
+ spectre_v4_enable_mitigation(NULL);
}
local_daif_restore(flags);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index af234a1e08b7..712e97c03e54 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -257,7 +257,7 @@ static int hw_breakpoint_control(struct perf_event *bp,
* level.
*/
enable_debug_monitors(dbg_el);
- /* Fall through */
+ fallthrough;
case HW_BREAKPOINT_RESTORE:
/* Setup the address register. */
write_wb_reg(val_reg, i, info->address);
@@ -541,13 +541,13 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
- /* Fallthrough */
+ fallthrough;
case 3:
/* Allow single byte watchpoint. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
- /* Fallthrough */
+ fallthrough;
default:
return -EINVAL;
}
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 9e897c500237..d0f3f35dd0d7 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -64,12 +64,10 @@ __efistub__ctype = _ctype;
#define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
/* Alternative callbacks for init-time patching of nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
/* Global kernel state accessed by nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
KVM_NVHE_ALIAS(kvm_host_data);
KVM_NVHE_ALIAS(kvm_vgic_global_state);
@@ -103,6 +101,10 @@ KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
KVM_NVHE_ALIAS(gic_pmr_sync);
#endif
+/* EL2 exception handling */
+KVM_NVHE_ALIAS(__start___kvm_ex_table);
+KVM_NVHE_ALIAS(__stop___kvm_ex_table);
+
#endif /* CONFIG_KVM */
#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index c7d38c660372..7bc3ba897901 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -62,7 +62,6 @@
*/
#define HEAD_SYMBOLS \
DEFINE_IMAGE_LE64(_kernel_size_le, _end - _text); \
- DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \
DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
#endif /* __ARM64_KERNEL_IMAGE_H */
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index a107375005bc..6c0de2f60ea9 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -60,16 +60,10 @@ bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
case AARCH64_INSN_HINT_XPACLRI:
case AARCH64_INSN_HINT_PACIA_1716:
case AARCH64_INSN_HINT_PACIB_1716:
- case AARCH64_INSN_HINT_AUTIA_1716:
- case AARCH64_INSN_HINT_AUTIB_1716:
case AARCH64_INSN_HINT_PACIAZ:
case AARCH64_INSN_HINT_PACIASP:
case AARCH64_INSN_HINT_PACIBZ:
case AARCH64_INSN_HINT_PACIBSP:
- case AARCH64_INSN_HINT_AUTIAZ:
- case AARCH64_INSN_HINT_AUTIASP:
- case AARCH64_INSN_HINT_AUTIBZ:
- case AARCH64_INSN_HINT_AUTIBSP:
case AARCH64_INSN_HINT_BTI:
case AARCH64_INSN_HINT_BTIC:
case AARCH64_INSN_HINT_BTIJ:
@@ -176,7 +170,7 @@ bool __kprobes aarch64_insn_uses_literal(u32 insn)
bool __kprobes aarch64_insn_is_branch(u32 insn)
{
- /* b, bl, cb*, tb*, b.cond, br, blr */
+ /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
return aarch64_insn_is_b(insn) ||
aarch64_insn_is_bl(insn) ||
@@ -185,8 +179,11 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
aarch64_insn_is_tbz(insn) ||
aarch64_insn_is_tbnz(insn) ||
aarch64_insn_is_ret(insn) ||
+ aarch64_insn_is_ret_auth(insn) ||
aarch64_insn_is_br(insn) ||
+ aarch64_insn_is_br_auth(insn) ||
aarch64_insn_is_blr(insn) ||
+ aarch64_insn_is_blr_auth(insn) ||
aarch64_insn_is_bcond(insn);
}
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 0ce3a28e3347..2e224435c024 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -305,8 +305,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.core.plt_shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
mod->arch.init.plt_shndx = i;
- else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
- !strcmp(secstrings + sechdrs[i].sh_name,
+ else if (!strcmp(secstrings + sechdrs[i].sh_name,
".text.ftrace_trampoline"))
tramp = sechdrs + i;
else if (sechdrs[i].sh_type == SHT_SYMTAB)
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 1cd1a4d0ed30..2a1ad95d9b2c 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -315,21 +315,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* MOVW instruction relocations. */
case R_AARCH64_MOVW_UABS_G0_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
AARCH64_INSN_IMM_MOVKZ);
@@ -397,7 +397,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_adrp(me, sechdrs, loc, val);
if (ovf && ovf != -ERANGE)
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
new file mode 100644
index 000000000000..52a0638ed967
--- /dev/null
+++ b/arch/arm64/kernel/mte.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/prctl.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/string.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/thread_info.h>
+#include <linux/uio.h>
+
+#include <asm/cpufeature.h>
+#include <asm/mte.h>
+#include <asm/ptrace.h>
+#include <asm/sysreg.h>
+
+static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
+{
+ pte_t old_pte = READ_ONCE(*ptep);
+
+ if (check_swap && is_swap_pte(old_pte)) {
+ swp_entry_t entry = pte_to_swp_entry(old_pte);
+
+ if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
+ return;
+ }
+
+ mte_clear_page_tags(page_address(page));
+}
+
+void mte_sync_tags(pte_t *ptep, pte_t pte)
+{
+ struct page *page = pte_page(pte);
+ long i, nr_pages = compound_nr(page);
+ bool check_swap = nr_pages == 1;
+
+ /* if PG_mte_tagged is set, tags have already been initialised */
+ for (i = 0; i < nr_pages; i++, page++) {
+ if (!test_and_set_bit(PG_mte_tagged, &page->flags))
+ mte_sync_page_tags(page, ptep, check_swap);
+ }
+}
+
+int memcmp_pages(struct page *page1, struct page *page2)
+{
+ char *addr1, *addr2;
+ int ret;
+
+ addr1 = page_address(page1);
+ addr2 = page_address(page2);
+ ret = memcmp(addr1, addr2, PAGE_SIZE);
+
+ if (!system_supports_mte() || ret)
+ return ret;
+
+ /*
+ * If the page content is identical but at least one of the pages is
+ * tagged, return non-zero to avoid KSM merging. If only one of the
+ * pages is tagged, set_pte_at() may zero or change the tags of the
+ * other page via mte_sync_tags().
+ */
+ if (test_bit(PG_mte_tagged, &page1->flags) ||
+ test_bit(PG_mte_tagged, &page2->flags))
+ return addr1 != addr2;
+
+ return ret;
+}
+
+static void update_sctlr_el1_tcf0(u64 tcf0)
+{
+ /* ISB required for the kernel uaccess routines */
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
+ isb();
+}
+
+static void set_sctlr_el1_tcf0(u64 tcf0)
+{
+ /*
+ * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
+ * optimisation. Disable preemption so that it does not see
+ * the variable update before the SCTLR_EL1.TCF0 one.
+ */
+ preempt_disable();
+ current->thread.sctlr_tcf0 = tcf0;
+ update_sctlr_el1_tcf0(tcf0);
+ preempt_enable();
+}
+
+static void update_gcr_el1_excl(u64 incl)
+{
+ u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
+
+ /*
+ * Note that 'incl' is an include mask (controlled by the user via
+ * prctl()) while GCR_EL1 accepts an exclude mask.
+ * No need for ISB since this only affects EL0 currently, implicit
+ * with ERET.
+ */
+ sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
+}
+
+static void set_gcr_el1_excl(u64 incl)
+{
+ current->thread.gcr_user_incl = incl;
+ update_gcr_el1_excl(incl);
+}
+
+void flush_mte_state(void)
+{
+ if (!system_supports_mte())
+ return;
+
+ /* clear any pending asynchronous tag fault */
+ dsb(ish);
+ write_sysreg_s(0, SYS_TFSRE0_EL1);
+ clear_thread_flag(TIF_MTE_ASYNC_FAULT);
+ /* disable tag checking */
+ set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+ /* reset tag generation mask */
+ set_gcr_el1_excl(0);
+}
+
+void mte_thread_switch(struct task_struct *next)
+{
+ if (!system_supports_mte())
+ return;
+
+ /* avoid expensive SCTLR_EL1 accesses if no change */
+ if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
+ update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
+ update_gcr_el1_excl(next->thread.gcr_user_incl);
+}
+
+void mte_suspend_exit(void)
+{
+ if (!system_supports_mte())
+ return;
+
+ update_gcr_el1_excl(current->thread.gcr_user_incl);
+}
+
+long set_mte_ctrl(struct task_struct *task, unsigned long arg)
+{
+ u64 tcf0;
+ u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT;
+
+ if (!system_supports_mte())
+ return 0;
+
+ switch (arg & PR_MTE_TCF_MASK) {
+ case PR_MTE_TCF_NONE:
+ tcf0 = SCTLR_EL1_TCF0_NONE;
+ break;
+ case PR_MTE_TCF_SYNC:
+ tcf0 = SCTLR_EL1_TCF0_SYNC;
+ break;
+ case PR_MTE_TCF_ASYNC:
+ tcf0 = SCTLR_EL1_TCF0_ASYNC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (task != current) {
+ task->thread.sctlr_tcf0 = tcf0;
+ task->thread.gcr_user_incl = gcr_incl;
+ } else {
+ set_sctlr_el1_tcf0(tcf0);
+ set_gcr_el1_excl(gcr_incl);
+ }
+
+ return 0;
+}
+
+long get_mte_ctrl(struct task_struct *task)
+{
+ unsigned long ret;
+
+ if (!system_supports_mte())
+ return 0;
+
+ ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;
+
+ switch (task->thread.sctlr_tcf0) {
+ case SCTLR_EL1_TCF0_NONE:
+ return PR_MTE_TCF_NONE;
+ case SCTLR_EL1_TCF0_SYNC:
+ ret |= PR_MTE_TCF_SYNC;
+ break;
+ case SCTLR_EL1_TCF0_ASYNC:
+ ret |= PR_MTE_TCF_ASYNC;
+ break;
+ }
+
+ return ret;
+}
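set_mte_ctrl()/get_mte_ctrl() sit behind the PR_SET_TAGGED_ADDR_CTRL/PR_GET_TAGGED_ADDR_CTRL prctl()s. A sketch of the intended userspace usage, assuming the PR_MTE_* constants added by this series have reached <linux/prctl.h>:

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    /* Enable tagged addresses with synchronous tag-check faults and
     * let IRG generate tags 1-15 (bit n of the include mask enables
     * tag n; tag 0 is deliberately excluded here). */
    if (prctl(PR_SET_TAGGED_ADDR_CTRL,
              PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
              (0xfffeUL << PR_MTE_TAG_SHIFT), 0, 0, 0))
            perror("PR_SET_TAGGED_ADDR_CTRL");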
+
+/*
+ * Access MTE tags in another process' address space as given in mm. Update
+ * kiov->iov_len to the number of bytes (one per tag) actually copied. Return
+ * 0 if any tags were copied, an error code otherwise.
+ * Inspired by __access_remote_vm().
+ */
+static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
+ struct iovec *kiov, unsigned int gup_flags)
+{
+ struct vm_area_struct *vma;
+ void __user *buf = kiov->iov_base;
+ size_t len = kiov->iov_len;
+ int ret;
+ int write = gup_flags & FOLL_WRITE;
+
+ if (!access_ok(buf, len))
+ return -EFAULT;
+
+ if (mmap_read_lock_killable(mm))
+ return -EIO;
+
+ while (len) {
+ unsigned long tags, offset;
+ void *maddr;
+ struct page *page = NULL;
+
+ ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
+ &vma, NULL);
+ if (ret <= 0)
+ break;
+
+ /*
+ * Only copy tags if the page has been mapped as PROT_MTE
+ * (PG_mte_tagged set). Otherwise the tags are not valid and
+ * not accessible to user. Moreover, an mprotect(PROT_MTE)
+ * would cause the existing tags to be cleared if the page
+ * was never mapped with PROT_MTE.
+ */
+ if (!test_bit(PG_mte_tagged, &page->flags)) {
+ ret = -EOPNOTSUPP;
+ put_page(page);
+ break;
+ }
+
+ /* limit access to the end of the page */
+ offset = offset_in_page(addr);
+ tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);
+
+ maddr = page_address(page);
+ if (write) {
+ tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
+ set_page_dirty_lock(page);
+ } else {
+ tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
+ }
+ put_page(page);
+
+ /* error accessing the tracer's buffer */
+ if (!tags)
+ break;
+
+ len -= tags;
+ buf += tags;
+ addr += tags * MTE_GRANULE_SIZE;
+ }
+ mmap_read_unlock(mm);
+
+ /* return an error if no tags copied */
+ kiov->iov_len = buf - kiov->iov_base;
+ if (!kiov->iov_len) {
+ /* check for error accessing the tracee's address space */
+ if (ret <= 0)
+ return -EIO;
+ else
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * Copy MTE tags in another process' address space at 'addr' to/from tracer's
+ * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
+ */
+static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
+ struct iovec *kiov, unsigned int gup_flags)
+{
+ struct mm_struct *mm;
+ int ret;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ return -EPERM;
+
+ if (!tsk->ptrace || (current != tsk->parent) ||
+ ((get_dumpable(mm) != SUID_DUMP_USER) &&
+ !ptracer_capable(tsk, mm->user_ns))) {
+ mmput(mm);
+ return -EPERM;
+ }
+
+ ret = __access_remote_tags(mm, addr, kiov, gup_flags);
+ mmput(mm);
+
+ return ret;
+}
+
+int mte_ptrace_copy_tags(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ struct iovec kiov;
+ struct iovec __user *uiov = (void __user *)data;
+ unsigned int gup_flags = FOLL_FORCE;
+
+ if (!system_supports_mte())
+ return -EIO;
+
+ if (get_user(kiov.iov_base, &uiov->iov_base) ||
+ get_user(kiov.iov_len, &uiov->iov_len))
+ return -EFAULT;
+
+ if (request == PTRACE_POKEMTETAGS)
+ gup_flags |= FOLL_WRITE;
+
+ /* align addr to the MTE tag granule */
+ addr &= MTE_GRANULE_MASK;
+
+ ret = access_remote_tags(child, addr, &kiov, gup_flags);
+ if (!ret)
+ ret = put_user(kiov.iov_len, &uiov->iov_len);
+
+ return ret;
+}
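A tracer moves one tag byte per 16-byte granule through the iovec. A hedged sketch of the read side, assuming the PTRACE_PEEKMTETAGS request added by this series is visible via <asm/ptrace.h>:

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <asm/ptrace.h>  /* PTRACE_PEEKMTETAGS */

    /* Read up to 'n' tags from a stopped tracee; 'addr' must be mapped
     * with PROT_MTE in the tracee. Returns tags copied, or -1. */
    static long peek_tags(pid_t pid, void *addr, char *tags, size_t n)
    {
            struct iovec iov = { .iov_base = tags, .iov_len = n };

            if (ptrace(PTRACE_PEEKMTETAGS, pid, addr, &iov))
                    return -1;
            return iov.iov_len;
    }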
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 295d66490584..c07d7a034941 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -50,16 +50,19 @@ static u64 pv_steal_clock(int cpu)
struct pv_time_stolen_time_region *reg;
reg = per_cpu_ptr(&stolen_time_region, cpu);
- if (!reg->kaddr) {
- pr_warn_once("stolen time enabled but not configured for cpu %d\n",
- cpu);
+
+ /*
+ * paravirt_steal_clock() may be called before the CPU
+ * online notification callback runs. Until the callback
+ * has run we just return zero.
+ */
+ if (!reg->kaddr)
return 0;
- }
return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
}
-static int stolen_time_dying_cpu(unsigned int cpu)
+static int stolen_time_cpu_down_prepare(unsigned int cpu)
{
struct pv_time_stolen_time_region *reg;
@@ -73,7 +76,7 @@ static int stolen_time_dying_cpu(unsigned int cpu)
return 0;
}
-static int init_stolen_time_cpu(unsigned int cpu)
+static int stolen_time_cpu_online(unsigned int cpu)
{
struct pv_time_stolen_time_region *reg;
struct arm_smccc_res res;
@@ -103,19 +106,20 @@ static int init_stolen_time_cpu(unsigned int cpu)
return 0;
}
-static int pv_time_init_stolen_time(void)
+static int __init pv_time_init_stolen_time(void)
{
int ret;
- ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
- "hypervisor/arm/pvtime:starting",
- init_stolen_time_cpu, stolen_time_dying_cpu);
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "hypervisor/arm/pvtime:online",
+ stolen_time_cpu_online,
+ stolen_time_cpu_down_prepare);
if (ret < 0)
return ret;
return 0;
}
-static bool has_pv_steal_clock(void)
+static bool __init has_pv_steal_clock(void)
{
struct arm_smccc_res res;
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index b0e03e052dd1..88ff471b0bce 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -137,11 +137,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
* whilst unwinding the stackframe and is like a subroutine return so we use
* the PC.
*/
-static int callchain_trace(struct stackframe *frame, void *data)
+static bool callchain_trace(void *data, unsigned long pc)
{
struct perf_callchain_entry_ctx *entry = data;
- perf_callchain_store(entry, frame->pc);
- return 0;
+ perf_callchain_store(entry, pc);
+ return true;
}
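Returning bool matches the generic stack_trace_consume_fn type, so the perf callchain can now be driven by the common unwinder. Presumably the caller below reduces to something like:

    /* Sketch: returning true from callchain_trace() keeps the walk
     * going until the whole kernel stack is stored in 'entry'. */
    arch_stack_walk(callchain_trace, entry, current, regs);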
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 462f9a9cc44b..3605f77ad4df 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -69,6 +69,9 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,
+ [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
+ [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,
+
[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
@@ -302,13 +305,33 @@ static struct attribute_group armv8_pmuv3_format_attr_group = {
.attrs = armv8_pmuv3_format_attrs,
};
+static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+ u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
+
+ return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
+}
+
+static DEVICE_ATTR_RO(slots);
+
+static struct attribute *armv8_pmuv3_caps_attrs[] = {
+ &dev_attr_slots.attr,
+ NULL,
+};
+
+static struct attribute_group armv8_pmuv3_caps_attr_group = {
+ .name = "caps",
+ .attrs = armv8_pmuv3_caps_attrs,
+};
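The attribute lands under the PMU device's new caps directory in sysfs. A hedged userspace sketch; the device name used here (armv8_pmuv3_0) is hypothetical and varies by platform:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical path; substitute the local PMU instance. */
            FILE *f = fopen("/sys/bus/event_source/devices/"
                            "armv8_pmuv3_0/caps/slots", "r");
            unsigned int slots = 0;

            if (f && fscanf(f, "%x", &slots) == 1)
                    printf("PMMIR.SLOTS = %u\n", slots);
            if (f)
                    fclose(f);
            return 0;
    }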
+
/*
* Perf Events' indices
*/
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
-#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
- (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
/*
@@ -348,6 +371,73 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
#define ARMV8_IDX_TO_COUNTER(x) \
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
+/*
+ * Accessor macros for the PMEVCNTR<n>_EL0/PMEVTYPER<n>_EL0 registers.
+ * The system register name in an mrs/msr must be a compile-time
+ * constant, so a runtime counter index is expanded into a switch with
+ * one case per register.
+ */
+
+#define PMEVN_CASE(n, case_macro) \
+ case n: case_macro(n); break
+
+#define PMEVN_SWITCH(x, case_macro) \
+ do { \
+ switch (x) { \
+ PMEVN_CASE(0, case_macro); \
+ PMEVN_CASE(1, case_macro); \
+ PMEVN_CASE(2, case_macro); \
+ PMEVN_CASE(3, case_macro); \
+ PMEVN_CASE(4, case_macro); \
+ PMEVN_CASE(5, case_macro); \
+ PMEVN_CASE(6, case_macro); \
+ PMEVN_CASE(7, case_macro); \
+ PMEVN_CASE(8, case_macro); \
+ PMEVN_CASE(9, case_macro); \
+ PMEVN_CASE(10, case_macro); \
+ PMEVN_CASE(11, case_macro); \
+ PMEVN_CASE(12, case_macro); \
+ PMEVN_CASE(13, case_macro); \
+ PMEVN_CASE(14, case_macro); \
+ PMEVN_CASE(15, case_macro); \
+ PMEVN_CASE(16, case_macro); \
+ PMEVN_CASE(17, case_macro); \
+ PMEVN_CASE(18, case_macro); \
+ PMEVN_CASE(19, case_macro); \
+ PMEVN_CASE(20, case_macro); \
+ PMEVN_CASE(21, case_macro); \
+ PMEVN_CASE(22, case_macro); \
+ PMEVN_CASE(23, case_macro); \
+ PMEVN_CASE(24, case_macro); \
+ PMEVN_CASE(25, case_macro); \
+ PMEVN_CASE(26, case_macro); \
+ PMEVN_CASE(27, case_macro); \
+ PMEVN_CASE(28, case_macro); \
+ PMEVN_CASE(29, case_macro); \
+ PMEVN_CASE(30, case_macro); \
+ default: WARN(1, "Invalid PMEV* index\n"); \
+ } \
+ } while (0)
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(pmevcntr##n##_el0)
+static unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, pmevcntr##n##_el0)
+static void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, pmevtyper##n##_el0)
+static void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
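As the block comment above notes, the register name must be a build-time constant. A freestanding sketch of the same technique, trimmed to four counters (arm64-only; EL0 reads additionally require PMUSERENR_EL0.ER to be set):

    #define READ_PMEVCNTR(n) ({                                      \
            unsigned long __v;                                       \
            asm volatile("mrs %0, pmevcntr" #n "_el0" : "=r" (__v)); \
            __v;                                                     \
    })

    static unsigned long read_pmevcntr(int n)
    {
            switch (n) {
            case 0: return READ_PMEVCNTR(0);
            case 1: return READ_PMEVCNTR(1);
            case 2: return READ_PMEVCNTR(2);
            case 3: return READ_PMEVCNTR(3);
            }
            return 0;
    }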
static inline u32 armv8pmu_pmcr_read(void)
{
return read_sysreg(pmcr_el0);
@@ -365,28 +455,16 @@ static inline int armv8pmu_has_overflowed(u32 pmovsr)
return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}
-static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
-{
- return idx >= ARMV8_IDX_CYCLE_COUNTER &&
- idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
-}
-
static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
-static inline void armv8pmu_select_counter(int idx)
+static inline u32 armv8pmu_read_evcntr(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
- write_sysreg(counter, pmselr_el0);
- isb();
-}
-static inline u64 armv8pmu_read_evcntr(int idx)
-{
- armv8pmu_select_counter(idx);
- return read_sysreg(pmxevcntr_el0);
+ return read_pmevcntrn(counter);
}
static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
@@ -440,15 +518,11 @@ static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
static u64 armv8pmu_read_counter(struct perf_event *event)
{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
u64 value = 0;
- if (!armv8pmu_counter_valid(cpu_pmu, idx))
- pr_err("CPU%u reading wrong counter %d\n",
- smp_processor_id(), idx);
- else if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_IDX_CYCLE_COUNTER)
value = read_sysreg(pmccntr_el0);
else
value = armv8pmu_read_hw_counter(event);
@@ -458,8 +532,9 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
static inline void armv8pmu_write_evcntr(int idx, u64 value)
{
- armv8pmu_select_counter(idx);
- write_sysreg(value, pmxevcntr_el0);
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+
+ write_pmevcntrn(counter, value);
}
static inline void armv8pmu_write_hw_counter(struct perf_event *event,
@@ -477,16 +552,12 @@ static inline void armv8pmu_write_hw_counter(struct perf_event *event,
static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
value = armv8pmu_bias_long_counter(event, value);
- if (!armv8pmu_counter_valid(cpu_pmu, idx))
- pr_err("CPU%u writing wrong counter %d\n",
- smp_processor_id(), idx);
- else if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_IDX_CYCLE_COUNTER)
write_sysreg(value, pmccntr_el0);
else
armv8pmu_write_hw_counter(event, value);
@@ -494,9 +565,10 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
- armv8pmu_select_counter(idx);
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+
val &= ARMV8_PMU_EVTYPE_MASK;
- write_sysreg(val, pmxevtyper_el0);
+ write_pmevtypern(counter, val);
}
static inline void armv8pmu_write_event_type(struct perf_event *event)
@@ -516,7 +588,10 @@ static inline void armv8pmu_write_event_type(struct perf_event *event)
armv8pmu_write_evtype(idx - 1, hwc->config_base);
armv8pmu_write_evtype(idx, chain_evt);
} else {
- armv8pmu_write_evtype(idx, hwc->config_base);
+ if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ write_sysreg(hwc->config_base, pmccfiltr_el0);
+ else
+ armv8pmu_write_evtype(idx, hwc->config_base);
}
}
@@ -532,6 +607,11 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
static inline void armv8pmu_enable_counter(u32 mask)
{
+ /*
+ * Make sure event configuration register writes are visible before we
+ * enable the counter.
+	 */
+ isb();
write_sysreg(mask, pmcntenset_el0);
}
@@ -550,6 +630,11 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event)
static inline void armv8pmu_disable_counter(u32 mask)
{
write_sysreg(mask, pmcntenclr_el0);
+ /*
+ * Make sure the effects of disabling the counter are visible before we
+ * start configuring the event.
+ */
+ isb();
}
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
@@ -606,15 +691,10 @@ static inline u32 armv8pmu_getreset_flags(void)
static void armv8pmu_enable_event(struct perf_event *event)
{
- unsigned long flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
* Disable counter
@@ -622,7 +702,7 @@ static void armv8pmu_enable_event(struct perf_event *event)
armv8pmu_disable_event_counter(event);
/*
- * Set event (if destined for PMNx counters).
+ * Set event.
*/
armv8pmu_write_event_type(event);
@@ -635,21 +715,10 @@ static void armv8pmu_enable_event(struct perf_event *event)
* Enable counter
*/
armv8pmu_enable_event_counter(event);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_disable_event(struct perf_event *event)
{
- unsigned long flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- /*
- * Disable counter and interrupt
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
/*
* Disable counter
*/
@@ -659,30 +728,18 @@ static void armv8pmu_disable_event(struct perf_event *event)
* Disable interrupt for this counter
*/
armv8pmu_disable_event_irq(event);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
@@ -735,20 +792,16 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (!armpmu_event_set_period(event))
continue;
+ /*
+ * Perf event overflow queues processing of the event as irq_work,
+ * which is handled when the IPI_IRQ_WORK IPI is serviced.
+ */
if (perf_event_overflow(event, &data, regs))
cpu_pmu->disable(event);
}
armv8pmu_start(cpu_pmu);
- /*
- * Handle the pending perf events.
- *
- * Note: this call *must* be run with interrupts disabled. For
- * platforms that can have the PMU interrupts raised as an NMI, this
- * will not work.
- */
- irq_work_run();
-
return IRQ_HANDLED;
}
@@ -997,6 +1050,12 @@ static void __armv8pmu_probe_pmu(void *info)
bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+
+ /* store PMMIR_EL1 register for sysfs */
+ if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31)))
+ cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
+ else
+ cpu_pmu->reg_pmmir = 0;
}
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
@@ -1019,7 +1078,8 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
int (*map_event)(struct perf_event *event),
const struct attribute_group *events,
- const struct attribute_group *format)
+ const struct attribute_group *format,
+ const struct attribute_group *caps)
{
int ret = armv8pmu_probe_pmu(cpu_pmu);
if (ret)
@@ -1044,104 +1104,112 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
events : &armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
format : &armv8_pmuv3_format_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
+ caps : &armv8_pmuv3_caps_attr_group;
return 0;
}
+static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
+ int (*map_event)(struct perf_event *event))
+{
+ return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
+}
+
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_pmuv3",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_pmuv3",
+ armv8_pmuv3_map_event);
}
static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a34",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a34",
+ armv8_pmuv3_map_event);
}
static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35",
- armv8_a53_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35",
+ armv8_a53_map_event);
}
static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a53",
- armv8_a53_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53",
+ armv8_a53_map_event);
}
static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a55",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a55",
+ armv8_pmuv3_map_event);
}
static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57",
- armv8_a57_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
+ armv8_a57_map_event);
}
static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a65",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a65",
+ armv8_pmuv3_map_event);
}
static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72",
- armv8_a57_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
+ armv8_a57_map_event);
}
static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a73",
- armv8_a73_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73",
+ armv8_a73_map_event);
}
static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a75",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a75",
+ armv8_pmuv3_map_event);
}
static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a76",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a76",
+ armv8_pmuv3_map_event);
}
static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a77",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a77",
+ armv8_pmuv3_map_event);
}
static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_neoverse_e1",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1",
+ armv8_pmuv3_map_event);
}
static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_neoverse_n1",
- armv8_pmuv3_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_n1",
+ armv8_pmuv3_map_event);
}
static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder",
- armv8_thunder_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
+ armv8_thunder_map_event);
}
static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
- return armv8_pmu_init(cpu_pmu, "armv8_brcm_vulcan",
- armv8_vulcan_map_event, NULL, NULL);
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan",
+ armv8_vulcan_map_event);
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 666b225aeb3a..94e8718e7229 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -16,7 +16,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
/*
* Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
- * we're stuck with it for ABI compatability reasons.
+ * we're stuck with it for ABI compatibility reasons.
*
* For a 32-bit consumer inspecting a 32-bit task, then it will look at
* the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index 263d5fba4c8a..104101f633b1 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -29,7 +29,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
aarch64_insn_is_msr_imm(insn) ||
aarch64_insn_is_msr_reg(insn) ||
aarch64_insn_is_exception(insn) ||
- aarch64_insn_is_eret(insn))
+ aarch64_insn_is_eret(insn) ||
+ aarch64_insn_is_eret_auth(insn))
return false;
/*
@@ -42,8 +43,10 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
!= AARCH64_INSN_SPCLREG_DAIF;
/*
- * The HINT instruction is is problematic when single-stepping,
- * except for the NOP case.
+ * A HINT instruction is steppable only if it is on the whitelist
+ * above; all other HINT instructions are blocked from single
+ * stepping as they may cause an exception or other unintended
+ * behaviour.
*/
if (aarch64_insn_is_hint(insn))
return aarch64_insn_is_steppable_hint(insn);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 84ec630b8ab5..4784011cecac 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -21,6 +21,7 @@
#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
@@ -52,6 +53,7 @@
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
+#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
@@ -123,10 +125,8 @@ void arch_cpu_idle(void)
* This should do all the clock switching and wait for interrupt
* tricks
*/
- trace_cpu_idle_rcuidle(1, smp_processor_id());
cpu_do_idle();
local_irq_enable();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -241,7 +241,7 @@ static void print_pstate(struct pt_regs *regs)
const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
PSR_BTYPE_SHIFT];
- printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO BTYPE=%s)\n",
+ printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO BTYPE=%s)\n",
pstate,
pstate & PSR_N_BIT ? 'N' : 'n',
pstate & PSR_Z_BIT ? 'Z' : 'z',
@@ -253,6 +253,7 @@ static void print_pstate(struct pt_regs *regs)
pstate & PSR_F_BIT ? 'F' : 'f',
pstate & PSR_PAN_BIT ? '+' : '-',
pstate & PSR_UAO_BIT ? '+' : '-',
+ pstate & PSR_TCO_BIT ? '+' : '-',
btype_str);
}
}
@@ -338,6 +339,7 @@ void flush_thread(void)
tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
flush_tagged_addr_state();
+ flush_mte_state();
}
void release_thread(struct task_struct *dead_task)
@@ -370,6 +372,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
dst->thread.sve_state = NULL;
clear_tsk_thread_flag(dst, TIF_SVE);
+ /* clear any pending asynchronous tag fault raised by the parent */
+ clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
+
return 0;
}
@@ -423,8 +428,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
cpus_have_const_cap(ARM64_HAS_UAO))
childregs->pstate |= PSR_UAO_BIT;
- if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
- set_ssbs_bit(childregs);
+ spectre_v4_enable_task_mitigation(p);
if (system_uses_irq_prio_masking())
childregs->pmr_save = GIC_PRIO_IRQON;
@@ -474,8 +478,6 @@ void uao_thread_switch(struct task_struct *next)
*/
static void ssbs_thread_switch(struct task_struct *next)
{
- struct pt_regs *regs = task_pt_regs(next);
-
/*
* Nothing to do for kernel threads, but 'regs' may be junk
* (e.g. idle task) so check the flags and bail early.
@@ -487,18 +489,10 @@ static void ssbs_thread_switch(struct task_struct *next)
* If all CPUs implement the SSBS extension, then we just need to
* context-switch the PSTATE field.
*/
- if (cpu_have_feature(cpu_feature(SSBS)))
+ if (cpus_have_const_cap(ARM64_SSBS))
return;
- /* If the mitigation is enabled, then we leave SSBS clear. */
- if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
- test_tsk_thread_flag(next, TIF_SSBD))
- return;
-
- if (compat_user_mode(regs))
- set_compat_ssbs_bit(regs);
- else if (user_mode(regs))
- set_ssbs_bit(regs);
+ spectre_v4_enable_task_mitigation(next);
}
/*
@@ -516,6 +510,39 @@ static void entry_task_switch(struct task_struct *next)
}
/*
+ * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
+ * Assuming the virtual counter is enabled at the beginning of times:
+ *
+ * - disable access when switching from a 64bit task to a 32bit task
+ * - enable access when switching from a 32bit task to a 64bit task
+ */
+static void erratum_1418040_thread_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ bool prev32, next32;
+ u64 val;
+
+ if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
+ cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
+ return;
+
+ prev32 = is_compat_thread(task_thread_info(prev));
+ next32 = is_compat_thread(task_thread_info(next));
+
+ if (prev32 == next32)
+ return;
+
+ val = read_sysreg(cntkctl_el1);
+
+ if (!next32)
+ val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+ else
+ val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+ write_sysreg(val, cntkctl_el1);
+}
+
+/*
* Thread switching.
*/
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
@@ -530,6 +557,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
uao_thread_switch(next);
ssbs_thread_switch(next);
+ erratum_1418040_thread_switch(prev, next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -539,6 +567,13 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
*/
dsb(ish);
+ /*
+ * MTE thread switching must happen after the DSB above to ensure that
+ * any asynchronous tag check faults have been logged in the TFSR*_EL1
+ * registers.
+ */
+ mte_thread_switch(next);
+
/* the actual thread switch */
last = cpu_switch_to(prev, next);
@@ -588,6 +623,11 @@ void arch_setup_new_exec(void)
current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
ptrauth_thread_init_user(current);
+
+ if (task_spec_ssb_noexec(current)) {
+ arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
+ PR_SPEC_ENABLE);
+ }
}
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
@@ -596,11 +636,18 @@ void arch_setup_new_exec(void)
*/
static unsigned int tagged_addr_disabled;
-long set_tagged_addr_ctrl(unsigned long arg)
+long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
- if (is_compat_task())
+ unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
+ struct thread_info *ti = task_thread_info(task);
+
+ if (is_compat_thread(ti))
return -EINVAL;
- if (arg & ~PR_TAGGED_ADDR_ENABLE)
+
+ if (system_supports_mte())
+ valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
+
+ if (arg & ~valid_mask)
return -EINVAL;
/*
@@ -610,20 +657,28 @@ long set_tagged_addr_ctrl(unsigned long arg)
if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
return -EINVAL;
- update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
+ if (set_mte_ctrl(task, arg) != 0)
+ return -EINVAL;
+
+ update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
return 0;
}
-long get_tagged_addr_ctrl(void)
+long get_tagged_addr_ctrl(struct task_struct *task)
{
- if (is_compat_task())
+ long ret = 0;
+ struct thread_info *ti = task_thread_info(task);
+
+ if (is_compat_thread(ti))
return -EINVAL;
- if (test_thread_flag(TIF_TAGGED_ADDR))
- return PR_TAGGED_ADDR_ENABLE;
+ if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
+ ret = PR_TAGGED_ADDR_ENABLE;
- return 0;
+ ret |= get_mte_ctrl(task);
+
+ return ret;
}
/*
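The set_tagged_addr_ctrl()/get_tagged_addr_ctrl() changes above extend the PR_SET_TAGGED_ADDR_CTRL prctl() so that the MTE tag check fault mode and tag inclusion mask can be configured alongside the tagged address ABI. A minimal userspace sketch of what a caller might do, assuming the PR_MTE_* constants this series adds to the uapi <linux/prctl.h> (illustrative only, not part of the patch):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL		/* pre-5.4 headers */
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_GET_TAGGED_ADDR_CTRL	56
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif
#ifndef PR_MTE_TCF_SYNC			/* added by this series */
#define PR_MTE_TCF_SYNC		(1UL << 1)
#define PR_MTE_TAG_SHIFT	3
#endif

int main(void)
{
	/*
	 * Enable the tagged address ABI with synchronous tag check faults;
	 * the 0xfffe inclusion mask allows IRG to generate any tag but 0.
	 */
	unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
			     (0xfffeUL << PR_MTE_TAG_SHIFT);

	if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0))
		perror("PR_SET_TAGGED_ADDR_CTRL");

	printf("tagged addr ctrl: 0x%lx\n",
	       (unsigned long)prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
	return 0;
}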
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
new file mode 100644
index 000000000000..68b710f1b43f
--- /dev/null
+++ b/arch/arm64/kernel/proton-pack.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
+ * detailed at:
+ *
+ * https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
+ *
+ * This code was originally written hastily under an awful lot of stress and so
+ * aspects of it are somewhat hacky. Unfortunately, changing anything in here
+ * instantly makes me feel ill. Thanks, Jann. Thann.
+ *
+ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
+ * Copyright (C) 2020 Google LLC
+ *
+ * "If there's something strange in your neighbourhood, who you gonna call?"
+ *
+ * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/spectre.h>
+#include <asm/traps.h>
+
+/*
+ * We try to ensure that the mitigation state can never change as the result of
+ * onlining a late CPU.
+ */
+static void update_mitigation_state(enum mitigation_state *oldp,
+ enum mitigation_state new)
+{
+ enum mitigation_state state;
+
+ do {
+ state = READ_ONCE(*oldp);
+ if (new <= state)
+ break;
+
+ /* Userspace almost certainly can't deal with this. */
+ if (WARN_ON(system_capabilities_finalized()))
+ break;
+ } while (cmpxchg_relaxed(oldp, state, new) != state);
+}
+
+/*
+ * Spectre v1.
+ *
+ * The kernel can't protect userspace for this one: it's each person for
+ * themselves. Advertise what we're doing and be done with it.
+ */
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+/*
+ * Spectre v2.
+ *
+ * This one sucks. A CPU is either:
+ *
+ * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
+ * - Mitigated in hardware and listed in our "safe list".
+ * - Mitigated in software by firmware.
+ * - Mitigated in software by a CPU-specific dance in the kernel.
+ * - Vulnerable.
+ *
+ * It's not unlikely for different CPUs in a big.LITTLE system to fall into
+ * different camps.
+ */
+static enum mitigation_state spectre_v2_state;
+
+static bool __read_mostly __nospectre_v2;
+static int __init parse_spectre_v2_param(char *str)
+{
+ __nospectre_v2 = true;
+ return 0;
+}
+early_param("nospectre_v2", parse_spectre_v2_param);
+
+static bool spectre_v2_mitigations_off(void)
+{
+ bool ret = __nospectre_v2 || cpu_mitigations_off();
+
+ if (ret)
+ pr_info_once("spectre-v2 mitigation disabled by command line option\n");
+
+ return ret;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ switch (spectre_v2_state) {
+ case SPECTRE_UNAFFECTED:
+ return sprintf(buf, "Not affected\n");
+ case SPECTRE_MITIGATED:
+ return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+ case SPECTRE_VULNERABLE:
+ fallthrough;
+ default:
+ return sprintf(buf, "Vulnerable\n");
+ }
+}
+
+static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
+{
+ u64 pfr0;
+ static const struct midr_range spectre_v2_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+ { /* sentinel */ }
+ };
+
+ /* If the CPU has CSV2 set, we're safe */
+ pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+ return SPECTRE_UNAFFECTED;
+
+ /* Alternatively, we have a list of unaffected CPUs */
+ if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+ return SPECTRE_UNAFFECTED;
+
+ return SPECTRE_VULNERABLE;
+}
+
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED (1)
+
+static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+ int ret;
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+ ret = res.a0;
+ switch (ret) {
+ case SMCCC_RET_SUCCESS:
+ return SPECTRE_MITIGATED;
+ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+ return SPECTRE_UNAFFECTED;
+ default:
+ fallthrough;
+ case SMCCC_RET_NOT_SUPPORTED:
+ return SPECTRE_VULNERABLE;
+ }
+}
+
+bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
+ return false;
+
+ if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
+ return false;
+
+ return true;
+}
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+enum mitigation_state arm64_get_spectre_v2_state(void)
+{
+ return spectre_v2_state;
+}
+
+#ifdef CONFIG_KVM
+#include <asm/cacheflush.h>
+#include <asm/kvm_asm.h>
+
+atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
+ int i;
+
+ for (i = 0; i < SZ_2K; i += 0x80)
+ memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+ __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+{
+ static DEFINE_RAW_SPINLOCK(bp_lock);
+ int cpu, slot = -1;
+ const char *hyp_vecs_start = __smccc_workaround_1_smc;
+ const char *hyp_vecs_end = __smccc_workaround_1_smc +
+ __SMCCC_WORKAROUND_1_SMC_SZ;
+
+ /*
+ * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
+ * we're a guest. Skip the hyp-vectors work.
+ */
+ if (!is_hyp_mode_available()) {
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ return;
+ }
+
+ raw_spin_lock(&bp_lock);
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+ slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+ break;
+ }
+ }
+
+ if (slot == -1) {
+ slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+ BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
+ __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+ }
+
+ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ raw_spin_unlock(&bp_lock);
+}
+#else
+static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+{
+ __this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif /* CONFIG_KVM */
+
+static void call_smc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void call_hvc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void qcom_link_stack_sanitisation(void)
+{
+ u64 tmp;
+
+ asm volatile("mov %0, x30 \n"
+ ".rept 16 \n"
+ "bl . + 4 \n"
+ ".endr \n"
+ "mov x30, %0 \n"
+ : "=&r" (tmp));
+}
+
+static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
+{
+ bp_hardening_cb_t cb;
+ enum mitigation_state state;
+
+ state = spectre_v2_get_cpu_fw_mitigation_state();
+ if (state != SPECTRE_MITIGATED)
+ return state;
+
+ if (spectre_v2_mitigations_off())
+ return SPECTRE_VULNERABLE;
+
+ switch (arm_smccc_1_1_get_conduit()) {
+ case SMCCC_CONDUIT_HVC:
+ cb = call_hvc_arch_workaround_1;
+ break;
+
+ case SMCCC_CONDUIT_SMC:
+ cb = call_smc_arch_workaround_1;
+ break;
+
+ default:
+ return SPECTRE_VULNERABLE;
+ }
+
+ install_bp_hardening_cb(cb);
+ return SPECTRE_MITIGATED;
+}
+
+static enum mitigation_state spectre_v2_enable_sw_mitigation(void)
+{
+ u32 midr;
+
+ if (spectre_v2_mitigations_off())
+ return SPECTRE_VULNERABLE;
+
+ midr = read_cpuid_id();
+ if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
+ ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
+ return SPECTRE_VULNERABLE;
+
+ install_bp_hardening_cb(qcom_link_stack_sanitisation);
+ return SPECTRE_MITIGATED;
+}
+
+void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
+{
+ enum mitigation_state state;
+
+ WARN_ON(preemptible());
+
+ state = spectre_v2_get_cpu_hw_mitigation_state();
+ if (state == SPECTRE_VULNERABLE)
+ state = spectre_v2_enable_fw_mitigation();
+ if (state == SPECTRE_VULNERABLE)
+ state = spectre_v2_enable_sw_mitigation();
+
+ update_mitigation_state(&spectre_v2_state, state);
+}
+
+/*
+ * Spectre v4.
+ *
+ * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
+ * either:
+ *
+ * - Mitigated in hardware and listed in our "safe list".
+ * - Mitigated in hardware via PSTATE.SSBS.
+ * - Mitigated in software by firmware (sometimes referred to as SSBD).
+ *
+ * Wait, that doesn't sound so bad, does it? Keep reading...
+ *
+ * A major source of headaches is that the software mitigation is enabled
+ * on a per-task basis, but can also be forced on for the kernel, necessitating
+ * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
+ * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
+ * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
+ * so you can have systems that have both firmware and SSBS mitigations. This
+ * means we actually have to reject late onlining of CPUs with mitigations if
+ * all of the currently onlined CPUs are safelisted, as the mitigation tends to
+ * be opt-in for userspace. Yes, really, the cure is worse than the disease.
+ *
+ * The only good part is that if the firmware mitigation is present, then it is
+ * present for all CPUs, meaning we don't have to worry about late onlining of a
+ * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
+ *
+ * Give me a VAX-11/780 any day of the week...
+ */
+static enum mitigation_state spectre_v4_state;
+
+/* This is the per-cpu state tracking whether we need to talk to firmware */
+DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+enum spectre_v4_policy {
+ SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
+ SPECTRE_V4_POLICY_MITIGATION_ENABLED,
+ SPECTRE_V4_POLICY_MITIGATION_DISABLED,
+};
+
+static enum spectre_v4_policy __read_mostly __spectre_v4_policy;
+
+static const struct spectre_v4_param {
+ const char *str;
+ enum spectre_v4_policy policy;
+} spectre_v4_params[] = {
+ { "force-on", SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
+ { "force-off", SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
+ { "kernel", SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
+};
+static int __init parse_spectre_v4_param(char *str)
+{
+ int i;
+
+ if (!str || !str[0])
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
+ const struct spectre_v4_param *param = &spectre_v4_params[i];
+
+ if (strncmp(str, param->str, strlen(param->str)))
+ continue;
+
+ __spectre_v4_policy = param->policy;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+early_param("ssbd", parse_spectre_v4_param);
+
+/*
+ * Because this was all written in a rush by people working in different silos,
+ * we've ended up with multiple command line options to control the same thing.
+ * Wrap these up in some helpers, which prefer disabling the mitigation if faced
+ * with contradictory parameters. The mitigation is always either "off",
+ * "dynamic" or "on".
+ */
+static bool spectre_v4_mitigations_off(void)
+{
+ bool ret = cpu_mitigations_off() ||
+ __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
+
+ if (ret)
+ pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
+
+ return ret;
+}
+
+/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
+static bool spectre_v4_mitigations_dynamic(void)
+{
+ return !spectre_v4_mitigations_off() &&
+ __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
+}
+
+static bool spectre_v4_mitigations_on(void)
+{
+ return !spectre_v4_mitigations_off() &&
+ __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ switch (spectre_v4_state) {
+ case SPECTRE_UNAFFECTED:
+ return sprintf(buf, "Not affected\n");
+ case SPECTRE_MITIGATED:
+ return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
+ case SPECTRE_VULNERABLE:
+ fallthrough;
+ default:
+ return sprintf(buf, "Vulnerable\n");
+ }
+}
+
+enum mitigation_state arm64_get_spectre_v4_state(void)
+{
+ return spectre_v4_state;
+}
+
+static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
+{
+ static const struct midr_range spectre_v4_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+ { /* sentinel */ },
+ };
+
+ if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
+ return SPECTRE_UNAFFECTED;
+
+ /* CPU features are detected first */
+ if (this_cpu_has_cap(ARM64_SSBS))
+ return SPECTRE_MITIGATED;
+
+ return SPECTRE_VULNERABLE;
+}
+
+static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
+{
+ int ret;
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+
+ ret = res.a0;
+ switch (ret) {
+ case SMCCC_RET_SUCCESS:
+ return SPECTRE_MITIGATED;
+ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+ fallthrough;
+ case SMCCC_RET_NOT_REQUIRED:
+ return SPECTRE_UNAFFECTED;
+ default:
+ fallthrough;
+ case SMCCC_RET_NOT_SUPPORTED:
+ return SPECTRE_VULNERABLE;
+ }
+}
+
+bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
+{
+ enum mitigation_state state;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ state = spectre_v4_get_cpu_hw_mitigation_state();
+ if (state == SPECTRE_VULNERABLE)
+ state = spectre_v4_get_cpu_fw_mitigation_state();
+
+ return state != SPECTRE_UNAFFECTED;
+}
+
+static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
+{
+ if (user_mode(regs))
+ return 1;
+
+ if (instr & BIT(PSTATE_Imm_shift))
+ regs->pstate |= PSR_SSBS_BIT;
+ else
+ regs->pstate &= ~PSR_SSBS_BIT;
+
+ arm64_skip_faulting_instruction(regs, 4);
+ return 0;
+}
+
+static struct undef_hook ssbs_emulation_hook = {
+ .instr_mask = ~(1U << PSTATE_Imm_shift),
+ .instr_val = 0xd500401f | PSTATE_SSBS,
+ .fn = ssbs_emulation_handler,
+};
+
+static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
+{
+ static bool undef_hook_registered = false;
+ static DEFINE_RAW_SPINLOCK(hook_lock);
+ enum mitigation_state state;
+
+ /*
+ * If the system is mitigated but this CPU doesn't have SSBS, then
+ * we must be on the safelist and there's nothing more to do.
+ */
+ state = spectre_v4_get_cpu_hw_mitigation_state();
+ if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
+ return state;
+
+ raw_spin_lock(&hook_lock);
+ if (!undef_hook_registered) {
+ register_undef_hook(&ssbs_emulation_hook);
+ undef_hook_registered = true;
+ }
+ raw_spin_unlock(&hook_lock);
+
+ if (spectre_v4_mitigations_off()) {
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
+ asm volatile(SET_PSTATE_SSBS(1));
+ return SPECTRE_VULNERABLE;
+ }
+
+ /* SCTLR_EL1.DSSBS was initialised to 0 during boot */
+ asm volatile(SET_PSTATE_SSBS(0));
+ return SPECTRE_MITIGATED;
+}
+
+/*
+ * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
+ * we fall through and check whether firmware needs to be called on this CPU.
+ */
+void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
+ __le32 *origptr,
+ __le32 *updptr, int nr_inst)
+{
+ BUG_ON(nr_inst != 1); /* Branch -> NOP */
+
+ if (spectre_v4_mitigations_off())
+ return;
+
+ if (cpus_have_final_cap(ARM64_SSBS))
+ return;
+
+ if (spectre_v4_mitigations_dynamic())
+ *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+/*
+ * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
+ * to call into firmware to adjust the mitigation state.
+ */
+void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
+ __le32 *origptr,
+ __le32 *updptr, int nr_inst)
+{
+ u32 insn;
+
+ BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */
+
+ switch (arm_smccc_1_1_get_conduit()) {
+ case SMCCC_CONDUIT_HVC:
+ insn = aarch64_insn_get_hvc_value();
+ break;
+ case SMCCC_CONDUIT_SMC:
+ insn = aarch64_insn_get_smc_value();
+ break;
+ default:
+ return;
+ }
+
+ *updptr = cpu_to_le32(insn);
+}
+
+static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
+{
+ enum mitigation_state state;
+
+ state = spectre_v4_get_cpu_fw_mitigation_state();
+ if (state != SPECTRE_MITIGATED)
+ return state;
+
+ if (spectre_v4_mitigations_off()) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
+ return SPECTRE_VULNERABLE;
+ }
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);
+
+ if (spectre_v4_mitigations_dynamic())
+ __this_cpu_write(arm64_ssbd_callback_required, 1);
+
+ return SPECTRE_MITIGATED;
+}
+
+void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
+{
+ enum mitigation_state state;
+
+ WARN_ON(preemptible());
+
+ state = spectre_v4_enable_hw_mitigation();
+ if (state == SPECTRE_VULNERABLE)
+ state = spectre_v4_enable_fw_mitigation();
+
+ update_mitigation_state(&spectre_v4_state, state);
+}
+
+static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
+{
+ u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+ if (state)
+ regs->pstate |= bit;
+ else
+ regs->pstate &= ~bit;
+}
+
+void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
+{
+ struct pt_regs *regs = task_pt_regs(tsk);
+ bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
+
+ if (spectre_v4_mitigations_off())
+ ssbs = true;
+ else if (spectre_v4_mitigations_dynamic() && !kthread)
+ ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
+
+ __update_pstate_ssbs(regs, ssbs);
+}
+
+/*
+ * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
+ * This is interesting because the "speculation disabled" behaviour can be
+ * configured so that it is preserved across exec(), which means that the
+ * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
+ * from userspace.
+ */
+static void ssbd_prctl_enable_mitigation(struct task_struct *task)
+{
+ task_clear_spec_ssb_noexec(task);
+ task_set_spec_ssb_disable(task);
+ set_tsk_thread_flag(task, TIF_SSBD);
+}
+
+static void ssbd_prctl_disable_mitigation(struct task_struct *task)
+{
+ task_clear_spec_ssb_noexec(task);
+ task_clear_spec_ssb_disable(task);
+ clear_tsk_thread_flag(task, TIF_SSBD);
+}
+
+static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+ switch (ctrl) {
+ case PR_SPEC_ENABLE:
+ /* Enable speculation: disable mitigation */
+ /*
+ * Force disabled speculation prevents it from being
+ * re-enabled.
+ */
+ if (task_spec_ssb_force_disable(task))
+ return -EPERM;
+
+ /*
+ * If the mitigation is forced on, then speculation is forced
+ * off and we again prevent it from being re-enabled.
+ */
+ if (spectre_v4_mitigations_on())
+ return -EPERM;
+
+ ssbd_prctl_disable_mitigation(task);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ /* Force disable speculation: force enable mitigation */
+ /*
+ * If the mitigation is forced off, then speculation is forced
+ * on and we prevent it from being disabled.
+ */
+ if (spectre_v4_mitigations_off())
+ return -EPERM;
+
+ task_set_spec_ssb_force_disable(task);
+ fallthrough;
+ case PR_SPEC_DISABLE:
+ /* Disable speculation: enable mitigation */
+ /* Same as PR_SPEC_FORCE_DISABLE */
+ if (spectre_v4_mitigations_off())
+ return -EPERM;
+
+ ssbd_prctl_enable_mitigation(task);
+ break;
+ case PR_SPEC_DISABLE_NOEXEC:
+ /* Disable speculation until execve(): enable mitigation */
+ /*
+ * If the mitigation state is forced one way or the other, then
+ * we must fail now before we try to toggle it on execve().
+ */
+ if (task_spec_ssb_force_disable(task) ||
+ spectre_v4_mitigations_off() ||
+ spectre_v4_mitigations_on()) {
+ return -EPERM;
+ }
+
+ ssbd_prctl_enable_mitigation(task);
+ task_set_spec_ssb_noexec(task);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ spectre_v4_enable_task_mitigation(task);
+ return 0;
+}
+
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssbd_prctl_set(task, ctrl);
+ default:
+ return -ENODEV;
+ }
+}
+
+static int ssbd_prctl_get(struct task_struct *task)
+{
+ switch (spectre_v4_state) {
+ case SPECTRE_UNAFFECTED:
+ return PR_SPEC_NOT_AFFECTED;
+ case SPECTRE_MITIGATED:
+ if (spectre_v4_mitigations_on())
+ return PR_SPEC_NOT_AFFECTED;
+
+ if (spectre_v4_mitigations_dynamic())
+ break;
+
+ /* Mitigations are disabled, so we're vulnerable. */
+ fallthrough;
+ case SPECTRE_VULNERABLE:
+ fallthrough;
+ default:
+ return PR_SPEC_ENABLE;
+ }
+
+ /* Check the mitigation state for this task */
+ if (task_spec_ssb_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+
+ if (task_spec_ssb_noexec(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
+
+ if (task_spec_ssb_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssbd_prctl_get(task);
+ default:
+ return -ENODEV;
+ }
+}
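As the comment above ssbd_prctl_enable_mitigation() notes, the Spectre-v4 mitigation is driven from the generic PR_{SET,GET}_SPECULATION_CTRL prctl(). A minimal userspace sketch of opting a task in, using the standard <linux/prctl.h> constants (illustrative only, not part of the patch):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>	/* PR_SPEC_* */

int main(void)
{
	/*
	 * Disable speculative store bypass (i.e. enable the mitigation)
	 * for this task. The state is preserved across exec() unless
	 * PR_SPEC_DISABLE_NOEXEC is used instead.
	 */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	/*
	 * Returns PR_SPEC_NOT_AFFECTED, PR_SPEC_ENABLE, or PR_SPEC_PRCTL
	 * OR'd with the current PR_SPEC_* setting, mirroring
	 * ssbd_prctl_get() above.
	 */
	printf("ssb: 0x%x\n",
	       (int)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  0, 0, 0));
	return 0;
}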
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index d8ebfd813e28..f49b349e16a3 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -34,6 +34,7 @@
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
+#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
@@ -1032,6 +1033,35 @@ static int pac_generic_keys_set(struct task_struct *target,
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+static int tagged_addr_ctrl_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ long ctrl = get_tagged_addr_ctrl(target);
+
+ if (IS_ERR_VALUE(ctrl))
+ return ctrl;
+
+ return membuf_write(&to, &ctrl, sizeof(ctrl));
+}
+
+static int tagged_addr_ctrl_set(struct task_struct *target, const struct
+ user_regset *regset, unsigned int pos,
+ unsigned int count, const void *kbuf, const
+ void __user *ubuf)
+{
+ int ret;
+ long ctrl;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
+ if (ret)
+ return ret;
+
+ return set_tagged_addr_ctrl(target, ctrl);
+}
+#endif
+
enum aarch64_regset {
REGSET_GPR,
REGSET_FPR,
@@ -1051,6 +1081,9 @@ enum aarch64_regset {
REGSET_PACG_KEYS,
#endif
#endif
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+ REGSET_TAGGED_ADDR_CTRL,
+#endif
};
static const struct user_regset aarch64_regsets[] = {
@@ -1148,6 +1181,16 @@ static const struct user_regset aarch64_regsets[] = {
},
#endif
#endif
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+ [REGSET_TAGGED_ADDR_CTRL] = {
+ .core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
+ .n = 1,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .regset_get = tagged_addr_ctrl_get,
+ .set = tagged_addr_ctrl_set,
+ },
+#endif
};
static const struct user_regset_view user_aarch64_view = {
@@ -1691,6 +1734,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
+ switch (request) {
+ case PTRACE_PEEKMTETAGS:
+ case PTRACE_POKEMTETAGS:
+ return mte_ptrace_copy_tags(child, request, addr, data);
+ }
+
return ptrace_request(child, request, addr, data);
}
@@ -1793,7 +1842,7 @@ void syscall_trace_exit(struct pt_regs *regs)
* We also reserve IL for the kernel; SS is handled dynamically.
*/
#define SPSR_EL1_AARCH64_RES0_BITS \
- (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+ (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
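The new REGSET_TAGGED_ADDR_CTRL entry exposes the same control word to debuggers via the regset API. A hedged sketch of how a tracer might read it with PTRACE_GETREGSET, assuming the NT_ARM_TAGGED_ADDR_CTRL note type (0x409) from this series' uapi <linux/elf.h> update:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>

#ifndef NT_ARM_TAGGED_ADDR_CTRL
#define NT_ARM_TAGGED_ADDR_CTRL	0x409	/* added by this series */
#endif

/* The tracee must already be attached and stopped (PTRACE_ATTACH + waitpid). */
long read_tagged_addr_ctrl(pid_t pid)
{
	long ctrl = 0;
	struct iovec iov = {
		.iov_base = &ctrl,
		.iov_len  = sizeof(ctrl),	/* matches the regset's sizeof(long) */
	};

	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_TAGGED_ADDR_CTRL, &iov))
		perror("PTRACE_GETREGSET");

	return ctrl;
}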
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index 542d6edc6806..84eec95ec06c 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -36,18 +36,6 @@ SYM_CODE_START(arm64_relocate_new_kernel)
mov x14, xzr /* x14 = entry ptr */
mov x13, xzr /* x13 = copy dest */
- /* Clear the sctlr_el2 flags. */
- mrs x0, CurrentEL
- cmp x0, #CurrentEL_EL2
- b.ne 1f
- mrs x0, sctlr_el2
- mov_q x1, SCTLR_ELx_FLAGS
- bic x0, x0, x1
- pre_disable_mmu_workaround
- msr sctlr_el2, x0
- isb
-1:
-
/* Check if the new image needs relocation. */
tbnz x16, IND_DONE_BIT, .Ldone
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index a5e8b3b9d798..a6d18755652f 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -18,16 +18,16 @@ struct return_address_data {
void *addr;
};
-static int save_return_addr(struct stackframe *frame, void *d)
+static bool save_return_addr(void *d, unsigned long pc)
{
struct return_address_data *data = d;
if (!data->level) {
- data->addr = (void *)frame->pc;
- return 1;
+ data->addr = (void *)pc;
+ return false;
} else {
--data->level;
- return 0;
+ return true;
}
}
NOKPROBE_SYMBOL(save_return_addr);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 77c4c9bad1b8..53acbeca4f57 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -280,7 +280,6 @@ u64 cpu_logical_map(int cpu)
{
return __cpu_logical_map[cpu];
}
-EXPORT_SYMBOL_GPL(cpu_logical_map);
void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 3b4f31f35e45..bdcaaf091e1e 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -244,7 +244,8 @@ static int preserve_sve_context(struct sve_context __user *ctx)
if (vq) {
/*
* This assumes that the SVE state has already been saved to
- * the task struct by calling preserve_fpsimd_context().
+ * the task struct by calling the function
+ * fpsimd_signal_preserve_current_state().
*/
err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
current->thread.sve_state,
@@ -748,6 +749,9 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
regs->pstate |= PSR_BTYPE_C;
}
+ /* TCO (Tag Check Override) always cleared for signal handlers */
+ regs->pstate &= ~PSR_TCO_BIT;
+
if (ka->sa.sa_flags & SA_RESTORER)
sigtramp = ka->sa.sa_restorer;
else
@@ -932,6 +936,12 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
+ if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
+ clear_thread_flag(TIF_MTE_ASYNC_FAULT);
+ send_sig_fault(SIGSEGV, SEGV_MTEAERR,
+ (void __user *)NULL, current);
+ }
+
if (thread_flags & _TIF_SIGPENDING)
do_signal(regs);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 03957a1ae6c0..355ee9eed4dd 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -151,7 +151,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
break;
}
pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
- /* Fall through */
+ fallthrough;
case CPU_STUCK_IN_KERNEL:
pr_crit("CPU%u: is stuck in kernel\n", cpu);
if (status & CPU_STUCK_REASON_52_BIT_VA)
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index c8a3fee00c11..5892e79fa429 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -83,9 +83,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
/*
* We write the release address as LE regardless of the native
- * endianess of the kernel. Therefore, any boot-loaders that
+ * endianness of the kernel. Therefore, any boot-loaders that
* read this address need to convert this address to the
- * boot-loader's endianess before jumping. This is mandated by
+ * boot-loader's endianness before jumping. This is mandated by
* the boot protocol.
*/
writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
deleted file mode 100644
index b26955f56750..000000000000
--- a/arch/arm64/kernel/ssbd.c
+++ /dev/null
@@ -1,129 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
- */
-
-#include <linux/compat.h>
-#include <linux/errno.h>
-#include <linux/prctl.h>
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-#include <linux/thread_info.h>
-
-#include <asm/cpufeature.h>
-
-static void ssbd_ssbs_enable(struct task_struct *task)
-{
- u64 val = is_compat_thread(task_thread_info(task)) ?
- PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
-
- task_pt_regs(task)->pstate |= val;
-}
-
-static void ssbd_ssbs_disable(struct task_struct *task)
-{
- u64 val = is_compat_thread(task_thread_info(task)) ?
- PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
-
- task_pt_regs(task)->pstate &= ~val;
-}
-
-/*
- * prctl interface for SSBD
- */
-static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
-{
- int state = arm64_get_ssbd_state();
-
- /* Unsupported */
- if (state == ARM64_SSBD_UNKNOWN)
- return -ENODEV;
-
- /* Treat the unaffected/mitigated state separately */
- if (state == ARM64_SSBD_MITIGATED) {
- switch (ctrl) {
- case PR_SPEC_ENABLE:
- return -EPERM;
- case PR_SPEC_DISABLE:
- case PR_SPEC_FORCE_DISABLE:
- return 0;
- }
- }
-
- /*
- * Things are a bit backward here: the arm64 internal API
- * *enables the mitigation* when the userspace API *disables
- * speculation*. So much fun.
- */
- switch (ctrl) {
- case PR_SPEC_ENABLE:
- /* If speculation is force disabled, enable is not allowed */
- if (state == ARM64_SSBD_FORCE_ENABLE ||
- task_spec_ssb_force_disable(task))
- return -EPERM;
- task_clear_spec_ssb_disable(task);
- clear_tsk_thread_flag(task, TIF_SSBD);
- ssbd_ssbs_enable(task);
- break;
- case PR_SPEC_DISABLE:
- if (state == ARM64_SSBD_FORCE_DISABLE)
- return -EPERM;
- task_set_spec_ssb_disable(task);
- set_tsk_thread_flag(task, TIF_SSBD);
- ssbd_ssbs_disable(task);
- break;
- case PR_SPEC_FORCE_DISABLE:
- if (state == ARM64_SSBD_FORCE_DISABLE)
- return -EPERM;
- task_set_spec_ssb_disable(task);
- task_set_spec_ssb_force_disable(task);
- set_tsk_thread_flag(task, TIF_SSBD);
- ssbd_ssbs_disable(task);
- break;
- default:
- return -ERANGE;
- }
-
- return 0;
-}
-
-int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
- unsigned long ctrl)
-{
- switch (which) {
- case PR_SPEC_STORE_BYPASS:
- return ssbd_prctl_set(task, ctrl);
- default:
- return -ENODEV;
- }
-}
-
-static int ssbd_prctl_get(struct task_struct *task)
-{
- switch (arm64_get_ssbd_state()) {
- case ARM64_SSBD_UNKNOWN:
- return -ENODEV;
- case ARM64_SSBD_FORCE_ENABLE:
- return PR_SPEC_DISABLE;
- case ARM64_SSBD_KERNEL:
- if (task_spec_ssb_force_disable(task))
- return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
- if (task_spec_ssb_disable(task))
- return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
- return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
- case ARM64_SSBD_FORCE_DISABLE:
- return PR_SPEC_ENABLE;
- default:
- return PR_SPEC_NOT_AFFECTED;
- }
-}
-
-int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
-{
- switch (which) {
- case PR_SPEC_STORE_BYPASS:
- return ssbd_prctl_get(task);
- default:
- return -ENODEV;
- }
-}
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 2dd8e3b8b94b..fa56af1a59c3 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -118,12 +118,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
NOKPROBE_SYMBOL(unwind_frame);
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
- int (*fn)(struct stackframe *, void *), void *data)
+ bool (*fn)(void *, unsigned long), void *data)
{
while (1) {
int ret;
- if (fn(frame, data))
+ if (!fn(data, frame->pc))
break;
ret = unwind_frame(tsk, frame);
if (ret < 0)
@@ -132,84 +132,89 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
}
NOKPROBE_SYMBOL(walk_stackframe);
-#ifdef CONFIG_STACKTRACE
-struct stack_trace_data {
- struct stack_trace *trace;
- unsigned int no_sched_functions;
- unsigned int skip;
-};
-
-static int save_trace(struct stackframe *frame, void *d)
+static void dump_backtrace_entry(unsigned long where, const char *loglvl)
{
- struct stack_trace_data *data = d;
- struct stack_trace *trace = data->trace;
- unsigned long addr = frame->pc;
-
- if (data->no_sched_functions && in_sched_functions(addr))
- return 0;
- if (data->skip) {
- data->skip--;
- return 0;
- }
-
- trace->entries[trace->nr_entries++] = addr;
-
- return trace->nr_entries >= trace->max_entries;
+ printk("%s %pS\n", loglvl, (void *)where);
}
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
- struct stack_trace_data data;
struct stackframe frame;
+ int skip = 0;
- data.trace = trace;
- data.skip = trace->skip;
- data.no_sched_functions = 0;
+ pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
- start_backtrace(&frame, regs->regs[29], regs->pc);
- walk_stackframe(current, &frame, save_trace, &data);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_regs);
+ if (regs) {
+ if (user_mode(regs))
+ return;
+ skip = 1;
+ }
-static noinline void __save_stack_trace(struct task_struct *tsk,
- struct stack_trace *trace, unsigned int nosched)
-{
- struct stack_trace_data data;
- struct stackframe frame;
+ if (!tsk)
+ tsk = current;
if (!try_get_task_stack(tsk))
return;
- data.trace = trace;
- data.skip = trace->skip;
- data.no_sched_functions = nosched;
-
- if (tsk != current) {
- start_backtrace(&frame, thread_saved_fp(tsk),
- thread_saved_pc(tsk));
- } else {
- /* We don't want this function nor the caller */
- data.skip += 2;
+ if (tsk == current) {
start_backtrace(&frame,
(unsigned long)__builtin_frame_address(0),
- (unsigned long)__save_stack_trace);
+ (unsigned long)dump_backtrace);
+ } else {
+ /*
+ * task blocked in __switch_to
+ */
+ start_backtrace(&frame,
+ thread_saved_fp(tsk),
+ thread_saved_pc(tsk));
}
- walk_stackframe(tsk, &frame, save_trace, &data);
+ printk("%sCall trace:\n", loglvl);
+ do {
+ /* skip until specified stack frame */
+ if (!skip) {
+ dump_backtrace_entry(frame.pc, loglvl);
+ } else if (frame.fp == regs->regs[29]) {
+ skip = 0;
+ /*
+ * Typically, this is the case where this function is
+ * called from panic or abort. Since the exception
+ * handler's stack frame does not contain the pc at
+ * which the exception was taken, report regs->pc
+ * instead.
+ */
+ dump_backtrace_entry(regs->pc, loglvl);
+ }
+ } while (!unwind_frame(tsk, &frame));
put_task_stack(tsk);
}
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
- __save_stack_trace(tsk, trace, 1);
+ dump_backtrace(NULL, tsk, loglvl);
+ barrier();
}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-void save_stack_trace(struct stack_trace *trace)
+#ifdef CONFIG_STACKTRACE
+
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
- __save_stack_trace(current, trace, 0);
+ struct stackframe frame;
+
+ if (regs)
+ start_backtrace(&frame, regs->regs[29], regs->pc);
+ else if (task == current)
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)arch_stack_walk);
+ else
+ start_backtrace(&frame, thread_saved_fp(task),
+ thread_saved_pc(task));
+
+ walk_stackframe(task, &frame, consume_entry, cookie);
}
-EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
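With walk_stackframe() now taking the generic bool (*fn)(void *, unsigned long) consumer shape, arch_stack_walk() can pass its consume_entry callback straight through. A minimal kernel-side sketch of such a consumer (hypothetical module code, not part of the patch):

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/printk.h>

struct dump_ctx {
	unsigned int remaining;	/* entries left to print */
};

/* Returning false terminates the unwind, mirroring save_return_addr(). */
static bool dump_entry(void *cookie, unsigned long pc)
{
	struct dump_ctx *ctx = cookie;

	pr_info("  %pS\n", (void *)pc);
	return --ctx->remaining != 0;
}

static void dump_current_stack(void)
{
	struct dump_ctx ctx = { .remaining = 16 };

	/* regs == NULL: start from this function's own frame */
	arch_stack_walk(dump_entry, &ctx, current, NULL);
}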
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index c1dee9066ff9..96cd347c7a46 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -10,6 +10,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
+#include <asm/mte.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
@@ -72,8 +73,10 @@ void notrace __cpu_suspend_exit(void)
* have turned the mitigation on. If the user has forcefully
* disabled it, make sure their wishes are obeyed.
*/
- if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
- arm64_set_ssbd_mitigation(false);
+ spectre_v4_enable_mitigation(NULL);
+
+ /* Restore additional MTE-specific configuration */
+ mte_suspend_exit();
}
/*
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 5f0c04863d2c..e4c0dadf0d92 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -123,6 +123,16 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
local_daif_restore(DAIF_PROCCTX);
user_exit();
+ if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) {
+ /*
+ * Process the asynchronous tag check fault before the actual
+ * syscall. do_notify_resume() will send a signal to userspace
+ * before the syscall is restarted.
+ */
+ regs->regs[0] = -ERESTARTNOINTR;
+ return;
+ }
+
if (has_syscall_work(flags)) {
/*
* The de-facto standard way to skip a system call using ptrace
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 0801a0f3c156..ff1dd1dbfe64 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -36,21 +36,23 @@ void store_cpu_topology(unsigned int cpuid)
if (mpidr & MPIDR_UP_BITMASK)
return;
- /* Create cpu topology mapping based on MPIDR. */
- if (mpidr & MPIDR_MT_BITMASK) {
- /* Multiprocessor system : Multi-threads per core */
- cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
- MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
- } else {
- /* Multiprocessor system : Single-thread per core */
- cpuid_topo->thread_id = -1;
- cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
- MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
- MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
- }
+ /*
+ * This would be the place to create cpu topology based on MPIDR.
+ *
+ * However, it cannot be trusted to depict the actual topology; some
+ * pieces of the architecture enforce an artificial cap on Aff0 values
+ * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
+ * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
+ * having absolutely no relationship to the actual underlying system
+ * topology, and cannot be reasonably used as core / package ID.
+ *
+ * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
+ * we still wouldn't be able to obtain a sane core ID. This means we
+ * need to entirely ignore MPIDR for any topology deduction.
+ */
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = cpuid;
+ cpuid_topo->package_id = cpu_to_node(cpuid);
pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 13ebd5ca2070..8af4e0e85736 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -34,6 +34,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
@@ -53,11 +54,6 @@ static const char *handler[]= {
int show_unhandled_signals = 0;
-static void dump_backtrace_entry(unsigned long where, const char *loglvl)
-{
- printk("%s %pS\n", loglvl, (void *)where);
-}
-
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
@@ -83,66 +79,6 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
printk("%sCode: %s\n", lvl, str);
}
-void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
- const char *loglvl)
-{
- struct stackframe frame;
- int skip = 0;
-
- pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
-
- if (regs) {
- if (user_mode(regs))
- return;
- skip = 1;
- }
-
- if (!tsk)
- tsk = current;
-
- if (!try_get_task_stack(tsk))
- return;
-
- if (tsk == current) {
- start_backtrace(&frame,
- (unsigned long)__builtin_frame_address(0),
- (unsigned long)dump_backtrace);
- } else {
- /*
- * task blocked in __switch_to
- */
- start_backtrace(&frame,
- thread_saved_fp(tsk),
- thread_saved_pc(tsk));
- }
-
- printk("%sCall trace:\n", loglvl);
- do {
- /* skip until specified stack frame */
- if (!skip) {
- dump_backtrace_entry(frame.pc, loglvl);
- } else if (frame.fp == regs->regs[29]) {
- skip = 0;
- /*
- * Mostly, this is the case where this function is
- * called in panic/abort. As exception handler's
- * stack frame does not contain the corresponding pc
- * at which an exception has taken place, use regs->pc
- * instead.
- */
- dump_backtrace_entry(regs->pc, loglvl);
- }
- } while (!unwind_frame(tsk, &frame));
-
- put_task_stack(tsk);
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
-{
- dump_backtrace(NULL, tsk, loglvl);
- barrier();
-}
-
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
@@ -200,9 +136,9 @@ void die(const char *str, struct pt_regs *regs, int err)
oops_exit();
if (in_interrupt())
- panic("Fatal exception in interrupt");
+ panic("%s: Fatal exception in interrupt", str);
if (panic_on_oops)
- panic("Fatal exception");
+ panic("%s: Fatal exception", str);
raw_spin_unlock_irqrestore(&die_lock, flags);
@@ -412,7 +348,7 @@ exit:
return fn ? fn(regs, instr) : 1;
}
-void force_signal_inject(int signal, int code, unsigned long address)
+void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
{
const char *desc;
struct pt_regs *regs = current_pt_regs();
@@ -438,7 +374,7 @@ void force_signal_inject(int signal, int code, unsigned long address)
signal = SIGKILL;
}
- arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0);
+ arm64_notify_die(desc, regs, signal, code, (void __user *)address, err);
}
/*
@@ -455,7 +391,7 @@ void arm64_notify_segfault(unsigned long addr)
code = SEGV_ACCERR;
mmap_read_unlock(current->mm);
- force_signal_inject(SIGSEGV, code, addr);
+ force_signal_inject(SIGSEGV, code, addr, 0);
}
void do_undefinstr(struct pt_regs *regs)
@@ -468,17 +404,28 @@ void do_undefinstr(struct pt_regs *regs)
return;
BUG_ON(!user_mode(regs));
- force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_undefinstr);
void do_bti(struct pt_regs *regs)
{
BUG_ON(!user_mode(regs));
- force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_bti);
+void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
+{
+ /*
+ * Unexpected FPAC exception or pointer authentication failure in
+ * the kernel: kill the task before it does any more harm.
+ */
+ BUG_ON(!user_mode(regs));
+ force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
+}
+NOKPROBE_SYMBOL(do_ptrauth_fault);
+
#define __user_cache_maint(insn, address, res) \
if (address >= user_addr_max()) { \
res = -EFAULT; \
@@ -528,7 +475,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
__user_cache_maint("ic ivau", address, ret);
break;
default:
- force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
return;
}
@@ -581,7 +528,7 @@ static void mrs_handler(unsigned int esr, struct pt_regs *regs)
sysreg = esr_sys64_to_sysreg(esr);
if (do_emulate_mrs(regs, sysreg, rt) != 0)
- force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
static void wfi_handler(unsigned int esr, struct pt_regs *regs)
@@ -775,6 +722,7 @@ static const char *esr_class_str[] = {
[ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
[ESR_ELx_EC_SVE] = "SVE",
[ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
+ [ESR_ELx_EC_FPAC] = "FPAC",
[ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
[ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
[ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
@@ -935,26 +883,6 @@ asmlinkage void enter_from_user_mode(void)
}
NOKPROBE_SYMBOL(enter_from_user_mode);
-void __pte_error(const char *file, int line, unsigned long val)
-{
- pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
-}
-
-void __pmd_error(const char *file, int line, unsigned long val)
-{
- pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
-}
-
-void __pud_error(const char *file, int line, unsigned long val)
-{
- pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
-}
-
-void __pgd_error(const char *file, int line, unsigned long val)
-{
- pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
-}
-
/* GENERIC_BUG traps */
int is_valid_bugaddr(unsigned long addr)
@@ -994,6 +922,21 @@ static struct break_hook bug_break_hook = {
.imm = BUG_BRK_IMM,
};
+static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
+{
+ pr_err("%s generated an invalid instruction at %pS!\n",
+ in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
+ (void *)instruction_pointer(regs));
+
+ /* We cannot handle this */
+ return DBG_HOOK_ERROR;
+}
+
+static struct break_hook fault_break_hook = {
+ .fn = reserved_fault_handler,
+ .imm = FAULT_BRK_IMM,
+};
+
#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_ESR_RECOVER 0x20
@@ -1059,6 +1002,7 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
void __init trap_init(void)
{
register_kernel_break_hook(&bug_break_hook);
+ register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
register_kernel_break_hook(&kasan_break_hook);
#endif
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index d4202a32abc9..debb8995d57f 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -30,15 +30,11 @@
#include <asm/vdso.h>
extern char vdso_start[], vdso_end[];
-#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
-#endif /* CONFIG_COMPAT_VDSO */
enum vdso_abi {
VDSO_ABI_AA64,
-#ifdef CONFIG_COMPAT_VDSO
VDSO_ABI_AA32,
-#endif /* CONFIG_COMPAT_VDSO */
};
enum vvar_pages {
@@ -284,21 +280,17 @@ up_fail:
/*
* Create and map the vectors page for AArch32 tasks.
*/
-#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
}
-#endif /* CONFIG_COMPAT_VDSO */
enum aarch32_map {
AA32_MAP_VECTORS, /* kuser helpers */
-#ifdef CONFIG_COMPAT_VDSO
+ AA32_MAP_SIGPAGE,
AA32_MAP_VVAR,
AA32_MAP_VDSO,
-#endif
- AA32_MAP_SIGPAGE
};
static struct page *aarch32_vectors_page __ro_after_init;
@@ -309,7 +301,10 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
.name = "[vectors]", /* ABI */
.pages = &aarch32_vectors_page,
},
-#ifdef CONFIG_COMPAT_VDSO
+ [AA32_MAP_SIGPAGE] = {
+ .name = "[sigpage]", /* ABI */
+ .pages = &aarch32_sig_page,
+ },
[AA32_MAP_VVAR] = {
.name = "[vvar]",
.fault = vvar_fault,
@@ -319,11 +314,6 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
.name = "[vdso]",
.mremap = aarch32_vdso_mremap,
},
-#endif /* CONFIG_COMPAT_VDSO */
- [AA32_MAP_SIGPAGE] = {
- .name = "[sigpage]", /* ABI */
- .pages = &aarch32_sig_page,
- },
};
static int aarch32_alloc_kuser_vdso_page(void)
@@ -362,25 +352,25 @@ static int aarch32_alloc_sigpage(void)
return 0;
}
-#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
+
+ if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
+ return 0;
+
vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
return __vdso_init(VDSO_ABI_AA32);
}
-#endif /* CONFIG_COMPAT_VDSO */
static int __init aarch32_alloc_vdso_pages(void)
{
int ret;
-#ifdef CONFIG_COMPAT_VDSO
ret = __aarch32_alloc_vdso_pages();
if (ret)
return ret;
-#endif
ret = aarch32_alloc_sigpage();
if (ret)
@@ -449,14 +439,12 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (ret)
goto out;
-#ifdef CONFIG_COMPAT_VDSO
- ret = __setup_additional_pages(VDSO_ABI_AA32,
- mm,
- bprm,
- uses_interp);
- if (ret)
- goto out;
-#endif /* CONFIG_COMPAT_VDSO */
+ if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
+ ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
+ uses_interp);
+ if (ret)
+ goto out;
+ }
ret = aarch32_sigreturn_setup(mm);
out:
@@ -497,8 +485,7 @@ static int __init vdso_init(void)
}
arch_initcall(vdso_init);
-int arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
int ret;
@@ -506,11 +493,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = __setup_additional_pages(VDSO_ABI_AA64,
- mm,
- bprm,
- uses_interp);
-
+ ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
mmap_write_unlock(mm);
return ret;
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 5139a5f19256..d6adb4677c25 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -208,7 +208,7 @@ quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
# Install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
+quiet_cmd_vdso_install = INSTALL32 $@
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
vdso.so: $(obj)/vdso.so.dbg
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index ec8e894684a7..82801d98a2b7 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -20,6 +20,13 @@ ENTRY(_text)
jiffies = jiffies_64;
+
+#define HYPERVISOR_EXTABLE \
+ . = ALIGN(SZ_8); \
+ __start___kvm_ex_table = .; \
+ *(__kvm_ex_table) \
+ __stop___kvm_ex_table = .;
+
#define HYPERVISOR_TEXT \
/* \
* Align to 4 KB so that \
@@ -35,6 +42,7 @@ jiffies = jiffies_64;
__hyp_idmap_text_end = .; \
__hyp_text_start = .; \
*(.hyp.text) \
+ HYPERVISOR_EXTABLE \
__hyp_text_end = .;
#define IDMAP_TEXT \
@@ -97,7 +105,7 @@ SECTIONS
*(.eh_frame)
}
- . = KIMAGE_VADDR + TEXT_OFFSET;
+ . = KIMAGE_VADDR;
.head.text : {
_text = .;
@@ -266,4 +274,4 @@ ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
-ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
+ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned")
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 318c8f2df245..043756db8f6e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -57,9 +57,6 @@ config KVM_ARM_PMU
Adds support for a virtual Performance Monitoring Unit (PMU) in
virtual machines.
-config KVM_INDIRECT_VECTORS
- def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
-
endif # KVM
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 691d21e4c717..acf9a993dfb6 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -206,6 +206,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
*/
r = 1;
break;
+ case KVM_CAP_STEAL_TIME:
+ r = kvm_arm_pvtime_supported();
+ break;
default:
r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
break;
@@ -1256,6 +1259,40 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
}
+static int kvm_map_vectors(void)
+{
+ /*
+ * SV2 = ARM64_SPECTRE_V2
+ * HEL2 = ARM64_HARDEN_EL2_VECTORS
+ *
+ * !SV2 + !HEL2 -> use direct vectors
+ * SV2 + !HEL2 -> use hardened vectors in place
+ * !SV2 + HEL2 -> allocate one vector slot and use exec mapping
+ * SV2 + HEL2 -> use hardened vectors and use exec mapping
+ */
+ if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+ __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+ __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+ }
+
+ if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+ phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+ unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+
+ /*
+ * Always allocate a spare vector slot, as we don't
+ * know yet which CPUs have a BP hardening slot that
+ * we can reuse.
+ */
+ __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+ BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+ return create_hyp_exec_mappings(vect_pa, size,
+ &__kvm_bp_vect_base);
+ }
+
+ return 0;
+}
+
static void cpu_init_hyp_mode(void)
{
phys_addr_t pgd_ptr;
@@ -1292,7 +1329,7 @@ static void cpu_init_hyp_mode(void)
* at EL2.
*/
if (this_cpu_has_cap(ARM64_SSBS) &&
- arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+ arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
kvm_call_hyp_nvhe(__kvm_enable_ssbs);
}
}
@@ -1549,10 +1586,6 @@ static int init_hyp_mode(void)
}
}
- err = hyp_map_aux_data();
- if (err)
- kvm_err("Cannot map host auxiliary data: %d\n", err);
-
return 0;
out_err:
@@ -1640,6 +1673,10 @@ int kvm_arch_init(void *opaque)
return -ENODEV;
}
+ if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
+ kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
+ "Only trusted guests should be used on this system.\n");
+
for_each_online_cpu(cpu) {
smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
if (ret < 0) {
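
With KVM_CAP_STEAL_TIME now reported through the check-extension path above, a VMM can probe for it before wiring up paravirtualized stolen-time accounting. A minimal userspace probe, assuming UAPI headers new enough to define the capability:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>	/* needs a header defining KVM_CAP_STEAL_TIME */

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);

		if (kvm < 0) {
			perror("/dev/kvm");
			return 1;
		}

		if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_STEAL_TIME) > 0)
			printf("steal time accounting available\n");
		else
			printf("steal time accounting not supported\n");
		return 0;
	}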
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index fe6c7d79309d..5d690d60ccad 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -128,7 +128,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_WATCHPT_LOW:
run->debug.arch.far = vcpu->arch.fault.far_el2;
- /* fall through */
+ fallthrough;
case ESR_ELx_EC_SOFTSTP_LOW:
case ESR_ELx_EC_BREAKPT_LOW:
case ESR_ELx_EC_BKPT32:
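
This comment-to-keyword conversion (repeated in the debug-sr.h, vgic-v3-sr.c and signal-handling hunks below) relies on the fallthrough pseudo-keyword, which maps to a statement attribute where the compiler supports it, so -Wimplicit-fallthrough is satisfied by the compiler rather than by comment parsing. A standalone illustration; the fallback branch mirrors the kernel's definition in linux/compiler_attributes.h:

	#include <stdio.h>

	#if defined(__has_attribute)
	# if __has_attribute(__fallthrough__)
	#  define fallthrough	__attribute__((__fallthrough__))
	# endif
	#endif
	#ifndef fallthrough
	# define fallthrough	do {} while (0)	/* old-compiler fallback */
	#endif

	static const char *classify(int n)
	{
		switch (n) {
		case 2:
			n += 1;		/* deliberately continue below */
			fallthrough;
		case 3:
			return "small";
		default:
			return "other";
		}
	}

	int main(void)
	{
		printf("%s %s\n", classify(2), classify(7));
		return 0;
	}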
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index f54f0e89a71c..d898f0da5802 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
-DDISABLE_BRANCH_PROFILING \
$(DISABLE_STACKLEAK_PLUGIN)
-obj-$(CONFIG_KVM) += vhe/ nvhe/
-obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index ee32a7743389..76e7eaf4675e 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -196,20 +196,23 @@ alternative_endif
// This is our single instruction exception window. A pending
// SError is guaranteed to occur at the earliest when we unmask
// it, and at the latest just after the ISB.
- .global abort_guest_exit_start
abort_guest_exit_start:
isb
- .global abort_guest_exit_end
abort_guest_exit_end:
msr daifset, #4 // Mask aborts
+ ret
+
+ _kvm_extable abort_guest_exit_start, 9997f
+ _kvm_extable abort_guest_exit_end, 9997f
+9997:
+ msr daifset, #4 // Mask aborts
+ mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
- // If the exception took place, restore the EL1 exception
- // context so that we can report some information.
- // Merge the exception code with the SError pending bit.
- tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+ // restore the EL1 exception context so that we can report some
+ // information. Merge the exception code with the SError pending bit.
msr elr_el2, x2
msr esr_el2, x3
msr spsr_el2, x4
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 689fccbc9de7..7ea277b82967 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,30 @@
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>
+.macro save_caller_saved_regs_vect
+ /* x0 and x1 were saved in the vector entry */
+ stp x2, x3, [sp, #-16]!
+ stp x4, x5, [sp, #-16]!
+ stp x6, x7, [sp, #-16]!
+ stp x8, x9, [sp, #-16]!
+ stp x10, x11, [sp, #-16]!
+ stp x12, x13, [sp, #-16]!
+ stp x14, x15, [sp, #-16]!
+ stp x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+ ldp x16, x17, [sp], #16
+ ldp x14, x15, [sp], #16
+ ldp x12, x13, [sp], #16
+ ldp x10, x11, [sp], #16
+ ldp x8, x9, [sp], #16
+ ldp x6, x7, [sp], #16
+ ldp x4, x5, [sp], #16
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+.endm
+
.text
.macro do_el2_call
@@ -92,35 +116,6 @@ el1_hvc_guest:
ARM_SMCCC_ARCH_WORKAROUND_2)
cbnz w1, el1_trap
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb arm64_enable_wa2_handling
- b wa2_end
-alternative_cb_end
- get_vcpu_ptr x2, x0
- ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
- // Sanitize the argument and update the guest flags
- ldr x1, [sp, #8] // Guest's x1
- clz w1, w1 // Murphy's device:
- lsr w1, w1, #5 // w1 = !!w1 without using
- eor w1, w1, #1 // the flags...
- bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
- str x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
- /* Check that we actually need to perform the call */
- hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
- cbz x0, wa2_end
-
- mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
- smc #0
-
- /* Don't leak data from the SMC call */
- mov x3, xzr
-wa2_end:
- mov x2, xzr
- mov x1, xzr
-#endif
-
wa_epilogue:
mov x0, xzr
add sp, sp, #16
@@ -143,13 +138,19 @@ el1_error:
b __guest_exit
el2_sync:
- /* Check for illegal exception return, otherwise panic */
+ /* Check for illegal exception return */
mrs x0, spsr_el2
+ tbnz x0, #20, 1f
- /* if this was something else, then panic! */
- tst x0, #PSR_IL_BIT
- b.eq __hyp_panic
+ save_caller_saved_regs_vect
+ stp x29, x30, [sp, #-16]!
+ bl kvm_unexpected_el2_exception
+ ldp x29, x30, [sp], #16
+ restore_caller_saved_regs_vect
+
+ eret
+1:
/* Let's attempt a recovery from the illegal exception return */
get_vcpu_ptr x1, x0
mov x0, #ARM_EXCEPTION_IL
@@ -157,27 +158,14 @@ el2_sync:
el2_error:
- ldp x0, x1, [sp], #16
+ save_caller_saved_regs_vect
+ stp x29, x30, [sp, #-16]!
+
+ bl kvm_unexpected_el2_exception
+
+ ldp x29, x30, [sp], #16
+ restore_caller_saved_regs_vect
- /*
- * Only two possibilities:
- * 1) Either we come from the exit path, having just unmasked
- * PSTATE.A: change the return code to an EL2 fault, and
- * carry on, as we're already in a sane state to handle it.
- * 2) Or we come from anywhere else, and that's a bug: we panic.
- *
- * For (1), x0 contains the original return code and x1 doesn't
- * contain anything meaningful at that stage. We can reuse them
- * as temp registers.
- * For (2), who cares?
- */
- mrs x0, elr_el2
- adr x1, abort_guest_exit_start
- cmp x0, x1
- adr x1, abort_guest_exit_end
- ccmp x0, x1, #4, ne
- b.ne __hyp_panic
- mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
eret
sb
@@ -271,7 +259,6 @@ SYM_CODE_START(__kvm_hyp_vector)
valid_vect el1_error // Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
.align 7
1: esb
@@ -321,4 +308,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)
-#endif
diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
index 0297dc63988c..5e28ea6aa097 100644
--- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -21,70 +21,70 @@
#define save_debug(ptr,reg,nr) \
switch (nr) { \
case 15: ptr[15] = read_debug(reg, 15); \
- /* Fall through */ \
+ fallthrough; \
case 14: ptr[14] = read_debug(reg, 14); \
- /* Fall through */ \
+ fallthrough; \
case 13: ptr[13] = read_debug(reg, 13); \
- /* Fall through */ \
+ fallthrough; \
case 12: ptr[12] = read_debug(reg, 12); \
- /* Fall through */ \
+ fallthrough; \
case 11: ptr[11] = read_debug(reg, 11); \
- /* Fall through */ \
+ fallthrough; \
case 10: ptr[10] = read_debug(reg, 10); \
- /* Fall through */ \
+ fallthrough; \
case 9: ptr[9] = read_debug(reg, 9); \
- /* Fall through */ \
+ fallthrough; \
case 8: ptr[8] = read_debug(reg, 8); \
- /* Fall through */ \
+ fallthrough; \
case 7: ptr[7] = read_debug(reg, 7); \
- /* Fall through */ \
+ fallthrough; \
case 6: ptr[6] = read_debug(reg, 6); \
- /* Fall through */ \
+ fallthrough; \
case 5: ptr[5] = read_debug(reg, 5); \
- /* Fall through */ \
+ fallthrough; \
case 4: ptr[4] = read_debug(reg, 4); \
- /* Fall through */ \
+ fallthrough; \
case 3: ptr[3] = read_debug(reg, 3); \
- /* Fall through */ \
+ fallthrough; \
case 2: ptr[2] = read_debug(reg, 2); \
- /* Fall through */ \
+ fallthrough; \
case 1: ptr[1] = read_debug(reg, 1); \
- /* Fall through */ \
+ fallthrough; \
default: ptr[0] = read_debug(reg, 0); \
}
#define restore_debug(ptr,reg,nr) \
switch (nr) { \
case 15: write_debug(ptr[15], reg, 15); \
- /* Fall through */ \
+ fallthrough; \
case 14: write_debug(ptr[14], reg, 14); \
- /* Fall through */ \
+ fallthrough; \
case 13: write_debug(ptr[13], reg, 13); \
- /* Fall through */ \
+ fallthrough; \
case 12: write_debug(ptr[12], reg, 12); \
- /* Fall through */ \
+ fallthrough; \
case 11: write_debug(ptr[11], reg, 11); \
- /* Fall through */ \
+ fallthrough; \
case 10: write_debug(ptr[10], reg, 10); \
- /* Fall through */ \
+ fallthrough; \
case 9: write_debug(ptr[9], reg, 9); \
- /* Fall through */ \
+ fallthrough; \
case 8: write_debug(ptr[8], reg, 8); \
- /* Fall through */ \
+ fallthrough; \
case 7: write_debug(ptr[7], reg, 7); \
- /* Fall through */ \
+ fallthrough; \
case 6: write_debug(ptr[6], reg, 6); \
- /* Fall through */ \
+ fallthrough; \
case 5: write_debug(ptr[5], reg, 5); \
- /* Fall through */ \
+ fallthrough; \
case 4: write_debug(ptr[4], reg, 4); \
- /* Fall through */ \
+ fallthrough; \
case 3: write_debug(ptr[3], reg, 3); \
- /* Fall through */ \
+ fallthrough; \
case 2: write_debug(ptr[2], reg, 2); \
- /* Fall through */ \
+ fallthrough; \
case 1: write_debug(ptr[1], reg, 1); \
- /* Fall through */ \
+ fallthrough; \
default: write_debug(ptr[0], reg, 0); \
}
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 426ef65601dd..d0f07e8cc3ff 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -17,6 +17,7 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
+#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
@@ -29,6 +30,9 @@
extern const char __hyp_panic_string[];
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
{
@@ -142,10 +146,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
* saved the guest context yet, and we may return early...
*/
par = read_sysreg(par_el1);
- asm volatile("at s1e1r, %0" : : "r" (far));
- isb();
-
- tmp = read_sysreg(par_el1);
+ if (!__kvm_at("s1e1r", far))
+ tmp = read_sysreg(par_el1);
+ else
+ tmp = SYS_PAR_EL1_F; /* back to the guest */
write_sysreg(par, par_el1);
if (unlikely(tmp & SYS_PAR_EL1_F))
@@ -445,7 +449,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
kvm_vcpu_dabt_isvalid(vcpu) &&
!kvm_vcpu_abt_issea(vcpu) &&
- !kvm_vcpu_dabt_iss1tw(vcpu);
+ !kvm_vcpu_abt_iss1tw(vcpu);
if (valid) {
int ret = __vgic_v2_perform_cpuif_access(vcpu);
@@ -475,37 +479,31 @@ exit:
return false;
}
-static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
+static inline void __kvm_unexpected_el2_exception(void)
{
- if (!cpus_have_final_cap(ARM64_SSBD))
- return false;
-
- return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
-}
+ unsigned long addr, fixup;
+ struct kvm_cpu_context *host_ctxt;
+ struct exception_table_entry *entry, *end;
+ unsigned long elr_el2 = read_sysreg(elr_el2);
+
+ entry = hyp_symbol_addr(__start___kvm_ex_table);
+ end = hyp_symbol_addr(__stop___kvm_ex_table);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+ while (entry < end) {
+ addr = (unsigned long)&entry->insn + entry->insn;
+ fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+ if (addr != elr_el2) {
+ entry++;
+ continue;
+ }
-static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
- /*
- * The host runs with the workaround always present. If the
- * guest wants it disabled, so be it...
- */
- if (__needs_ssbd_off(vcpu) &&
- __hyp_this_cpu_read(arm64_ssbd_callback_required))
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-#endif
-}
+ write_sysreg(fixup, elr_el2);
+ return;
+ }
-static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
- /*
- * If the guest has disabled the workaround, bring it back on.
- */
- if (__needs_ssbd_off(vcpu) &&
- __hyp_this_cpu_read(arm64_ssbd_callback_required))
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-#endif
+ hyp_panic(host_ctxt);
}
#endif /* __ARM64_KVM_HYP_SWITCH_H__ */
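
__kvm_unexpected_el2_exception() walks the __kvm_ex_table section collected by the new HYPERVISOR_EXTABLE fragment in vmlinux.lds.S above. Entries store 32-bit self-relative offsets rather than absolute addresses, which is what the `&entry->insn + entry->insn` arithmetic undoes. A userspace model of that encoding (the buffer and offsets are made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	struct exception_table_entry {
		int insn;	/* offset from &insn to the faulting insn */
		int fixup;	/* offset from &fixup to the recovery code */
	};

	static uintptr_t resolve(const int *field)
	{
		return (uintptr_t)field + *field;
	}

	int main(void)
	{
		static char text[64];	/* stand-in for hyp .text */
		struct exception_table_entry e;

		e.insn  = (int)((uintptr_t)&text[16] - (uintptr_t)&e.insn);
		e.fixup = (int)((uintptr_t)&text[32] - (uintptr_t)&e.fixup);

		printf("insn  resolves to &text[16]? %d\n",
		       resolve(&e.insn) == (uintptr_t)&text[16]);
		printf("fixup resolves to &text[32]? %d\n",
		       resolve(&e.fixup) == (uintptr_t)&text[32]);
		return 0;
	}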
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 341be2f2f312..8d3dd4f47924 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -202,8 +202,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__debug_switch_to_guest(vcpu);
- __set_guest_arch_workaround_state(vcpu);
-
do {
/* Jump in the fire! */
exit_code = __guest_enter(vcpu, host_ctxt);
@@ -211,8 +209,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
- __set_host_arch_workaround_state(vcpu);
-
__sysreg_save_state_nvhe(guest_ctxt);
__sysreg32_save_state(vcpu);
__timer_disable_traps(vcpu);
@@ -270,3 +266,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
read_sysreg(hpfar_el2), par, vcpu);
unreachable();
}
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+ return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 69eae608d670..b15d65a42042 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -31,7 +31,14 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
isb();
}
+ /*
+ * __load_guest_stage2() includes an ISB only when the AT
+ * workaround is applied. Take care of the opposite condition,
+ * ensuring that we always have an ISB, but not two ISBs back
+ * to back.
+ */
__load_guest_stage2(mmu);
+ asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 5a0073511efb..452f4cacd674 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -340,10 +340,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
- /* Fall through */
+ fallthrough;
case 6:
cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
- /* Fall through */
+ fallthrough;
default:
cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
}
@@ -352,10 +352,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
- /* Fall through */
+ fallthrough;
case 6:
cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
- /* Fall through */
+ fallthrough;
default:
cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
}
@@ -373,10 +373,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
- /* Fall through */
+ fallthrough;
case 6:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
- /* Fall through */
+ fallthrough;
default:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
}
@@ -385,10 +385,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
- /* Fall through */
+ fallthrough;
case 6:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
- /* Fall through */
+ fallthrough;
default:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
}
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index c52d714e0d75..ecf67e678203 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -131,8 +131,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
- __set_guest_arch_workaround_state(vcpu);
-
do {
/* Jump in the fire! */
exit_code = __guest_enter(vcpu, host_ctxt);
@@ -140,8 +138,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
- __set_host_arch_workaround_state(vcpu);
-
sysreg_save_guest_state_vhe(guest_ctxt);
__deactivate_traps(vcpu);
@@ -217,3 +213,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
__hyp_call_panic(spsr, elr, par, host_ctxt);
unreachable();
}
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+ return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 550dfa3e53cd..9824025ccc5c 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -24,27 +24,36 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
feature = smccc_get_arg1(vcpu);
switch (feature) {
case ARM_SMCCC_ARCH_WORKAROUND_1:
- switch (kvm_arm_harden_branch_predictor()) {
- case KVM_BP_HARDEN_UNKNOWN:
+ switch (arm64_get_spectre_v2_state()) {
+ case SPECTRE_VULNERABLE:
break;
- case KVM_BP_HARDEN_WA_NEEDED:
+ case SPECTRE_MITIGATED:
val = SMCCC_RET_SUCCESS;
break;
- case KVM_BP_HARDEN_NOT_REQUIRED:
+ case SPECTRE_UNAFFECTED:
val = SMCCC_RET_NOT_REQUIRED;
break;
}
break;
case ARM_SMCCC_ARCH_WORKAROUND_2:
- switch (kvm_arm_have_ssbd()) {
- case KVM_SSBD_FORCE_DISABLE:
- case KVM_SSBD_UNKNOWN:
+ switch (arm64_get_spectre_v4_state()) {
+ case SPECTRE_VULNERABLE:
break;
- case KVM_SSBD_KERNEL:
- val = SMCCC_RET_SUCCESS;
- break;
- case KVM_SSBD_FORCE_ENABLE:
- case KVM_SSBD_MITIGATED:
+ case SPECTRE_MITIGATED:
+ /*
+ * SSBS everywhere: Indicate no firmware
+ * support, as the SSBS support will be
+ * indicated to the guest and the default is
+ * safe.
+ *
+ * Otherwise, expose a permanent mitigation
+ * to the guest, and hide SSBS so that the
+ * guest stays protected.
+ */
+ if (cpus_have_final_cap(ARM64_SSBS))
+ break;
+ fallthrough;
+ case SPECTRE_UNAFFECTED:
val = SMCCC_RET_NOT_REQUIRED;
break;
}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0121ef2c7c8d..3d26b47a1343 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -343,7 +343,8 @@ static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
* destroying the VM), otherwise another faulting VCPU may come in and mess
* with things behind our backs.
*/
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
+ bool may_block)
{
struct kvm *kvm = mmu->kvm;
pgd_t *pgd;
@@ -369,11 +370,16 @@ static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 si
* If the range is too large, release the kvm->mmu_lock
* to prevent starvation and lockup detector warnings.
*/
- if (next != end)
+ if (may_block && next != end)
cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+{
+ __unmap_stage2_range(mmu, start, size, true);
+}
+
static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
phys_addr_t addr, phys_addr_t end)
{
@@ -1843,7 +1849,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
write_fault = kvm_is_write_fault(vcpu);
- exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+ exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);
if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
@@ -1871,6 +1877,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
force_pte = true;
vma_pagesize = PAGE_SIZE;
+ vma_shift = PAGE_SHIFT;
}
/*
@@ -1964,7 +1971,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
(fault_status == FSC_PERM &&
stage2_is_exec(mmu, fault_ipa, vma_pagesize));
- if (vma_pagesize == PUD_SIZE) {
+ /*
+ * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and
+ * all we have is a 2-level page table. Trying to map a PUD in
+ * this case would be fatally wrong.
+ */
+ if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
new_pud = kvm_pud_mkhuge(new_pud);
@@ -2119,7 +2131,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
goto out;
}
- if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+ if (kvm_vcpu_abt_iss1tw(vcpu)) {
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
ret = 1;
goto out_unlock;
@@ -2208,18 +2220,21 @@ static int handle_hva_to_gpa(struct kvm *kvm,
static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
- unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+ unsigned flags = *(unsigned *)data;
+ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+
+ __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
return 0;
}
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end, unsigned flags)
{
if (!kvm->arch.mmu.pgd)
return 0;
trace_kvm_unmap_hva_range(start, end);
- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
return 0;
}
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index f0d0312c0a55..81916e360b1e 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -269,6 +269,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
kvm_pmu_release_perf_event(&pmu->pmc[i]);
+ irq_work_sync(&vcpu->arch.pmu.overflow_work);
}
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
@@ -434,6 +435,22 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
}
/**
+ * When the perf interrupt is an NMI, we cannot safely notify the vcpu
+ * corresponding to the event.
+ * This is why we need a callback to do it once outside of the NMI context.
+ */
+static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_pmu *pmu;
+
+ pmu = container_of(work, struct kvm_pmu, overflow_work);
+ vcpu = kvm_pmc_to_vcpu(pmu->pmc);
+
+ kvm_vcpu_kick(vcpu);
+}
+
+/**
* When the perf event overflows, set the overflow status and inform the vcpu.
*/
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
@@ -465,7 +482,11 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
if (kvm_pmu_overflow_status(vcpu)) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
- kvm_vcpu_kick(vcpu);
+
+ if (!in_nmi())
+ kvm_vcpu_kick(vcpu);
+ else
+ irq_work_queue(&vcpu->arch.pmu.overflow_work);
}
cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
@@ -764,6 +785,9 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
return ret;
}
+ init_irq_work(&vcpu->arch.pmu.overflow_work,
+ kvm_pmu_perf_overflow_notify_vcpu);
+
vcpu->arch.pmu.created = true;
return 0;
}
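
The pattern this hunk adds: when the PMU overflow interrupt fires in NMI context, kvm_vcpu_kick() (which may take locks) is deferred to an irq_work that runs once the NMI returns, and irq_work_sync() in the destroy path above makes teardown wait for any pending work. A kernel-context sketch; the three irq_work calls are the real API, while the surrounding scaffolding is illustrative:

	#include <linux/hardirq.h>	/* in_nmi() */
	#include <linux/irq_work.h>

	static struct irq_work overflow_work;

	static void overflow_work_fn(struct irq_work *work)
	{
		/* Runs in IRQ context after the NMI returns: waking or
		 * kicking a task is safe here, unlike inside the NMI. */
	}

	static void setup(void)
	{
		init_irq_work(&overflow_work, overflow_work_fn);
	}

	static void pmu_overflow_path(void)
	{
		if (!in_nmi())
			overflow_work_fn(&overflow_work); /* do it now */
		else
			irq_work_queue(&overflow_work);	/* defer, self-IPI */
	}

	static void teardown(void)
	{
		irq_work_sync(&overflow_work);	/* as in vcpu_destroy above */
	}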
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 83415e96b589..db4056ecccfd 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -425,27 +425,30 @@ static int get_kernel_wa_level(u64 regid)
{
switch (regid) {
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
- switch (kvm_arm_harden_branch_predictor()) {
- case KVM_BP_HARDEN_UNKNOWN:
+ switch (arm64_get_spectre_v2_state()) {
+ case SPECTRE_VULNERABLE:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
- case KVM_BP_HARDEN_WA_NEEDED:
+ case SPECTRE_MITIGATED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
- case KVM_BP_HARDEN_NOT_REQUIRED:
+ case SPECTRE_UNAFFECTED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
}
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
- switch (kvm_arm_have_ssbd()) {
- case KVM_SSBD_FORCE_DISABLE:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
- case KVM_SSBD_KERNEL:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
- case KVM_SSBD_FORCE_ENABLE:
- case KVM_SSBD_MITIGATED:
+ switch (arm64_get_spectre_v4_state()) {
+ case SPECTRE_MITIGATED:
+ /*
+ * As for the hypercall discovery, we pretend we
+ * don't have any FW mitigation when SSBS is present
+ * at all times.
+ */
+ if (cpus_have_final_cap(ARM64_SSBS))
+ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
+ fallthrough;
+ case SPECTRE_UNAFFECTED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
- case KVM_SSBD_UNKNOWN:
- default:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
+ case SPECTRE_VULNERABLE:
+ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
}
}
@@ -462,14 +465,8 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
val = kvm_psci_version(vcpu, vcpu->kvm);
break;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
- val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
- break;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-
- if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
- kvm_arm_get_vcpu_workaround_2_flag(vcpu))
- val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
break;
default:
return -ENOENT;
@@ -527,34 +524,35 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
return -EINVAL;
- wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;
-
- if (get_kernel_wa_level(reg->id) < wa_level)
- return -EINVAL;
-
/* The enabled bit must not be set unless the level is AVAIL. */
- if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
- wa_level != val)
+ if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
+ (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
return -EINVAL;
- /* Are we finished or do we need to check the enable bit ? */
- if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
- return 0;
-
/*
- * If this kernel supports the workaround to be switched on
- * or off, make sure it matches the requested setting.
+ * Map all the possible incoming states to the only two we
+ * really want to deal with.
*/
- switch (wa_level) {
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
- kvm_arm_set_vcpu_workaround_2_flag(vcpu,
- val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
+ switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
+ wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
break;
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
- kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
+ wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
break;
+ default:
+ return -EINVAL;
}
+ /*
+ * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
+ * other way around.
+ */
+ if (get_kernel_wa_level(reg->id) < wa_level)
+ return -EINVAL;
+
return 0;
default:
return -ENOENT;
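
The set_fw_reg() rework above changes what a VMM may restore across migration: NOT_AVAIL is now accepted on a NOT_REQUIRED host, but a level higher than the host's is still rejected. A sketch of the userspace side, using the standard one-reg interface (the register ID macro comes from the arm64 UAPI headers; fd plumbing and error handling are elided):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>	/* pulls in asm/kvm.h on arm64 */

	/* 'vcpu_fd' comes from KVM_CREATE_VCPU. */
	static int restore_wa2_level(int vcpu_fd, uint64_t level)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
			.addr = (uintptr_t)&level,
		};

		/*
		 * After this patch, a saved NOT_AVAIL restores fine on a
		 * NOT_REQUIRED host, but an incoming level above what the
		 * host provides still fails with -EINVAL.
		 */
		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}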
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index f7b52ce1557e..920ac43077ad 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -13,25 +13,22 @@
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- u64 steal;
- __le64 steal_le;
- u64 offset;
- int idx;
u64 base = vcpu->arch.steal.base;
+ u64 last_steal = vcpu->arch.steal.last_steal;
+ u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
+ u64 steal = 0;
+ int idx;
if (base == GPA_INVALID)
return;
- /* Let's do the local bookkeeping */
- steal = vcpu->arch.steal.steal;
- steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
- vcpu->arch.steal.last_steal = current->sched_info.run_delay;
- vcpu->arch.steal.steal = steal;
-
- steal_le = cpu_to_le64(steal);
idx = srcu_read_lock(&kvm->srcu);
- offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
- kvm_put_guest(kvm, base + offset, steal_le, u64);
+ if (!kvm_get_guest(kvm, base + offset, steal)) {
+ steal = le64_to_cpu(steal);
+ vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
+ steal += vcpu->arch.steal.last_steal - last_steal;
+ kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
+ }
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -43,7 +40,8 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
switch (feature) {
case ARM_SMCCC_HV_PV_TIME_FEATURES:
case ARM_SMCCC_HV_PV_TIME_ST:
- val = SMCCC_RET_SUCCESS;
+ if (vcpu->arch.steal.base != GPA_INVALID)
+ val = SMCCC_RET_SUCCESS;
break;
}
@@ -64,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
* Start counting stolen time from the time the guest requests
* the feature enabled.
*/
- vcpu->arch.steal.steal = 0;
vcpu->arch.steal.last_steal = current->sched_info.run_delay;
idx = srcu_read_lock(&kvm->srcu);
@@ -74,7 +71,7 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
return base;
}
-static bool kvm_arm_pvtime_supported(void)
+bool kvm_arm_pvtime_supported(void)
{
return !!sched_info_on();
}
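
The rework above drops the kernel-side accumulator and instead read-modify-writes the stolen_time field in the guest-shared record, so the value is preserved when userspace migrates the page. The record is the 64-byte structure from the Arm PV time spec (DEN 0057A), mirrored in the kernel's pvclock-abi.h; a layout sketch with plain C types standing in for __le32/__le64:

	#include <stdint.h>

	struct pvclock_vcpu_stolen_time {
		uint32_t revision;	/* 0 for this layout */
		uint32_t attributes;	/* reserved, must be zero */
		uint64_t stolen_time;	/* little-endian nanoseconds; the
					   field updated read-modify-write
					   by kvm_update_stolen_time() */
		uint8_t  padding[48];	/* record padded to 64 bytes */
	};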
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index ee33875c5c2a..f6e8b4a75cbb 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -319,10 +319,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.reset_state.reset = false;
}
- /* Default workaround setup is enabled (if supported) */
- if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
- vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-
/* Reset timer */
ret = kvm_timer_vcpu_reset(vcpu);
out:
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 077293b5115f..9ca270603980 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1131,6 +1131,11 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
if (!vcpu_has_sve(vcpu))
val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
+ if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
+ arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+ val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
+ } else if (id == SYS_ID_AA64PFR1_EL1) {
+ val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
(0xfUL << ID_AA64ISAR1_API_SHIFT) |
@@ -1382,6 +1387,13 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+static bool access_mte_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ kvm_inject_undefined(vcpu);
+ return false;
+}
+
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) { \
SYS_DESC(SYS_##name), \
@@ -1547,6 +1559,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
+
+ { SYS_DESC(SYS_RGSR_EL1), access_mte_regs },
+ { SYS_DESC(SYS_GCR_EL1), access_mte_regs },
+
{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
@@ -1571,6 +1587,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_TFSR_EL1), access_mte_regs },
+ { SYS_DESC(SYS_TFSRE0_EL1), access_mte_regs },
+
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index 4691053c5ee4..ff0444352bba 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -23,7 +23,7 @@ TRACE_EVENT(kvm_entry,
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+ TP_printk("PC: 0x%016lx", __entry->vcpu_pc)
);
TRACE_EVENT(kvm_exit,
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
+ TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%016lx",
__print_symbolic(__entry->ret, kvm_arm_exception_type),
__entry->esr_ec,
__print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
@@ -69,7 +69,7 @@ TRACE_EVENT(kvm_guest_fault,
__entry->ipa = ipa;
),
- TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
+ TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#016lx",
__entry->ipa, __entry->hsr,
__entry->hxfar, __entry->vcpu_pc)
);
@@ -131,7 +131,7 @@ TRACE_EVENT(kvm_mmio_emulate,
__entry->cpsr = cpsr;
),
- TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
+ TP_printk("Emulate MMIO at: 0x%016lx (instr: %08lx, cpsr: %08lx)",
__entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
@@ -149,7 +149,7 @@ TRACE_EVENT(kvm_unmap_hva_range,
__entry->end = end;
),
- TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
+ TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@@ -165,7 +165,7 @@ TRACE_EVENT(kvm_set_spte_hva,
__entry->hva = hva;
),
- TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
+ TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_age_hva,
@@ -182,7 +182,7 @@ TRACE_EVENT(kvm_age_hva,
__entry->end = end;
),
- TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
+ TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@@ -198,7 +198,7 @@ TRACE_EVENT(kvm_test_age_hva,
__entry->hva = hva;
),
- TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
+ TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_set_way_flush,
diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
index 2c56d1e0f5bd..8d78acc4fba7 100644
--- a/arch/arm64/kvm/trace_handle_exit.h
+++ b/arch/arm64/kvm/trace_handle_exit.h
@@ -22,7 +22,7 @@ TRACE_EVENT(kvm_wfx_arm64,
__entry->is_wfe = is_wfe;
),
- TP_printk("guest executed wf%c at: 0x%08lx",
+ TP_printk("guest executed wf%c at: 0x%016lx",
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
);
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_hvc_arm64,
__entry->imm = imm;
),
- TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
+ TP_printk("HVC at 0x%016lx (r0: 0x%016lx, imm: 0x%lx)",
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
@@ -135,7 +135,7 @@ TRACE_EVENT(trap_reg,
__entry->write_value = write_value;
),
- TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
+ TP_printk("%s %s reg %d (0x%016llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
);
TRACE_EVENT(kvm_handle_sys_reg,
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 5c786b915cd3..52d6f24f65dc 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -1001,8 +1001,8 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
raw_spin_lock_irqsave(&irq->irq_lock, flags);
/*
- * An access targetting Group0 SGIs can only generate
- * those, while an access targetting Group1 SGIs can
+ * An access targeting Group0 SGIs can only generate
+ * those, while an access targeting Group1 SGIs can
* generate interrupts of either group.
*/
if (!irq->group || allow_group1) {
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 2fc253466dbf..d31e1169d9b8 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -16,3 +16,5 @@ lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+
+obj-$(CONFIG_ARM64_MTE) += mte.o
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
new file mode 100644
index 000000000000..03ca6d8b8670
--- /dev/null
+++ b/arch/arm64/lib/mte.S
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#include <linux/linkage.h>
+
+#include <asm/alternative.h>
+#include <asm/assembler.h>
+#include <asm/mte.h>
+#include <asm/page.h>
+#include <asm/sysreg.h>
+
+ .arch armv8.5-a+memtag
+
+/*
+ * multitag_transfer_size - set \reg to the block size that is accessed by the
+ * LDGM/STGM instructions.
+ */
+ .macro multitag_transfer_size, reg, tmp
+ mrs_s \reg, SYS_GMID_EL1
+ ubfx \reg, \reg, #SYS_GMID_EL1_BS_SHIFT, #SYS_GMID_EL1_BS_SIZE
+ mov \tmp, #4
+ lsl \reg, \tmp, \reg
+ .endm
+
+/*
+ * Clear the tags in a page
+ * x0 - address of the page to be cleared
+ */
+SYM_FUNC_START(mte_clear_page_tags)
+ multitag_transfer_size x1, x2
+1: stgm xzr, [x0]
+ add x0, x0, x1
+ tst x0, #(PAGE_SIZE - 1)
+ b.ne 1b
+ ret
+SYM_FUNC_END(mte_clear_page_tags)
+
+/*
+ * Copy the tags from the source page to the destination one
+ * x0 - address of the destination page
+ * x1 - address of the source page
+ */
+SYM_FUNC_START(mte_copy_page_tags)
+ mov x2, x0
+ mov x3, x1
+ multitag_transfer_size x5, x6
+1: ldgm x4, [x3]
+ stgm x4, [x2]
+ add x2, x2, x5
+ add x3, x3, x5
+ tst x2, #(PAGE_SIZE - 1)
+ b.ne 1b
+ ret
+SYM_FUNC_END(mte_copy_page_tags)
+
+/*
+ * Read tags from a user buffer (one tag per byte) and set the corresponding
+ * tags at the given kernel address. Used by PTRACE_POKEMTETAGS.
+ * x0 - kernel address (to)
+ * x1 - user buffer (from)
+ * x2 - number of tags/bytes (n)
+ * Returns:
+ * x0 - number of tags read/set
+ */
+SYM_FUNC_START(mte_copy_tags_from_user)
+ mov x3, x1
+ cbz x2, 2f
+1:
+ uao_user_alternative 2f, ldrb, ldtrb, w4, x1, 0
+ lsl x4, x4, #MTE_TAG_SHIFT
+ stg x4, [x0], #MTE_GRANULE_SIZE
+ add x1, x1, #1
+ subs x2, x2, #1
+ b.ne 1b
+
+ // exception handling and function return
+2: sub x0, x1, x3 // update the number of tags set
+ ret
+SYM_FUNC_END(mte_copy_tags_from_user)
+
+/*
+ * Get the tags from a kernel address range and write the tag values to the
+ * given user buffer (one tag per byte). Used by PTRACE_PEEKMTETAGS.
+ * x0 - user buffer (to)
+ * x1 - kernel address (from)
+ * x2 - number of tags/bytes (n)
+ * Returns:
+ * x0 - number of tags read/set
+ */
+SYM_FUNC_START(mte_copy_tags_to_user)
+ mov x3, x0
+ cbz x2, 2f
+1:
+ ldg x4, [x1]
+ ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE
+ uao_user_alternative 2f, strb, sttrb, w4, x0, 0
+ add x0, x0, #1
+ add x1, x1, #MTE_GRANULE_SIZE
+ subs x2, x2, #1
+ b.ne 1b
+
+ // exception handling and function return
+2: sub x0, x0, x3 // update the number of tags copied
+ ret
+SYM_FUNC_END(mte_copy_tags_to_user)
+
+/*
+ * Save the tags in a page
+ * x0 - page address
+ * x1 - tag storage
+ */
+SYM_FUNC_START(mte_save_page_tags)
+ multitag_transfer_size x7, x5
+1:
+ mov x2, #0
+2:
+ ldgm x5, [x0]
+ orr x2, x2, x5
+ add x0, x0, x7
+ tst x0, #0xFF // 16 tag values fit in a register,
+ b.ne 2b // which is 16*16=256 bytes
+
+ str x2, [x1], #8
+
+ tst x0, #(PAGE_SIZE - 1)
+ b.ne 1b
+
+ ret
+SYM_FUNC_END(mte_save_page_tags)
+
+/*
+ * Restore the tags in a page
+ * x0 - page address
+ * x1 - tag storage
+ */
+SYM_FUNC_START(mte_restore_page_tags)
+ multitag_transfer_size x7, x5
+1:
+ ldr x2, [x1], #8
+2:
+ stgm x2, [x0]
+ add x0, x0, x7
+ tst x0, #0xFF
+ b.ne 2b
+
+ tst x0, #(PAGE_SIZE - 1)
+ b.ne 1b
+
+ ret
+SYM_FUNC_END(mte_restore_page_tags)
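
The multitag_transfer_size macro above reads GMID_EL1.BS and computes the LDGM/STGM block size as 4 << BS bytes; with one 4-bit tag per 16-byte granule, that bounds the loop strides used throughout this file, and BS=6 gives the 16-tags-per-register, 256-byte case noted in mte_save_page_tags. The arithmetic mirrored in plain C (the BS values iterated over are illustrative; the real field is implementation-defined):

	#include <stdio.h>

	int main(void)
	{
		for (unsigned int bs = 2; bs <= 6; bs++) {
			unsigned int bytes = 4u << bs;	/* block per LDGM/STGM */

			printf("BS=%u: %3u bytes, %2u tags per op\n",
			       bs, bytes, bytes / 16); /* 1 tag / 16B granule */
		}
		return 0;
	}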
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index d91030f0ffee..5ead3c3de3b6 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -4,10 +4,11 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
ioremap.o mmap.o pgd.o mmu.o \
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PTDUMP_CORE) += dump.o
+obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
+obj-$(CONFIG_ARM64_MTE) += mteswap.o
KASAN_SANITIZE_physaddr.o += n
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index a206655a39a5..001737a8f309 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -27,6 +27,10 @@ static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
+static unsigned long max_pinned_asids;
+static unsigned long nr_pinned_asids;
+static unsigned long *pinned_asid_map;
+
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
@@ -45,7 +49,7 @@ static u32 get_cpu_asid_bits(void)
default:
pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
smp_processor_id(), fld);
- /* Fallthrough */
+ fallthrough;
case 0:
asid = 8;
break;
@@ -72,7 +76,7 @@ void verify_cpu_asid_bits(void)
}
}
-static void set_kpti_asid_bits(void)
+static void set_kpti_asid_bits(unsigned long *map)
{
unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
/*
@@ -81,13 +85,15 @@ static void set_kpti_asid_bits(void)
* is set, then the ASID will map only userspace. Thus
* mark even as reserved for kernel.
*/
- memset(asid_map, 0xaa, len);
+ memset(map, 0xaa, len);
}
static void set_reserved_asid_bits(void)
{
- if (arm64_kernel_unmapped_at_el0())
- set_kpti_asid_bits();
+ if (pinned_asid_map)
+ bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
+ else if (arm64_kernel_unmapped_at_el0())
+ set_kpti_asid_bits(asid_map);
else
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}
@@ -166,6 +172,14 @@ static u64 new_context(struct mm_struct *mm)
return newasid;
/*
+ * If it is pinned, we can keep using it. Note that reserved
+ * takes priority, because even if it is also pinned, we need to
+ * update the generation into the reserved_asids.
+ */
+ if (refcount_read(&mm->context.pinned))
+ return newasid;
+
+ /*
* We had a valid ASID in a previous life, so try to re-use
* it if possible.
*/
@@ -256,6 +270,71 @@ switch_mm_fastpath:
cpu_switch_mm(mm->pgd, mm);
}
+unsigned long arm64_mm_context_get(struct mm_struct *mm)
+{
+ unsigned long flags;
+ u64 asid;
+
+ if (!pinned_asid_map)
+ return 0;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+ asid = atomic64_read(&mm->context.id);
+
+ if (refcount_inc_not_zero(&mm->context.pinned))
+ goto out_unlock;
+
+ if (nr_pinned_asids >= max_pinned_asids) {
+ asid = 0;
+ goto out_unlock;
+ }
+
+ if (!asid_gen_match(asid)) {
+ /*
+ * We went through one or more rollover since that ASID was
+ * used. Ensure that it is still valid, or generate a new one.
+ */
+ asid = new_context(mm);
+ atomic64_set(&mm->context.id, asid);
+ }
+
+ nr_pinned_asids++;
+ __set_bit(asid2idx(asid), pinned_asid_map);
+ refcount_set(&mm->context.pinned, 1);
+
+out_unlock:
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+ asid &= ~ASID_MASK;
+
+ /* Set the equivalent of USER_ASID_BIT */
+ if (asid && arm64_kernel_unmapped_at_el0())
+ asid |= 1;
+
+ return asid;
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_get);
+
+void arm64_mm_context_put(struct mm_struct *mm)
+{
+ unsigned long flags;
+ u64 asid = atomic64_read(&mm->context.id);
+
+ if (!pinned_asid_map)
+ return;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+ if (refcount_dec_and_test(&mm->context.pinned)) {
+ __clear_bit(asid2idx(asid), pinned_asid_map);
+ nr_pinned_asids--;
+ }
+
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_put);
+
/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
@@ -296,8 +375,11 @@ static int asids_update_limit(void)
{
unsigned long num_available_asids = NUM_USER_ASIDS;
- if (arm64_kernel_unmapped_at_el0())
+ if (arm64_kernel_unmapped_at_el0()) {
num_available_asids /= 2;
+ if (pinned_asid_map)
+ set_kpti_asid_bits(pinned_asid_map);
+ }
/*
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is reserved for init_mm.
@@ -305,6 +387,13 @@ static int asids_update_limit(void)
WARN_ON(num_available_asids - 1 <= num_possible_cpus());
pr_info("ASID allocator initialised with %lu entries\n",
num_available_asids);
+
+ /*
+ * There must always be an ASID available after rollover. Ensure that,
+ * even if all CPUs have a reserved ASID and the maximum number of ASIDs
+ * are pinned, there still is at least one empty slot in the ASID map.
+ */
+ max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
return 0;
}
arch_initcall(asids_update_limit);
@@ -319,13 +408,17 @@ static int asids_init(void)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
+ pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
+ sizeof(*pinned_asid_map), GFP_KERNEL);
+ nr_pinned_asids = 0;
+
/*
* We cannot call set_reserved_asid_bits() here because CPU
* caps are not finalized yet, so it is safer to assume KPTI
* and reserve kernel ASIDs from the beginning.
*/
if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
- set_kpti_asid_bits();
+ set_kpti_asid_bits(asid_map);
return 0;
}
early_initcall(asids_init);
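
A sketch of the intended consumer of the new pinning API (an IOMMU driver doing shared virtual addressing was the motivating user; this caller is hypothetical). The pin guarantees a rollover never hands the ASID to another mm while hardware still caches translations tagged with it; only the get/put calls are the API added above:

	#include <linux/device.h>
	#include <linux/mm_types.h>
	#include <asm/mmu_context.h>

	static int bind_mm_to_device(struct device *dev, struct mm_struct *mm)
	{
		unsigned long asid = arm64_mm_context_get(mm);

		if (!asid)	/* pinning unsupported, or limit reached */
			return -ENOSPC;

		/* program 'dev' to tag TLB entries for this mm with 'asid' */
		return 0;
	}

	static void unbind_mm_from_device(struct device *dev,
					  struct mm_struct *mm)
	{
		/* quiesce the device's use of the ASID first */
		arm64_mm_context_put(mm);
	}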
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 2ee7b73433a5..70a71f38b6a9 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -6,21 +6,32 @@
* Copyright (C) 2012 ARM Ltd.
*/
+#include <linux/bitops.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/mte.h>
-void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void copy_highpage(struct page *to, struct page *from)
{
- struct page *page = virt_to_page(kto);
+ struct page *kto = page_address(to);
+ struct page *kfrom = page_address(from);
+
copy_page(kto, kfrom);
- flush_dcache_page(page);
+
+ if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
+ set_bit(PG_mte_tagged, &to->flags);
+ mte_copy_page_tags(kto, kfrom);
+ }
}
-EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
+EXPORT_SYMBOL(copy_highpage);
-void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
{
- clear_page(kaddr);
+ copy_highpage(to, from);
+ flush_dcache_page(to);
}
-EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
+EXPORT_SYMBOL_GPL(copy_user_highpage);
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index eee1732ab6cd..aa0060178343 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -14,9 +14,7 @@ int fixup_exception(struct pt_regs *regs)
if (!fixup)
return 0;
- if (IS_ENABLED(CONFIG_BPF_JIT) &&
- regs->pc >= BPF_JIT_REGION_START &&
- regs->pc < BPF_JIT_REGION_END)
+ if (in_bpf_jit(regs))
return arm64_bpf_fixup_exception(fixup, regs);
regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index f07333e86c2f..94c99c1c19e3 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -218,7 +218,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
} while (pteval != old_pteval);
- flush_tlb_fix_spurious_fault(vma, address);
+ /* Invalidate a stale read-only entry */
+ if (dirty)
+ flush_tlb_page(vma, address);
return 1;
}
@@ -641,6 +643,13 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
return 0;
}
+static int do_tag_check_fault(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ do_bad_area(addr, esr, regs);
+ return 0;
+}
+
static const struct fault_info fault_info[] = {
{ do_bad, SIGKILL, SI_KERNEL, "ttbr address size fault" },
{ do_bad, SIGKILL, SI_KERNEL, "level 1 address size fault" },
@@ -659,7 +668,7 @@ static const struct fault_info fault_info[] = {
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_sea, SIGBUS, BUS_OBJERR, "synchronous external abort" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 17" },
+ { do_tag_check_fault, SIGSEGV, SEGV_MTESERR, "synchronous tag check fault" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 18" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 19" },
{ do_sea, SIGKILL, SI_KERNEL, "level 0 (translation table walk)" },
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 75df62fea1b6..936c4762dadf 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -122,7 +122,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
* The following mapping attributes may be updated in live
* kernel mappings without the need for break-before-make.
*/
- static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
+ pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
/* creating or taking down mappings is always safe */
if (old == 0 || new == 0)
@@ -136,6 +136,17 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
if (old & ~new & PTE_NG)
return false;
+ /*
+ * Changing the memory type between Normal and Normal-Tagged is safe
+ * since Tagged is considered a permission attribute from the
+ * mismatched attribute aliases perspective.
+ */
+ if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
+ (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
+ ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
+ (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
+ mask |= PTE_ATTRINDX_MASK;
+
return ((old ^ new) & ~mask) == 0;
}
@@ -491,7 +502,12 @@ static void __init map_mem(pgd_t *pgdp)
if (memblock_is_nomap(reg))
continue;
- __map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
+ /*
+ * The linear map must allow allocation tags reading/writing
+ * if MTE is present. Otherwise, it has the same attributes as
+ * PAGE_KERNEL.
+ */
+ __map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
}
/*
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
new file mode 100644
index 000000000000..c52c1847079c
--- /dev/null
+++ b/arch/arm64/mm/mteswap.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pagemap.h>
+#include <linux/xarray.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <asm/mte.h>
+
+static DEFINE_XARRAY(mte_pages);
+
+void *mte_allocate_tag_storage(void)
+{
+ /* tags granule is 16 bytes, 2 tags stored per byte */
+ return kmalloc(PAGE_SIZE / 16 / 2, GFP_KERNEL);
+}
+
+void mte_free_tag_storage(char *storage)
+{
+ kfree(storage);
+}
+
+int mte_save_tags(struct page *page)
+{
+ void *tag_storage, *ret;
+
+ if (!test_bit(PG_mte_tagged, &page->flags))
+ return 0;
+
+ tag_storage = mte_allocate_tag_storage();
+ if (!tag_storage)
+ return -ENOMEM;
+
+ mte_save_page_tags(page_address(page), tag_storage);
+
+ /* page_private contains the swap entry.val set in do_swap_page */
+ ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL);
+ if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
+ mte_free_tag_storage(tag_storage);
+ return xa_err(ret);
+ } else if (ret) {
+ /* Entry is being replaced, free the old entry */
+ mte_free_tag_storage(ret);
+ }
+
+ return 0;
+}
+
+bool mte_restore_tags(swp_entry_t entry, struct page *page)
+{
+ void *tags = xa_load(&mte_pages, entry.val);
+
+ if (!tags)
+ return false;
+
+ mte_restore_page_tags(page_address(page), tags);
+
+ return true;
+}
+
+void mte_invalidate_tags(int type, pgoff_t offset)
+{
+ swp_entry_t entry = swp_entry(type, offset);
+ void *tags = xa_erase(&mte_pages, entry.val);
+
+ mte_free_tag_storage(tags);
+}
+
+void mte_invalidate_tags_area(int type)
+{
+ swp_entry_t entry = swp_entry(type, 0);
+ swp_entry_t last_entry = swp_entry(type + 1, 0);
+ void *tags;
+
+ XA_STATE(xa_state, &mte_pages, entry.val);
+
+ xa_lock(&mte_pages);
+ xas_for_each(&xa_state, tags, last_entry.val - 1) {
+ __xa_erase(&mte_pages, xa_state.xa_index);
+ mte_free_tag_storage(tags);
+ }
+ xa_unlock(&mte_pages);
+}
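
A quick check of the mte_allocate_tag_storage() sizing above: one 4-bit tag covers a 16-byte granule and two tags pack into each byte of storage, so a page's worth of tags is PAGE_SIZE / 32 bytes:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096;		 /* example: 4K pages */
		unsigned long granules  = page_size / 16; /* 256 tags */
		unsigned long bytes     = granules / 2;	 /* 128 bytes */

		printf("%lu tags -> %lu bytes of swap tag storage\n",
		       granules, bytes);
		return 0;
	}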
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 73f8b49d485c..676deb220b99 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -46,7 +46,11 @@ EXPORT_SYMBOL(node_to_cpumask_map);
*/
const struct cpumask *cpumask_of_node(int node)
{
- if (WARN_ON(node >= nr_node_ids))
+
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
+ if (WARN_ON(node < 0 || node >= nr_node_ids))
return cpu_none_mask;
if (WARN_ON(node_to_cpumask_map[node] == NULL))
@@ -448,7 +452,7 @@ static int __init dummy_numa_init(void)
* arm64_numa_init() - Initialize NUMA
*
* Try each configured NUMA initialization method until one succeeds. The
- * last fallback is dummy single node config encomapssing whole memory.
+ * last fallback is dummy single node config encompassing whole memory.
*/
void __init arm64_numa_init(void)
{
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 23f648c2a199..1b94f5b82654 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 796e47a571e6..23c326a06b2d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -18,6 +18,7 @@
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
+#include <asm/sysreg.h>
#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K
@@ -44,14 +45,18 @@
#define TCR_KASAN_FLAGS 0
#endif
-/* Default MAIR_EL1 */
+/*
+ * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
+ * changed during __cpu_setup to Normal Tagged if the system supports MTE.
+ */
#define MAIR_EL1_SET \
(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \
MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
- MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
#ifdef CONFIG_CPU_PM
/**
@@ -421,6 +426,29 @@ SYM_FUNC_START(__cpu_setup)
* Memory region attributes
*/
mov_q x5, MAIR_EL1_SET
+#ifdef CONFIG_ARM64_MTE
+ /*
+ * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
+ * (ID_AA64PFR1_EL1[11:8] > 1).
+ */
+ mrs x10, ID_AA64PFR1_EL1
+ ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
+ cmp x10, #ID_AA64PFR1_MTE
+ b.lt 1f
+
+ /* Normal Tagged memory type at the corresponding MAIR index */
+ mov x10, #MAIR_ATTR_NORMAL_TAGGED
+ bfi x5, x10, #(8 * MT_NORMAL_TAGGED), #8
+
+ /* initialize GCR_EL1: all non-zero tags excluded by default */
+ mov x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
+ msr_s SYS_GCR_EL1, x10
+
+ /* clear any pending tag check faults in TFSR*_EL1 */
+ msr_s SYS_TFSR_EL1, xzr
+ msr_s SYS_TFSRE0_EL1, xzr
+1:
+#endif
msr mair_el1, x5
/*
* Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/ptdump.c
index 0b8da1cc1c07..807dc634bbd2 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -41,6 +41,8 @@ static struct addr_marker address_markers[] = {
{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
{ KASAN_SHADOW_END, "Kasan shadow end" },
#endif
+ { BPF_JIT_REGION_START, "BPF start" },
+ { BPF_JIT_REGION_END, "BPF end" },
{ MODULES_VADDR, "Modules start" },
{ MODULES_END, "Modules end" },
{ VMALLOC_START, "vmalloc() area" },
@@ -169,6 +171,10 @@ static const struct prot_bits pte_bits[] = {
.mask = PTE_ATTRINDX_MASK,
.val = PTE_ATTRINDX(MT_NORMAL),
.set = "MEM/NORMAL",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_NORMAL_TAGGED),
+ .set = "MEM/NORMAL-TAGGED",
}
};
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index f8912e45be7a..ef9f1d5e989d 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -143,14 +143,17 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
}
}
-static inline int bpf2a64_offset(int bpf_to, int bpf_from,
+static inline int bpf2a64_offset(int bpf_insn, int off,
const struct jit_ctx *ctx)
{
- int to = ctx->offset[bpf_to];
- /* -1 to account for the Branch instruction */
- int from = ctx->offset[bpf_from] - 1;
-
- return to - from;
+ /* BPF JMP offset is relative to the next instruction */
+ bpf_insn++;
+ /*
+ * Whereas arm64 branch instructions encode the offset
+ * from the branch itself, so we must subtract 1 from the
+ * instruction offset.
+ */
+ return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
}
static void jit_fill_hole(void *area, unsigned int size)
@@ -642,7 +645,7 @@ emit_bswap_uxt:
/* JUMP off */
case BPF_JMP | BPF_JA:
- jmp_offset = bpf2a64_offset(i + off, i, ctx);
+ jmp_offset = bpf2a64_offset(i, off, ctx);
check_imm26(jmp_offset);
emit(A64_B(jmp_offset), ctx);
break;
@@ -669,7 +672,7 @@ emit_bswap_uxt:
case BPF_JMP32 | BPF_JSLE | BPF_X:
emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
- jmp_offset = bpf2a64_offset(i + off, i, ctx);
+ jmp_offset = bpf2a64_offset(i, off, ctx);
check_imm19(jmp_offset);
switch (BPF_OP(code)) {
case BPF_JEQ:
@@ -908,10 +911,21 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
const struct bpf_prog *prog = ctx->prog;
int i;
+ /*
+ * - offset[0] - offset of the end of prologue,
+ * start of the 1st instruction.
+ * - offset[1] - offset of the end of 1st instruction,
+ * start of the 2nd instruction
+ * [....]
+ * - offset[3] - offset of the end of 3rd instruction,
+ * start of 4th instruction
+ */
for (i = 0; i < prog->len; i++) {
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
ret = build_insn(insn, ctx, extra_pass);
if (ret > 0) {
i++;
@@ -919,11 +933,16 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
ctx->offset[i] = ctx->idx;
continue;
}
- if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
if (ret)
return ret;
}
+ /*
+ * offset is allocated with prog->len + 1 so fill in
+ * the last element with the offset after the last
+ * instruction (end of program)
+ */
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
return 0;
}
@@ -1002,7 +1021,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
- ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+ ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
goto out_off;
@@ -1089,7 +1108,7 @@ skip_init_ctx:
prog->jited_len = prog_size;
if (!prog->is_func || extra_pass) {
- bpf_prog_fill_jited_linfo(prog, ctx.offset);
+ bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
kfree(ctx.offset);
kfree(jit_data);
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
index e456652facce..d05c78eace1b 100644
--- a/arch/c6x/kernel/signal.c
+++ b/arch/c6x/kernel/signal.c
@@ -220,7 +220,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->a4 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->a4 = regs->orig_a4;
@@ -252,7 +252,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs,
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a4 = regs->orig_a4;
regs->pc -= 4;
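
These comment-to-keyword conversions use the fallthrough pseudo-keyword from linux/compiler_attributes.h, which lets the compiler verify that every unannotated fall-through is a bug. A standalone illustration of the idiom, with stand-in error codes rather than the real ones:

    #include <stdio.h>

    #if defined(__GNUC__) && __GNUC__ >= 7
    #define fallthrough __attribute__((__fallthrough__))
    #else
    #define fallthrough do {} while (0)
    #endif

    /* Stand-ins for -ERESTARTSYS, -ERESTARTNOINTR and -EINTR. */
    enum { RESTARTSYS = 1, RESTARTNOINTR = 2, INTERRUPTED = -1, RESTARTED = 0 };

    static int handle_restart(int code, int sa_restart)
    {
    	switch (code) {
    	case RESTARTSYS:
    		if (!sa_restart)
    			return INTERRUPTED;
    		fallthrough;	/* deliberate: share the restart path */
    	case RESTARTNOINTR:
    		return RESTARTED;
    	}
    	return code;
    }

    int main(void)
    {
    	printf("%d %d\n", handle_restart(RESTARTSYS, 0),
    	       handle_restart(RESTARTSYS, 1));	/* prints "-1 0" */
    	return 0;
    }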
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
index 9452d6570b7e..970895df75ec 100644
--- a/arch/csky/kernel/signal.c
+++ b/arch/csky/kernel/signal.c
@@ -194,7 +194,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->a0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
regs->pc -= TRAP0_SIZE;
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index 38d335488a54..69e68949787f 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -227,7 +227,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka)
regs->er0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->er0 = regs->orig_er0;
diff --git a/arch/hexagon/kernel/module.c b/arch/hexagon/kernel/module.c
index cf99fb79a124..cb3bf19b0640 100644
--- a/arch/hexagon/kernel/module.c
+++ b/arch/hexagon/kernel/module.c
@@ -120,7 +120,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
}
case R_HEXAGON_HI16:
value = (value>>16) & 0xffff;
- /* fallthrough */
+ fallthrough;
case R_HEXAGON_LO16:
*location &= ~0x00c03fff;
*location |= value & 0x3fff;
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c
index d48864c48e5a..94cc7ff52dce 100644
--- a/arch/hexagon/kernel/signal.c
+++ b/arch/hexagon/kernel/signal.c
@@ -155,7 +155,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->r00 = -EINTR;
break;
}
- /* Fall through */
+ fallthrough;
case -ERESTARTNOINTR:
regs->r06 = regs->syscall_nr;
pt_set_elr(regs, pt_elr(regs) - 4);
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index b66ba907019c..87927eb824cc 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -74,8 +74,6 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}
-#define acpi_unlazy_tlb(x)
-
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu) \
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 10850897a91c..779b6972aa84 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -366,6 +366,15 @@ pgd_index (unsigned long address)
}
#define pgd_index pgd_index
+/*
+ * In the kernel's mapped region we know everything is in region number 5, so
+ * as an optimisation its PGD already points to the area for that region.
+ * However, this also means that we cannot use pgd_index() and we must
+ * never add the region here.
+ */
+#define pgd_offset_k(addr) \
+ (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+
/* Look up a pgd entry in the gate area. On IA-64, the gate-area
resides in the kernel-mapped segment, hence we use pgd_offset_k()
here. */
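
A quick userspace illustration of why the masking matters: an ia64 kernel address carries its region number in bits [63:61], and init_mm.pgd already points at region 5's slice of the table, so only the masked index may be added. All constants below are illustrative, not the real ia64 values:

    #include <stdio.h>

    #define PGDIR_SHIFT	33		/* illustrative */
    #define PTRS_PER_PGD	(1UL << 11)	/* illustrative */

    int main(void)
    {
    	unsigned long addr = 0xa000000400000000UL;	/* region 5 VA */
    	unsigned long idx  = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);

    	/* Without the mask, the region bits would leak into the index: */
    	printf("masked index %lu, region %lu (must not be added)\n",
    	       idx, addr >> 61);
    	return 0;
    }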
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index bec762a9b418..fec70d662d0c 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -163,7 +163,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
case DIE_INIT_MONARCH_LEAVE:
if (!kdump_freeze_monarch)
break;
- /* fall through */
+ fallthrough;
case DIE_INIT_SLAVE_LEAVE:
case DIE_INIT_MONARCH_ENTER:
case DIE_MCA_RENDZVOUS_LEAVE:
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index b49fe6f618ed..f8150ee74f29 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -3,7 +3,7 @@
* Architecture-specific kernel symbols
*/
-#ifdef CONFIG_VIRTUAL_MEM_MAP
+#if defined(CONFIG_VIRTUAL_MEM_MAP) || defined(CONFIG_DISCONTIGMEM)
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/memblock.h>
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 1a42ba885188..00a496cb346f 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -654,7 +654,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
}
} else if (!is_internal(mod, val))
val = get_plt(mod, location, val, &ok);
- /* FALL THROUGH */
+ fallthrough;
default:
val -= bundle(location);
break;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 971f166873aa..0dc3611e7971 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -3472,7 +3472,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
break;
case PFM_CTX_LOADED:
if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
- /* fall through */
+ fallthrough;
case PFM_CTX_UNLOADED:
case PFM_CTX_ZOMBIE:
DPRINT(("invalid state=%d\n", state));
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index d07ed65c9c6e..e67b22fc3c60 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -374,7 +374,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
/* note: scr->pt.r10 is already -1 */
break;
}
- /*FALLTHRU*/
+ fallthrough;
case ERESTARTNOINTR:
ia64_decrement_ip(&scr->pt);
restart = 0; /* don't restart twice if handle_signal() fails... */
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 2d4e65ba5c3e..6c1a8951dfbb 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -1431,7 +1431,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case LDS_IMM_OP:
case LDSA_IMM_OP:
case LDFS_OP:
@@ -1459,7 +1459,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case LD_IMM_OP:
case LDA_IMM_OP:
case LDBIAS_IMM_OP:
@@ -1475,7 +1475,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case ST_IMM_OP:
case STREL_IMM_OP:
ret = emulate_store_int(ifa, u.insn, regs);
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index 7601fe0622d2..6bd64c35e691 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -324,7 +324,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
return 0;
}
}
- /* fall through */
+ fallthrough;
case UNW_NAT_NONE:
dummy_nat = 0;
nat_addr = &dummy_nat;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 0b3fb4c7af29..8e7b8c6c576e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start),
args->nid, args->zone, page_to_pfn(map_start),
- MEMMAP_EARLY, NULL);
+ MEMINIT_EARLY, NULL);
return 0;
}
@@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
if (!vmem_map) {
- memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
- NULL);
+ memmap_init_zone(size, nid, zone, start_pfn,
+ MEMINIT_EARLY, NULL);
} else {
struct page *start;
struct memmap_init_callback_data args;
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 6f2f38d05772..93bbb74ea876 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -6,32 +6,32 @@ config M68K
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
select ARCH_NO_PREEMPT if !COLDFIRE
+ select ARCH_WANT_IPC_PARSE_VERSION
select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
select DMA_DIRECT_REMAP if HAS_DMA && MMU && !COLDFIRE
- select HAVE_IDE
- select HAVE_AOUT if MMU
- select HAVE_ASM_MODVERSIONS
- select HAVE_DEBUG_BUGVERBOSE
- select GENERIC_IRQ_SHOW
select GENERIC_ATOMIC64
- select NO_DMA if !MMU && !COLDFIRE
- select HAVE_UID16
- select VIRT_TO_BUS
- select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select GENERIC_CPU_DEVICES
select GENERIC_IOMAP
+ select GENERIC_IRQ_SHOW
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
- select ARCH_WANT_IPC_PARSE_VERSION
+ select HAVE_AOUT if MMU
+ select HAVE_ASM_MODVERSIONS
+ select HAVE_DEBUG_BUGVERBOSE
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
+ select HAVE_IDE
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_UID16
+ select MMU_GATHER_NO_RANGE if MMU
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA
- select OLD_SIGSUSPEND3
+ select NO_DMA if !MMU && !COLDFIRE
select OLD_SIGACTION
- select MMU_GATHER_NO_RANGE if MMU
+ select OLD_SIGSUSPEND3
+ select VIRT_TO_BUS
config CPU_BIG_ENDIAN
def_bool y
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 8f23b2fab64c..bee9f240f35d 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -214,7 +214,7 @@ static void __init amiga_identify(void)
switch (amiga_model) {
case AMI_UNKNOWN:
- goto Generic;
+ break;
case AMI_600:
case AMI_1200:
@@ -227,7 +227,7 @@ static void __init amiga_identify(void)
case AMI_2000:
case AMI_2500:
AMIGAHW_SET(A2000_CLK); /* Is this correct for all models? */
- goto Generic;
+ break;
case AMI_3000:
case AMI_3000T:
@@ -238,7 +238,7 @@ static void __init amiga_identify(void)
AMIGAHW_SET(A3000_SCSI);
AMIGAHW_SET(A3000_CLK);
AMIGAHW_SET(ZORRO3);
- goto Generic;
+ break;
case AMI_4000T:
AMIGAHW_SET(A4000_SCSI);
@@ -247,68 +247,12 @@ static void __init amiga_identify(void)
AMIGAHW_SET(A4000_IDE);
AMIGAHW_SET(A3000_CLK);
AMIGAHW_SET(ZORRO3);
- goto Generic;
+ break;
case AMI_CDTV:
case AMI_CD32:
AMIGAHW_SET(CD_ROM);
AMIGAHW_SET(A2000_CLK); /* Is this correct? */
- goto Generic;
-
- Generic:
- AMIGAHW_SET(AMI_VIDEO);
- AMIGAHW_SET(AMI_BLITTER);
- AMIGAHW_SET(AMI_AUDIO);
- AMIGAHW_SET(AMI_FLOPPY);
- AMIGAHW_SET(AMI_KEYBOARD);
- AMIGAHW_SET(AMI_MOUSE);
- AMIGAHW_SET(AMI_SERIAL);
- AMIGAHW_SET(AMI_PARALLEL);
- AMIGAHW_SET(CHIP_RAM);
- AMIGAHW_SET(PAULA);
-
- switch (amiga_chipset) {
- case CS_OCS:
- case CS_ECS:
- case CS_AGA:
- switch (amiga_custom.deniseid & 0xf) {
- case 0x0c:
- AMIGAHW_SET(DENISE_HR);
- break;
- case 0x08:
- AMIGAHW_SET(LISA);
- break;
- }
- break;
- default:
- AMIGAHW_SET(DENISE);
- break;
- }
- switch ((amiga_custom.vposr>>8) & 0x7f) {
- case 0x00:
- AMIGAHW_SET(AGNUS_PAL);
- break;
- case 0x10:
- AMIGAHW_SET(AGNUS_NTSC);
- break;
- case 0x20:
- case 0x21:
- AMIGAHW_SET(AGNUS_HR_PAL);
- break;
- case 0x30:
- case 0x31:
- AMIGAHW_SET(AGNUS_HR_NTSC);
- break;
- case 0x22:
- case 0x23:
- AMIGAHW_SET(ALICE_PAL);
- break;
- case 0x32:
- case 0x33:
- AMIGAHW_SET(ALICE_NTSC);
- break;
- }
- AMIGAHW_SET(ZORRO);
break;
case AMI_DRACO:
@@ -318,6 +262,60 @@ static void __init amiga_identify(void)
panic("Unknown Amiga Model");
}
+ AMIGAHW_SET(AMI_VIDEO);
+ AMIGAHW_SET(AMI_BLITTER);
+ AMIGAHW_SET(AMI_AUDIO);
+ AMIGAHW_SET(AMI_FLOPPY);
+ AMIGAHW_SET(AMI_KEYBOARD);
+ AMIGAHW_SET(AMI_MOUSE);
+ AMIGAHW_SET(AMI_SERIAL);
+ AMIGAHW_SET(AMI_PARALLEL);
+ AMIGAHW_SET(CHIP_RAM);
+ AMIGAHW_SET(PAULA);
+
+ switch (amiga_chipset) {
+ case CS_OCS:
+ case CS_ECS:
+ case CS_AGA:
+ switch (amiga_custom.deniseid & 0xf) {
+ case 0x0c:
+ AMIGAHW_SET(DENISE_HR);
+ break;
+ case 0x08:
+ AMIGAHW_SET(LISA);
+ break;
+ default:
+ AMIGAHW_SET(DENISE);
+ break;
+ }
+ break;
+ }
+ switch ((amiga_custom.vposr>>8) & 0x7f) {
+ case 0x00:
+ AMIGAHW_SET(AGNUS_PAL);
+ break;
+ case 0x10:
+ AMIGAHW_SET(AGNUS_NTSC);
+ break;
+ case 0x20:
+ case 0x21:
+ AMIGAHW_SET(AGNUS_HR_PAL);
+ break;
+ case 0x30:
+ case 0x31:
+ AMIGAHW_SET(AGNUS_HR_NTSC);
+ break;
+ case 0x22:
+ case 0x23:
+ AMIGAHW_SET(ALICE_PAL);
+ break;
+ case 0x32:
+ case 0x33:
+ AMIGAHW_SET(ALICE_NTSC);
+ break;
+ }
+ AMIGAHW_SET(ZORRO);
+
#define AMIGAHW_ANNOUNCE(name, str) \
if (AMIGAHW_PRESENT(name)) \
pr_cont(str)
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index 37091898adb3..5e0e682f9c61 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -207,7 +207,7 @@ repeat:
self_test_last_rcv = jiffies;
break;
}
- /* FALL THROUGH */
+ fallthrough;
default:
break_flag = scancode & BREAK_MASK;
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 522dcf624aa5..3cd76bfaee03 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -317,6 +317,7 @@ CONFIG_DUMMY_IRQ=m
CONFIG_IDE=y
CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_PLATFORM=y
CONFIG_BLK_DEV_MAC_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 2433409f4369..c3d6faa7894f 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -346,6 +346,7 @@ CONFIG_DUMMY_IRQ=m
CONFIG_IDE=y
CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_PLATFORM=y
CONFIG_BLK_DEV_GAYLE=y
CONFIG_BLK_DEV_BUDDHA=y
CONFIG_BLK_DEV_FALCON_IDE=y
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 015f1ca38305..3689c6718c88 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -68,4 +68,12 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_DELAYED_TRACE (1 << TIF_DELAYED_TRACE)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_MEMDIE (1 << TIF_MEMDIE)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+
#endif /* _ASM_M68K_THREAD_INFO_H */
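
The new _TIF_* macros are the usual one-bit masks derived from the TIF_* bit numbers, so callers can test several flags with a single AND. Illustrated standalone (the bit numbers here are examples, not the m68k ones):

    #include <stdio.h>

    #define TIF_SIGPENDING		6	/* example bit numbers */
    #define TIF_NEED_RESCHED	7
    #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
    #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
    #define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED)

    int main(void)
    {
    	unsigned long flags = _TIF_NEED_RESCHED;

    	if (flags & _TIF_WORK_MASK)
    		printf("work pending before return to user mode\n");
    	return 0;
    }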
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index 29de2b3108ea..493c95db0e51 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -57,7 +57,7 @@
* Of course, readability is a subjective issue, so it will never be
* argued that that goal was accomplished. It was merely a goal.
* A key way to help make code more readable is to give good
- * documentation. So, the first thing you will find is exaustive
+ * documentation. So, the first thing you will find is exhaustive
* write-ups on the structure of the file, and the features of the
* functional subroutines.
*
@@ -1304,7 +1304,7 @@ L(mmu_fixup_done):
* mmu_engage
*
* This chunk of code performs the gruesome task of engaging the MMU.
- * The reason its gruesome is because when the MMU becomes engaged it
+ * The reason it's gruesome is because when the MMU becomes engaged it
* maps logical addresses to physical addresses. The Program Counter
* register is then passed through the MMU before the next instruction
* is fetched (the instruction following the engage MMU instruction).
@@ -1369,7 +1369,7 @@ L(mmu_fixup_done):
/*
* After this point no new memory is allocated and
* the start of available memory is stored in availmem.
- * (The bootmem allocator requires now the physicall address.)
+ * (The bootmem allocator now requires the physical address.)
*/
movel L(memory_start),availmem
@@ -1547,7 +1547,7 @@ func_return get_bi_record
* seven bits of the logical address (LA) are used as an
* index into the "root table." Each entry in the root
* table has a bit which specifies if it's a valid pointer to a
- * pointer table. Each entry defines a 32KMeg range of memory.
+ * pointer table. Each entry defines a 32Meg range of memory.
* If an entry is invalid then that logical range of 32M is
* invalid and references to that range of memory (when the MMU
* is enabled) will fault. If the entry is valid, then it does
@@ -1584,7 +1584,7 @@ func_return get_bi_record
* bits 17..12 - index into the Page Table
* bits 11..0 - offset into a particular 4K page
*
- * The algorithms which follows do one thing: they abstract
+ * The algorithms which follow do one thing: they abstract
* the MMU hardware. For example, there are three kinds of
* cache settings that are relevant. Either, memory is
* being mapped in which case it is either Kernel Code (or
@@ -2082,7 +2082,7 @@ func_return mmu_map_tt
* mmu_map
*
* This routine will map a range of memory using a pointer
- * table and allocating the pages on the fly from the kernel.
+ * table and allocate the pages on the fly from the kernel.
* The pointer table does not have to be already linked into
* the root table, this routine will do that if necessary.
*
@@ -2528,7 +2528,7 @@ func_start mmu_get_root_table_entry,%d0/%a1
/* Find the start of free memory, get_bi_record does this for us,
* as the bootinfo structure is located directly behind the kernel
- * and and we simply search for the last entry.
+ * and we simply search for the last entry.
*/
get_bi_record BI_LAST
addw #PAGESIZE-1,%a0
@@ -2654,7 +2654,7 @@ func_start mmu_get_page_table_entry,%d0/%a1
jne 2f
/* If the page table entry doesn't exist, we allocate a complete new
- * page and use it as one continues big page table which can cover
+ * page and use it as one continuous big page table which can cover
* 4MB of memory; nearly all mappings have that alignment.
*/
get_new_page
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index fc034fd19798..a98fca977073 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -1067,7 +1067,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->d0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->d0 = regs->orig_d0;
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 546e81935fe8..9e1261462bcc 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -845,7 +845,6 @@ static void show_trace(unsigned long *stack, const char *loglvl)
void show_registers(struct pt_regs *regs)
{
struct frame *fp = (struct frame *)regs;
- mm_segment_t old_fs = get_fs();
u16 c, *cp;
unsigned long addr;
int i;
@@ -918,10 +917,9 @@ void show_registers(struct pt_regs *regs)
show_stack(NULL, (unsigned long *)addr, KERN_INFO);
pr_info("Code:");
- set_fs(KERNEL_DS);
cp = (u16 *)regs->pc;
for (i = -8; i < 16; i++) {
- if (get_user(c, cp + i) && i >= 0) {
+ if (get_kernel_nofault(c, cp + i) && i >= 0) {
pr_cont(" Bad PC value.");
break;
}
@@ -930,7 +928,6 @@ void show_registers(struct pt_regs *regs)
else
pr_cont(" <%04x>", c);
}
- set_fs(old_fs);
pr_cont("\n");
}
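
get_kernel_nofault() (linux/uaccess.h) replaces the old set_fs(KERNEL_DS)/get_user() pattern for probing kernel addresses: a faulting read is reported through the return value instead of oopsing, and no address-space override is needed. The shape of the new code, reduced to a hedged sketch:

    #include <linux/types.h>
    #include <linux/uaccess.h>
    #include <linux/printk.h>

    /* Sketch: dump code words around a (possibly bad) kernel PC. */
    static void dump_code(const u16 *pc)
    {
    	u16 insn;
    	int i;

    	for (i = -8; i < 16; i++) {
    		/* A fault is caught and returned, not taken. */
    		if (get_kernel_nofault(insn, pc + i)) {
    			pr_cont(" Bad PC value.");
    			break;
    		}
    		pr_cont(" %04x", insn);
    	}
    }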
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 5c9f3a2d6538..0ac53d87493c 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/vt_kern.h>
#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
@@ -940,6 +941,26 @@ static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
},
};
+static const struct resource mac_ide_quadra_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x104),
+ DEFINE_RES_IRQ(IRQ_NUBUS_F),
+};
+
+static const struct resource mac_ide_pb_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x104),
+ DEFINE_RES_IRQ(IRQ_NUBUS_C),
+};
+
+static const struct resource mac_pata_baboon_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x38),
+ DEFINE_RES_MEM(0x50F1A038, 0x04),
+ DEFINE_RES_IRQ(IRQ_BABOON_1),
+};
+
+static const struct pata_platform_info mac_pata_baboon_data __initconst = {
+ .ioport_shift = 2,
+};
+
int __init mac_platform_init(void)
{
phys_addr_t swim_base = 0;
@@ -1018,7 +1039,7 @@ int __init mac_platform_init(void)
*/
platform_device_register_simple("mac_scsi", 1,
mac_scsi_duo_rsrc, ARRAY_SIZE(mac_scsi_duo_rsrc));
- /* fall through */
+ fallthrough;
case MAC_SCSI_OLD:
/* Addresses from Developer Notes for Duo System,
* PowerBook 180 & 160, 140 & 170, Macintosh IIsi
@@ -1049,6 +1070,26 @@ int __init mac_platform_init(void)
}
/*
+ * IDE device
+ */
+
+ switch (macintosh_config->ide_type) {
+ case MAC_IDE_QUADRA:
+ platform_device_register_simple("mac_ide", -1,
+ mac_ide_quadra_rsrc, ARRAY_SIZE(mac_ide_quadra_rsrc));
+ break;
+ case MAC_IDE_PB:
+ platform_device_register_simple("mac_ide", -1,
+ mac_ide_pb_rsrc, ARRAY_SIZE(mac_ide_pb_rsrc));
+ break;
+ case MAC_IDE_BABOON:
+ platform_device_register_resndata(NULL, "pata_platform", -1,
+ mac_pata_baboon_rsrc, ARRAY_SIZE(mac_pata_baboon_rsrc),
+ &mac_pata_baboon_data, sizeof(mac_pata_baboon_data));
+ break;
+ }
+
+ /*
* Ethernet device
*/
diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c
index 388780797f7d..4de6229c7bfd 100644
--- a/arch/m68k/mac/macboing.c
+++ b/arch/m68k/mac/macboing.c
@@ -116,7 +116,7 @@ static void mac_init_asc( void )
* support 16-bit stereo output, but only mono input."
*
* Technical Information Library (TIL) article number 16405.
- * http://support.apple.com/kb/TA32601
+ * https://support.apple.com/kb/TA32601
*
* --David Kilzer
*/
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index 1f0fad2a98a0..ac77d73af19a 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -370,7 +370,7 @@ void via_nubus_irq_startup(int irq)
/* Allow NuBus slots 9 through F. */
via2[vDirA] &= 0x80 | ~(1 << irq_idx);
}
- /* fall through */
+ fallthrough;
case MAC_VIA_IICI:
via_irq_enable(irq);
break;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 795f483b1050..ef46e77e97a5 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -118,7 +118,7 @@ good_area:
pr_debug("do_page_fault: good_area\n");
switch (error_code & 3) {
default: /* 3: write, present */
- /* fall through */
+ fallthrough;
case 2: /* write, not present */
if (!(vma->vm_flags & VM_WRITE))
goto acc_err;
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 2b9cb4a62281..eac9dde65193 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -42,7 +42,7 @@ void __init paging_init(void)
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
int i;
- empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!empty_zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 2bb006bdc31c..3a653f0a4188 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -226,8 +226,8 @@ static pte_t * __init kernel_page_table(void)
{
pte_t *pte_table = last_pte_table;
- if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
- pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (PAGE_ALIGNED(last_pte_table)) {
+ pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pte_table) {
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
@@ -274,9 +274,8 @@ static pmd_t * __init kernel_ptr_table(void)
}
last_pmd_table += PTRS_PER_PMD;
- if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
- last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
- PAGE_SIZE);
+ if (PAGE_ALIGNED(last_pmd_table)) {
+ last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!last_pmd_table)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 2e87a9b6d312..63bce836b9f1 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
generic-y += extable.h
-generic-y += hw_irq.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 65bf5fd8d473..4a96b59f0bee 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -249,7 +249,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->r3 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
/* offset of 4 bytes to re-execute trap (brki) instruction */
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 0880a003573d..3344d4a1fe89 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -46,6 +46,9 @@ unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c95fa3a2484c..8f328298f8cc 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -877,6 +877,7 @@ config SNI_RM
select I8253
select I8259
select ISA
+ select MIPS_L1_CACHE_SHIFT_6
select SWAP_IO_SPACE if CPU_BIG_ENDIAN
select SYS_HAS_CPU_R4X00
select SYS_HAS_CPU_R5000
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 82627c264964..01427bde2397 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -148,7 +148,7 @@ void __init plat_mem_setup(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) {
+ if (c->cputype == CPU_74K) {
pr_info("Using bcma bus\n");
#ifdef CONFIG_BCM47XX_BCMA
bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 75a7a382da09..3288cef4b168 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_34K:
case CPU_1004K:
case CPU_74K:
+ case CPU_1074K:
case CPU_M14KC:
case CPU_M14KEC:
case CPU_INTERAPTIV:
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 47a8ffc0b413..f5b8300f4573 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -137,6 +137,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
return !(flags & 1);
}
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
#endif /* #ifndef __ASSEMBLY__ */
/*
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index d35eaed1668f..825d337a505a 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -969,7 +969,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
index b6e9c99b85a5..eb181224eb4c 100644
--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -26,7 +26,6 @@
#define cpu_has_counter 1
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
#define cpu_has_divec 0
-#define cpu_has_ejtag 0
#define cpu_has_inclusive_pcaches 1
#define cpu_has_llsc 1
#define cpu_has_mcheck 0
@@ -42,7 +41,6 @@
#define cpu_has_veic 0
#define cpu_has_vint 0
#define cpu_has_vtag_icache 0
-#define cpu_has_watch 1
#define cpu_has_wsbh 1
#define cpu_has_ic_fills_f_dc 1
#define cpu_hwrena_impl_bits 0xc0000000
diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
index f5e362f79701..bf2480923154 100644
--- a/arch/mips/include/asm/mach-loongson64/irq.h
+++ b/arch/mips/include/asm/mach-loongson64/irq.h
@@ -2,8 +2,6 @@
#ifndef __ASM_MACH_LOONGSON64_IRQ_H_
#define __ASM_MACH_LOONGSON64_IRQ_H_
-#include <boot_param.h>
-
/* cpu core interrupt numbers */
#define NR_IRQS_LEGACY 16
#define NR_MIPS_CPU_IRQS 8
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index 3a25dbd3b3e9..5eaca4fe3f92 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -9,7 +9,6 @@
#ifndef _ASM_MACH_LOONGSON64_MMZONE_H
#define _ASM_MACH_LOONGSON64_MMZONE_H
-#include <boot_param.h>
#define NODE_ADDRSPACE_SHIFT 44
#define NODE0_ADDRSPACE_OFFSET 0x000000000000UL
#define NODE1_ADDRSPACE_OFFSET 0x100000000000UL
diff --git a/arch/mips/include/asm/unroll.h b/arch/mips/include/asm/unroll.h
index 7dd4a80e05d6..6f4ac854b12d 100644
--- a/arch/mips/include/asm/unroll.h
+++ b/arch/mips/include/asm/unroll.h
@@ -28,38 +28,38 @@
BUILD_BUG_ON(!__builtin_constant_p(times)); \
\
switch (times) { \
- case 32: fn(__VA_ARGS__); /* fall through */ \
- case 31: fn(__VA_ARGS__); /* fall through */ \
- case 30: fn(__VA_ARGS__); /* fall through */ \
- case 29: fn(__VA_ARGS__); /* fall through */ \
- case 28: fn(__VA_ARGS__); /* fall through */ \
- case 27: fn(__VA_ARGS__); /* fall through */ \
- case 26: fn(__VA_ARGS__); /* fall through */ \
- case 25: fn(__VA_ARGS__); /* fall through */ \
- case 24: fn(__VA_ARGS__); /* fall through */ \
- case 23: fn(__VA_ARGS__); /* fall through */ \
- case 22: fn(__VA_ARGS__); /* fall through */ \
- case 21: fn(__VA_ARGS__); /* fall through */ \
- case 20: fn(__VA_ARGS__); /* fall through */ \
- case 19: fn(__VA_ARGS__); /* fall through */ \
- case 18: fn(__VA_ARGS__); /* fall through */ \
- case 17: fn(__VA_ARGS__); /* fall through */ \
- case 16: fn(__VA_ARGS__); /* fall through */ \
- case 15: fn(__VA_ARGS__); /* fall through */ \
- case 14: fn(__VA_ARGS__); /* fall through */ \
- case 13: fn(__VA_ARGS__); /* fall through */ \
- case 12: fn(__VA_ARGS__); /* fall through */ \
- case 11: fn(__VA_ARGS__); /* fall through */ \
- case 10: fn(__VA_ARGS__); /* fall through */ \
- case 9: fn(__VA_ARGS__); /* fall through */ \
- case 8: fn(__VA_ARGS__); /* fall through */ \
- case 7: fn(__VA_ARGS__); /* fall through */ \
- case 6: fn(__VA_ARGS__); /* fall through */ \
- case 5: fn(__VA_ARGS__); /* fall through */ \
- case 4: fn(__VA_ARGS__); /* fall through */ \
- case 3: fn(__VA_ARGS__); /* fall through */ \
- case 2: fn(__VA_ARGS__); /* fall through */ \
- case 1: fn(__VA_ARGS__); /* fall through */ \
+ case 32: fn(__VA_ARGS__); fallthrough; \
+ case 31: fn(__VA_ARGS__); fallthrough; \
+ case 30: fn(__VA_ARGS__); fallthrough; \
+ case 29: fn(__VA_ARGS__); fallthrough; \
+ case 28: fn(__VA_ARGS__); fallthrough; \
+ case 27: fn(__VA_ARGS__); fallthrough; \
+ case 26: fn(__VA_ARGS__); fallthrough; \
+ case 25: fn(__VA_ARGS__); fallthrough; \
+ case 24: fn(__VA_ARGS__); fallthrough; \
+ case 23: fn(__VA_ARGS__); fallthrough; \
+ case 22: fn(__VA_ARGS__); fallthrough; \
+ case 21: fn(__VA_ARGS__); fallthrough; \
+ case 20: fn(__VA_ARGS__); fallthrough; \
+ case 19: fn(__VA_ARGS__); fallthrough; \
+ case 18: fn(__VA_ARGS__); fallthrough; \
+ case 17: fn(__VA_ARGS__); fallthrough; \
+ case 16: fn(__VA_ARGS__); fallthrough; \
+ case 15: fn(__VA_ARGS__); fallthrough; \
+ case 14: fn(__VA_ARGS__); fallthrough; \
+ case 13: fn(__VA_ARGS__); fallthrough; \
+ case 12: fn(__VA_ARGS__); fallthrough; \
+ case 11: fn(__VA_ARGS__); fallthrough; \
+ case 10: fn(__VA_ARGS__); fallthrough; \
+ case 9: fn(__VA_ARGS__); fallthrough; \
+ case 8: fn(__VA_ARGS__); fallthrough; \
+ case 7: fn(__VA_ARGS__); fallthrough; \
+ case 6: fn(__VA_ARGS__); fallthrough; \
+ case 5: fn(__VA_ARGS__); fallthrough; \
+ case 4: fn(__VA_ARGS__); fallthrough; \
+ case 3: fn(__VA_ARGS__); fallthrough; \
+ case 2: fn(__VA_ARGS__); fallthrough; \
+ case 1: fn(__VA_ARGS__); fallthrough; \
case 0: break; \
\
default: \
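
The macro's trick is that entering the switch at case N falls through every lower case, so fn() runs exactly N times. A three-way miniature that compiles on its own:

    #include <stdio.h>

    #define unroll3(times, fn, ...)				\
    do {							\
    	switch (times) {				\
    	case 3: fn(__VA_ARGS__); /* fallthrough */	\
    	case 2: fn(__VA_ARGS__); /* fallthrough */	\
    	case 1: fn(__VA_ARGS__); /* fallthrough */	\
    	case 0: break;					\
    	}						\
    } while (0)

    static void tick(int *n) { ++*n; }

    int main(void)
    {
    	int n = 0;

    	unroll3(2, tick, &n);
    	printf("fn ran %d times\n", n);	/* 2 */
    	return 0;
    }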
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index efce5defcc5c..011eb6bbf81a 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1898,8 +1898,8 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
(base_id >= 64 && base_id < 90) ||
(base_id >= 128 && base_id < 164) ||
(base_id >= 192 && base_id < 200) ||
- (base_id >= 256 && base_id < 274) ||
- (base_id >= 320 && base_id < 358) ||
+ (base_id >= 256 && base_id < 275) ||
+ (base_id >= 320 && base_id < 361) ||
(base_id >= 384 && base_id < 574))
break;
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 2f513506a3d5..1dbfb5aadffd 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -239,6 +239,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle)
*/
static void bmips_init_secondary(void)
{
+ bmips_cpu_setup();
+
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 38aa07ccdbcc..cf788591f091 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1287,6 +1287,18 @@ static int enable_restore_fp_context(int msa)
err = own_fpu_inatomic(1);
if (msa && !err) {
enable_msa();
+ /*
+ * With MSA enabled, userspace can see the MSACSR
+ * and MSA registers, but they still hold values
+ * from a previous task; restore them from the
+ * saved fp/msa context
+ */
+ write_msa_csr(current->thread.fpu.msacsr);
+ /*
+ * own_fpu_inatomic(1) only restores the low
+ * 64 bits; fix up the high 64 bits here
+ */
+ init_msa_upper();
set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE);
}
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7de85d2253ff..0c50ac444222 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -137,6 +137,8 @@ extern void kvm_init_loongson_ipi(struct kvm *kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
+ case KVM_VM_MIPS_AUTO:
+ break;
#ifdef CONFIG_KVM_MIPS_VZ
case KVM_VM_MIPS_VZ:
#else
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 87fa8d8a1031..28c366d307e7 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -486,7 +486,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
return 1;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
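
The extra flags argument carries the MMU-notifier range flags down from generic KVM code (the cross-arch signature change also visible in kvm_host.h above); architectures that care can test MMU_NOTIFIER_RANGE_BLOCKABLE to learn whether they may sleep, while MIPS simply ignores it. A hedged sketch of both sides:

    /* Generic side (sketch): forward the notifier's flags unchanged. */
    kvm_unmap_hva_range(kvm, range->start, range->end, range->flags);

    /* Arch side (sketch): decide whether sleeping is permitted. */
    bool blockable = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;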
diff --git a/arch/mips/loongson2ef/Platform b/arch/mips/loongson2ef/Platform
index 4ab55f1123a0..ae023b9a1c51 100644
--- a/arch/mips/loongson2ef/Platform
+++ b/arch/mips/loongson2ef/Platform
@@ -44,6 +44,10 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
endif
endif
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
#
# Loongson Machines' Support
#
diff --git a/arch/mips/loongson64/cop2-ex.c b/arch/mips/loongson64/cop2-ex.c
index f130f62129b8..00055d4b6042 100644
--- a/arch/mips/loongson64/cop2-ex.c
+++ b/arch/mips/loongson64/cop2-ex.c
@@ -95,10 +95,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rt, value);
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rq, value_next);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
compute_return_epc(regs);
own_fpu(1);
}
@@ -130,15 +128,13 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;
lose_fpu(1);
- value_next = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rq);
+ value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);
StoreDW(addr + 8, value_next, res);
if (res)
goto fault;
- value = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lswc2_format.rt);
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0);
StoreDW(addr, value, res);
if (res)
@@ -204,8 +200,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt, value);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
compute_return_epc(regs);
own_fpu(1);
@@ -221,8 +216,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;
- set_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt, value);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
compute_return_epc(regs);
own_fpu(1);
break;
@@ -286,8 +280,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;
lose_fpu(1);
- value = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt);
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
StoreW(addr, value, res);
if (res)
@@ -305,8 +298,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;
lose_fpu(1);
- value = get_fpr64(current->thread.fpu.fpr,
- insn.loongson3_lsdc2_format.rt);
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
StoreDW(addr, value, res);
if (res)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fc5a6d25f74f..0ef717093262 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1712,7 +1712,11 @@ static void setup_scache(void)
printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
scache_size >> 10,
way_string[c->scache.ways], c->scache.linesz);
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
+
#else
if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 1493c49ca47a..55d7b7fd18b6 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -245,7 +245,6 @@ static int mipsxx_perfcount_handler(void)
switch (counters) {
#define HANDLE_COUNTER(n) \
- fallthrough; \
case n + 1: \
control = r_c0_perfctrl ## n(); \
counter = r_c0_perfcntr ## n(); \
@@ -256,8 +255,11 @@ static int mipsxx_perfcount_handler(void)
handled = IRQ_HANDLED; \
}
HANDLE_COUNTER(3)
+ fallthrough;
HANDLE_COUNTER(2)
+ fallthrough;
HANDLE_COUNTER(1)
+ fallthrough;
HANDLE_COUNTER(0)
}
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index 0ecffb65fd6d..eeeec18c420a 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = {
},
};
-static u32 a20r_ack_hwint(void)
+/*
+ * Trigger chipset to update CPU's CAUSE IP field
+ */
+static u32 a20r_update_cause_ip(void)
{
u32 status = read_c0_status();
@@ -205,12 +208,14 @@ static void a20r_hwint(void)
int irq;
clear_c0_status(IE_IRQ0);
- status = a20r_ack_hwint();
+ status = a20r_update_cause_ip();
cause = read_c0_cause();
irq = ffs(((cause & status) >> 8) & 0xf8);
if (likely(irq > 0))
do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);
+
+ a20r_update_cause_ip();
set_c0_status(IE_IRQ0);
}
@@ -222,8 +227,8 @@ void __init sni_a20r_irq_init(void)
irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq);
sni_hwint = a20r_hwint;
change_c0_status(ST0_IM, IE_IRQ0);
- if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler, 0, "ISA",
- NULL))
+ if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler,
+ IRQF_SHARED, "ISA", sni_isa_irq_handler))
pr_err("Failed to register ISA interrupt\n");
}
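
Two things change in the request_irq() call: the line becomes IRQF_SHARED, and a shared handler must be registered with a non-NULL dev_id token (here the handler address itself) so the registration can later be identified and released. The general pattern, as a sketch with a hypothetical my_handler:

    /* Shared lines need a unique, non-NULL cookie as dev_id ... */
    if (request_irq(irq, my_handler, IRQF_SHARED, "ISA", my_handler))
    	pr_err("Failed to register ISA interrupt\n");

    /* ... and the same cookie must be passed back when releasing it. */
    free_irq(irq, my_handler);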
diff --git a/arch/nds32/include/asm/irqflags.h b/arch/nds32/include/asm/irqflags.h
index fb45ec46bb1b..51ef800bb301 100644
--- a/arch/nds32/include/asm/irqflags.h
+++ b/arch/nds32/include/asm/irqflags.h
@@ -34,3 +34,8 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return !flags;
}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c
index 62bdafbc53f4..9edd7ed7d7bf 100644
--- a/arch/nds32/kernel/fpu.c
+++ b/arch/nds32/kernel/fpu.c
@@ -45,7 +45,7 @@ void save_fpu(struct task_struct *tsk)
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP32_DP16_reg:
asm volatile ("fsdi $fd15, [%0+0x78]\n\t"
"fsdi $fd14, [%0+0x70]\n\t"
@@ -58,7 +58,7 @@ void save_fpu(struct task_struct *tsk)
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP16_DP8_reg:
asm volatile ("fsdi $fd7, [%0+0x38]\n\t"
"fsdi $fd6, [%0+0x30]\n\t"
@@ -67,7 +67,7 @@ void save_fpu(struct task_struct *tsk)
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP8_DP4_reg:
asm volatile ("fsdi $fd3, [%1+0x18]\n\t"
"fsdi $fd2, [%1+0x10]\n\t"
@@ -108,7 +108,7 @@ void load_fpu(const struct fpu_struct *fpregs)
"fldi $fd16, [%0+0x80]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP32_DP16_reg:
asm volatile ("fldi $fd15, [%0+0x78]\n\t"
"fldi $fd14, [%0+0x70]\n\t"
@@ -120,7 +120,7 @@ void load_fpu(const struct fpu_struct *fpregs)
"fldi $fd8, [%0+0x40]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP16_DP8_reg:
asm volatile ("fldi $fd7, [%0+0x38]\n\t"
"fldi $fd6, [%0+0x30]\n\t"
@@ -128,7 +128,7 @@ void load_fpu(const struct fpu_struct *fpregs)
"fldi $fd4, [%0+0x20]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP8_DP4_reg:
asm volatile ("fldi $fd3, [%1+0x18]\n\t"
"fldi $fd2, [%1+0x10]\n\t"
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c
index 330b19fcd990..36e25a410bb0 100644
--- a/arch/nds32/kernel/signal.c
+++ b/arch/nds32/kernel/signal.c
@@ -316,7 +316,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->uregs[0] = -EINTR;
break;
}
- /* Else, fall through */
+ fallthrough;
case -ERESTARTNOINTR:
regs->uregs[0] = regs->orig_r0;
regs->ipc -= 4;
@@ -361,7 +361,7 @@ static void do_signal(struct pt_regs *regs)
switch (regs->uregs[0]) {
case -ERESTART_RESTARTBLOCK:
regs->uregs[15] = __NR_restart_syscall;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index f0390211236b..120f5005461b 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -165,19 +165,19 @@ struct __large_struct {
#define __get_user_nocheck(x, ptr, size) \
({ \
- long __gu_err, __gu_val; \
- __get_user_size(__gu_val, (ptr), (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ long __gu_err; \
+ __get_user_size((x), (ptr), (size), __gu_err); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
+ long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- if (access_ok(__gu_addr, size)) \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ if (access_ok(__gu_addr, size)) \
+ __get_user_size((x), __gu_addr, (size), __gu_err); \
+ else \
+ (x) = (__typeof__(*(ptr))) 0; \
__gu_err; \
})
@@ -191,11 +191,13 @@ do { \
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
case 8: __get_user_asm2(x, ptr, retval); break; \
- default: (x) = __get_user_bad(); \
+ default: (x) = (__typeof__(*(ptr)))__get_user_bad(); \
} \
} while (0)
#define __get_user_asm(x, addr, err, op) \
+{ \
+ unsigned long __gu_tmp; \
__asm__ __volatile__( \
"1: "op" %1,0(%2)\n" \
"2:\n" \
@@ -209,10 +211,14 @@ do { \
" .align 2\n" \
" .long 1b,3b\n" \
".previous" \
- : "=r"(err), "=r"(x) \
- : "r"(addr), "i"(-EFAULT), "0"(err))
+ : "=r"(err), "=r"(__gu_tmp) \
+ : "r"(addr), "i"(-EFAULT), "0"(err)); \
+ (x) = (__typeof__(*(addr)))__gu_tmp; \
+}
#define __get_user_asm2(x, addr, err) \
+{ \
+ unsigned long long __gu_tmp; \
__asm__ __volatile__( \
"1: l.lwz %1,0(%2)\n" \
"2: l.lwz %H1,4(%2)\n" \
@@ -229,8 +235,11 @@ do { \
" .long 1b,4b\n" \
" .long 2b,4b\n" \
".previous" \
- : "=r"(err), "=&r"(x) \
- : "r"(addr), "i"(-EFAULT), "0"(err))
+ : "=r"(err), "=&r"(__gu_tmp) \
+ : "r"(addr), "i"(-EFAULT), "0"(err)); \
+ (x) = (__typeof__(*(addr)))( \
+ (__typeof__((x)-(x)))__gu_tmp); \
+}
/* more complex routines */
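
The rewrite routes the loaded value through a correctly sized temporary before casting to the user-visible type, which both avoids the old uninitialised __gu_val warnings and keeps the asm output constraint register-sized. Reduced to a hedged macro sketch, where probe_read() is a hypothetical stand-in for the inline asm:

    #define __get_user_sketch(x, ptr, err)				\
    do {								\
    	unsigned long __gu_tmp;					\
    	/* load into a full register first ... */		\
    	(err) = probe_read(&__gu_tmp, (ptr));	/* hypothetical */	\
    	/* ... then narrow to the destination's own type */	\
    	(x) = (__typeof__(*(ptr)))__gu_tmp;			\
    } while (0)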
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index b18e775f8be3..13c87f1f872b 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -80,6 +80,16 @@ static void __init setup_memory(void)
*/
memblock_reserve(__pa(_stext), _end - _stext);
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* Then reserve the initrd, if any */
+ if (initrd_start && (initrd_end > initrd_start)) {
+ unsigned long aligned_start = ALIGN_DOWN(initrd_start, PAGE_SIZE);
+ unsigned long aligned_end = ALIGN(initrd_end, PAGE_SIZE);
+
+ memblock_reserve(__pa(aligned_start), aligned_end - aligned_start);
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
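
ALIGN_DOWN()/ALIGN() widen the reservation to whole pages so no byte of the initrd lands in an unreserved page. With 4 KiB pages, for example:

    #include <stdio.h>

    #define PAGE_SIZE	 4096UL
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
    #define ALIGN(x, a)	 (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
    	unsigned long initrd_start = 0x8123456, initrd_end = 0x8222222;

    	printf("reserve [%#lx, %#lx)\n",
    	       ALIGN_DOWN(initrd_start, PAGE_SIZE),
    	       ALIGN(initrd_end, PAGE_SIZE));	/* [0x8123000, 0x8223000) */
    	return 0;
    }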
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index 97804f21a40c..c779364f0cd0 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -244,7 +244,7 @@ int do_signal(struct pt_regs *regs, int syscall)
switch (retval) {
case -ERESTART_RESTARTBLOCK:
restart = -2;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index 08f56af387ac..534a52ec5e66 100644
--- a/arch/openrisc/mm/cache.c
+++ b/arch/openrisc/mm/cache.c
@@ -16,7 +16,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-static void cache_loop(struct page *page, const unsigned int reg)
+static __always_inline void cache_loop(struct page *page, const unsigned int reg)
{
unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 5df5d4cd5d4c..3c037fc96038 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -502,7 +502,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
regs->gr[28] = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
check_syscallno_in_delay_branch(regs);
break;
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 43875c289723..a52c7abf2ca4 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -437,7 +437,6 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
break;
default:
- /* Fall through */
break;
}
@@ -644,12 +643,12 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
case 15:
/* Data TLB miss fault/Data page fault */
- /* Fall through */
+ fallthrough;
case 16:
/* Non-access instruction TLB miss fault */
/* The instruction TLB entry needed for the target address of the FIC
is absent, and hardware can't find it, so we get to cleanup */
- /* Fall through */
+ fallthrough;
case 17:
/* Non-access data TLB miss fault/Non-access data page fault */
/* FIXME:
@@ -673,7 +672,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
handle_unaligned(regs);
return;
}
- /* Fall Through */
+ fallthrough;
case 26:
/* PCXL: Data memory access rights trap */
fault_address = regs->ior;
@@ -683,7 +682,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
case 19:
/* Data memory break trap */
regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
- /* fall thru */
+ fallthrough;
case 21:
/* Page reference trap */
handle_gdb_break(regs, TRAP_HWBKPT);
@@ -730,7 +729,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
}
mmap_read_unlock(current->mm);
}
- /* Fall Through */
+ fallthrough;
case 27:
/* Data memory protection ID trap */
if (code == 27 && !user_mode(regs) &&
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 4bfe2da9fbe3..716960f5d92e 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -67,7 +67,7 @@ parisc_acctyp(unsigned long code, unsigned int inst)
case 0x30000000: /* coproc2 */
if (bit22set(inst))
return VM_WRITE;
- /* fall through */
+ fallthrough;
case 0x0: /* indexed/memory management */
if (bit22set(inst)) {
@@ -370,7 +370,7 @@ bad_area:
}
/* probably address is outside of mapped file */
- /* fall through */
+ fallthrough;
case 17: /* NA data TLB miss / page fault */
case 18: /* Unaligned access - PCXS only */
signo = SIGBUS;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1f48bbfb3ce9..4a02cab105fd 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -116,7 +116,6 @@ config PPC
#
select ARCH_32BIT_OFF_T if PPC32
select ARCH_HAS_DEBUG_VIRTUAL
- select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
@@ -136,7 +135,7 @@ config PPC
select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION)
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
- select ARCH_HAS_UACCESS_MCSAFE if PPC64
+ select ARCH_HAS_COPY_MC if PPC64
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
@@ -860,6 +859,18 @@ config PPC_SUBPAGE_PROT
If unsure, say N here.
+config PPC_PROT_SAO_LPAR
+ bool "Support PROT_SAO mappings in LPARs"
+ depends on PPC_BOOK3S_64
+ help
+ This option adds support for PROT_SAO mappings from userspace
+ inside LPARs on supported CPUs.
+
+ This may cause issues when performing guest migration from
+ a CPU that supports SAO to one that does not.
+
+ If unsure, say N here.
+
config PPC_COPRO_BASE
bool
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index af9af03059e4..15ed8d0aa014 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -108,7 +108,6 @@ CONFIG_FB_NVIDIA=y
CONFIG_FB_NVIDIA_I2C=y
CONFIG_FB_RADEON=y
# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 5e6f92ba3210..66e9a0fd64ff 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -743,7 +743,6 @@ CONFIG_FB_TRIDENT=m
CONFIG_FB_SM501=m
CONFIG_FB_IBM_GXT4500=y
CONFIG_LCD_PLATFORM=m
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 55442d45c597..b392384a3b15 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -239,14 +239,14 @@ static inline void early_init_mmu_secondary(void)
extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size);
-extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
- phys_addr_t first_memblock_size);
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
- if (early_radix_enabled())
- return radix__setup_initial_memory_limit(first_memblock_base,
- first_memblock_size);
+ /*
+ * Hash has more strict restrictions. At this point we don't
+ * know which translations we will pick. Hence go with hash
+ * restrictions.
+ */
return hash__setup_initial_memory_limit(first_memblock_base,
first_memblock_size);
}
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 6de56c3b33c4..495fc0ccb453 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -20,13 +20,9 @@
#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED 0x00008 /* kernel access only */
-
-#define _PAGE_CACHE_CTL 0x00030 /* Bits for the folowing cache modes */
- /* No bits set is normal cacheable memory */
- /* 0x00010 unused, is SAO bit on radix POWER9 */
+#define _PAGE_SAO 0x00010 /* Strong access order */
#define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */
#define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */
-
#define _PAGE_DIRTY 0x00080 /* C: page changed */
#define _PAGE_ACCESSED 0x00100 /* R: page referenced */
/*
@@ -828,6 +824,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
return hash__set_pte_at(mm, addr, ptep, pte, percpu);
}
+#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)
+
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
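
With _PAGE_SAO restored, all four encodings of the two-bit cache-control field are in use again (0 is normal cacheable, 0x10 SAO, 0x20 non-idempotent, 0x30 tolerant), and _PAGE_CACHE_CTL is their union. A sketch of how the field decodes:

    /* Sketch: classify a pte's cache-control encoding. */
    switch (pte_val(pte) & _PAGE_CACHE_CTL) {
    case 0:				/* normal cacheable memory */
    	break;
    case _PAGE_SAO:			/* strong access ordering */
    	break;
    case _PAGE_NON_IDEMPOTENT:
    case _PAGE_TOLERANT:		/* cache-inhibited variants */
    	break;
    }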
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index fdddb822d564..32a15dc49e8c 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -9,6 +9,11 @@
#ifndef __ASSEMBLY__
+/*
+ * Added to include __machine_check_early_realmode_* functions
+ */
+#include <asm/mce.h>
+
/* This structure can grow, it's real size is used by head.S code
* via the mkdefs mechanism.
*/
@@ -191,7 +196,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000)
#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000)
#define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000)
-// Free LONG_ASM_CONST(0x0000000008000000)
+#define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000)
#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000)
#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000)
#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000)
@@ -436,7 +441,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | \
CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX )
@@ -445,7 +450,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
@@ -456,7 +461,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
@@ -474,7 +479,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 925cf89cbf4b..6bfc87915d5d 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -52,7 +52,7 @@ enum fixed_addresses {
FIX_HOLE,
/* reserve the top 128K for early debugging purposes */
FIX_EARLY_DEBUG_TOP = FIX_HOLE,
- FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128, PAGE_SIZE)/PAGE_SIZE)-1,
+ FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
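Note on the hunk above: SZ_128 -> SZ_128K is a one-character fix, but it changes how many fixmap slots the early-debug area actually reserves, because the expression converts a byte size into page-sized slots. A self-contained sketch of the arithmetic (ALIGN() and the size constants are re-declared here to keep it standalone; values match linux/sizes.h):

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1)) /* kernel ALIGN() */
    #define SZ_128          0x00000080UL
    #define SZ_128K         0x00020000UL

    static unsigned long slots(unsigned long size, unsigned long page_size)
    {
            return ALIGN_UP(size, page_size) / page_size;
    }

    int main(void)
    {
            printf("SZ_128,  4K pages:  %lu slot(s)\n", slots(SZ_128, 0x1000));
            printf("SZ_128K, 4K pages:  %lu slot(s)\n", slots(SZ_128K, 0x1000));
            printf("SZ_128K, 64K pages: %lu slot(s)\n", slots(SZ_128K, 0x10000));
            return 0;
    }

With SZ_128 the "reserve the top 128K" comment was not honoured: the area collapsed to a single page (1 slot), while SZ_128K yields the intended 32 slots with 4K pages, or 2 with 64K pages.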
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 3a0db7b0b46e..35060be09073 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -200,17 +200,14 @@ static inline bool arch_irqs_disabled(void)
#define powerpc_local_irq_pmu_save(flags) \
do { \
raw_local_irq_pmu_save(flags); \
- trace_hardirqs_off(); \
+ if (!raw_irqs_disabled_flags(flags)) \
+ trace_hardirqs_off(); \
} while(0)
#define powerpc_local_irq_pmu_restore(flags) \
do { \
- if (raw_irqs_disabled_flags(flags)) { \
- raw_local_irq_pmu_restore(flags); \
- trace_hardirqs_off(); \
- } else { \
+ if (!raw_irqs_disabled_flags(flags)) \
trace_hardirqs_on(); \
- raw_local_irq_pmu_restore(flags); \
- } \
+ raw_local_irq_pmu_restore(flags); \
} while(0)
#else
#define powerpc_local_irq_pmu_save(flags) \
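The rewritten PMU save/restore macros above only emit hardirq tracepoints on real state transitions: save traces "off" only if interrupts were enabled on entry, and restore traces "on" only if it is about to re-enable them. A userspace model of that pairing (the function names here are stand-ins for the raw_* primitives, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool irqs_disabled;

    static void trace_hardirqs_off(void) { puts("trace: off"); }
    static void trace_hardirqs_on(void)  { puts("trace: on"); }

    /* Mirrors powerpc_local_irq_pmu_save(): trace only on a transition. */
    static bool pmu_save(void)
    {
            bool flags = irqs_disabled;     /* raw_local_irq_pmu_save() */
            irqs_disabled = true;
            if (!flags)                     /* were enabled -> now disabled */
                    trace_hardirqs_off();
            return flags;
    }

    /* Mirrors powerpc_local_irq_pmu_restore(). */
    static void pmu_restore(bool flags)
    {
            if (!flags)                     /* about to become enabled again */
                    trace_hardirqs_on();
            irqs_disabled = flags;          /* raw_local_irq_pmu_restore() */
    }

    int main(void)
    {
            bool f;

            f = pmu_save();   /* from enabled: prints "trace: off" */
            pmu_restore(f);   /* prints "trace: on" */

            irqs_disabled = true;
            f = pmu_save();   /* already disabled: no trace events at all */
            pmu_restore(f);
            return 0;
    }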
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index d635b96c7ea6..7355ed05e65e 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -15,11 +15,18 @@
#ifndef __ASSEMBLY__
#include <asm/page.h>
+#include <linux/sizes.h>
#define KASAN_SHADOW_SCALE_SHIFT 3
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_MODULES) && defined(CONFIG_STRICT_KERNEL_RWX)
+#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
+#else
+#define KASAN_KERN_START PAGE_OFFSET
+#endif
+
#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \
- (PAGE_OFFSET >> KASAN_SHADOW_SCALE_SHIFT))
+ (KASAN_KERN_START >> KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
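KASAN_KERN_START exists so that on book3s/32 with STRICT_KERNEL_RWX the shadow region also covers the module area in the 256M segment below PAGE_OFFSET. Worked numbers, assuming the common PAGE_OFFSET of 0xc0000000 (an assumption for illustration; the real value is configuration-dependent):

    #include <stdio.h>

    #define SZ_256M 0x10000000UL
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
    #define KASAN_SHADOW_SCALE_SHIFT 3

    int main(void)
    {
            unsigned long page_offset = 0xc0000000UL;  /* assumed PAGE_OFFSET */
            unsigned long kern_start =
                    ALIGN_DOWN(page_offset - SZ_256M, SZ_256M);

            /* Each shadow byte covers 8 bytes, so the shadow region starts
             * KERN_START >> 3 bytes into the shadow mapping. */
            printf("KASAN_KERN_START     = %#lx\n", kern_start);
            printf("shadow start offset  = +%#lx\n",
                   kern_start >> KASAN_SHADOW_SCALE_SHIFT);
            printf("extra shadow covered = %#lx bytes\n",
                   (page_offset - kern_start) >> KASAN_SHADOW_SCALE_SHIFT);
            return 0;
    }

Here KASAN_KERN_START lands at 0xb0000000, pulling the shadow start down by 0x2000000 bytes to cover the module segment.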
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index e020d269416d..10ded83414de 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -58,7 +58,8 @@
#define KVM_ARCH_WANT_MMU_NOTIFIER
extern int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end,
+ unsigned flags);
extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index adf2cda67f9a..89aa8248a57d 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -210,6 +210,9 @@ struct mce_error_info {
#define MCE_EVENT_RELEASE true
#define MCE_EVENT_DONTRELEASE false
+struct pt_regs;
+struct notifier_block;
+
extern void save_mce_event(struct pt_regs *regs, long handled,
struct mce_error_info *mce_err, uint64_t nip,
uint64_t addr, uint64_t phys_addr);
@@ -225,5 +228,9 @@ int mce_register_notifier(struct notifier_block *nb);
int mce_unregister_notifier(struct notifier_block *nb);
#ifdef CONFIG_PPC_BOOK3S_64
void flush_and_reload_slb(void);
+long __machine_check_early_realmode_p7(struct pt_regs *regs);
+long __machine_check_early_realmode_p8(struct pt_regs *regs);
+long __machine_check_early_realmode_p9(struct pt_regs *regs);
+long __machine_check_early_realmode_p10(struct pt_regs *regs);
#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* __ASM_PPC64_MCE_H__ */
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 7c07728af300..7cb6d18f5cd6 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -13,20 +13,43 @@
#include <linux/pkeys.h>
#include <asm/cpu_has_feature.h>
-#ifdef CONFIG_PPC_MEM_KEYS
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
unsigned long pkey)
{
- return pkey_to_vmflag_bits(pkey);
+#ifdef CONFIG_PPC_MEM_KEYS
+ return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey));
+#else
+ return ((prot & PROT_SAO) ? VM_SAO : 0);
+#endif
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
- return __pgprot(vmflag_to_pte_pkey_bits(vm_flags));
+#ifdef CONFIG_PPC_MEM_KEYS
+ return (vm_flags & VM_SAO) ?
+ __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
+ __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
+#else
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+#endif
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-#endif
+
+static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
+{
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
+ return false;
+ if (prot & PROT_SAO) {
+ if (!cpu_has_feature(CPU_FTR_SAO))
+ return false;
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ !IS_ENABLED(CONFIG_PPC_PROT_SAO_LPAR))
+ return false;
+ }
+ return true;
+}
+#define arch_validate_prot arch_validate_prot
#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_MMAN_H */
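With PROT_SAO restored, arch_validate_prot() again accepts it, but only when the CPU advertises CPU_FTR_SAO and, under an LPAR, only if CONFIG_PPC_PROT_SAO_LPAR is set. A minimal userspace sketch of requesting SAO via mprotect() and tolerating a refusal (powerpc-only; the 0x10 value is the uapi constant from asm/mman.h further below):

    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef PROT_SAO
    #define PROT_SAO 0x10   /* powerpc uapi value, see asm/mman.h */
    #endif

    int main(void)
    {
            size_t len = 1 << 16;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;

            /* mprotect() runs arch_validate_prot(): expect EINVAL when the
             * CPU lacks CPU_FTR_SAO, or under an LPAR built without
             * CONFIG_PPC_PROT_SAO_LPAR. */
            if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_SAO))
                    perror("mprotect(PROT_SAO)");

            munmap(p, len);
            return 0;
    }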
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 59ee9fa4ae09..6cb8aa357191 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -82,6 +82,8 @@
*/
#include <asm/nohash/pte-book3e.h>
+#define _PAGE_SAO 0
+
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
/*
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 1e8b2e1ec1db..daec64d41b44 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -40,4 +40,7 @@ static inline bool is_sier_available(void) { return false; }
/* To support perf_regs sier update */
extern bool is_sier_available(void);
+/* To define perf extended regs mask value */
+extern u64 PERF_REG_EXTENDED_MASK;
+#define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK
#endif
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 86c9eb064b22..f6acabb6c9be 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -62,6 +62,11 @@ struct power_pmu {
int *blacklist_ev;
/* BHRB entries in the PMU */
int bhrb_nr;
+ /*
+ * set this flag with `PERF_PMU_CAP_EXTENDED_REGS` if
+ * the pmu supports extended perf regs capability
+ */
+ int capabilities;
};
/*
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index 283552cd0e58..2aa0e31e6884 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -53,9 +53,7 @@ void *__memmove(void *to, const void *from, __kernel_size_t n);
#ifndef CONFIG_KASAN
#define __HAVE_ARCH_MEMSET32
#define __HAVE_ARCH_MEMSET64
-#define __HAVE_ARCH_MEMCPY_MCSAFE
-extern int memcpy_mcsafe(void *dst, const void *src, __kernel_size_t sz);
extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t);
extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 00699903f1ef..20a35373cafc 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -435,6 +435,32 @@ do { \
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+unsigned long __must_check
+copy_mc_generic(void *to, const void *from, unsigned long size);
+
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned long size)
+{
+ return copy_mc_generic(to, from, size);
+}
+#define copy_mc_to_kernel copy_mc_to_kernel
+
+static inline unsigned long __must_check
+copy_mc_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (likely(check_copy_size(from, n, true))) {
+ if (access_ok(to, n)) {
+ allow_write_to_user(to, n);
+ n = copy_mc_generic((void *)to, from, n);
+ prevent_write_to_user(to, n);
+ }
+ }
+
+ return n;
+}
+#endif
+
#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
@@ -523,20 +549,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
return ret;
}
-static __always_inline unsigned long __must_check
-copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
-{
- if (likely(check_copy_size(from, n, true))) {
- if (access_ok(to, n)) {
- allow_write_to_user(to, n);
- n = memcpy_mcsafe((void *)to, from, n);
- prevent_write_to_user(to, n);
- }
- }
-
- return n;
-}
-
unsigned long __arch_clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
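copy_mc_to_user() keeps the calling convention of the removed copy_to_user_mcsafe(): the return value is the number of bytes *not* copied, so 0 means success and a nonzero result means the machine-check path aborted mid-transfer. A toy model of that convention (the fault point is simulated; real aborts come from the MCE handler, not a parameter):

    #include <stdio.h>
    #include <string.h>

    /* Like copy_to_user(), the result is the remainder left uncopied. */
    static unsigned long copy_mc_model(void *to, const void *from,
                                       unsigned long size,
                                       unsigned long fault_at)
    {
            unsigned long done = size < fault_at ? size : fault_at;

            memcpy(to, from, done);
            return size - done;     /* remainder the caller must handle */
    }

    int main(void)
    {
            char src[64] = "machine-check protected payload";
            char dst[64] = { 0 };
            unsigned long rest;

            rest = copy_mc_model(dst, src, sizeof(src), 16);
            if (rest)
                    printf("short copy: %lu byte(s) missing\n", rest);
            return 0;
    }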
diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h
index 3a700351feca..c0c737215b00 100644
--- a/arch/powerpc/include/uapi/asm/mman.h
+++ b/arch/powerpc/include/uapi/asm/mman.h
@@ -11,7 +11,7 @@
#include <asm-generic/mman-common.h>
-#define PROT_SAO 0x10 /* Unsupported since v5.9 */
+#define PROT_SAO 0x10 /* Strong Access Ordering */
#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index f599064dd8dc..bdf5f10f8b9f 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -48,6 +48,24 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
PERF_REG_POWERPC_MMCRA,
- PERF_REG_POWERPC_MAX,
+ /* Extended registers */
+ PERF_REG_POWERPC_MMCR0,
+ PERF_REG_POWERPC_MMCR1,
+ PERF_REG_POWERPC_MMCR2,
+ PERF_REG_POWERPC_MMCR3,
+ PERF_REG_POWERPC_SIER2,
+ PERF_REG_POWERPC_SIER3,
+ /* Max regs without the extended regs */
+ PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
};
+
+#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1)
+
+/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 */
+#define PERF_REG_PMU_MASK_300 (((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK)
+/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 */
+#define PERF_REG_PMU_MASK_31 (((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK)
+
+#define PERF_REG_MAX_ISA_300 (PERF_REG_POWERPC_MMCR2 + 1)
+#define PERF_REG_MAX_ISA_31 (PERF_REG_POWERPC_SIER3 + 1)
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
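The extended-register masks above carve bits out of a single sample bitmap. The earlier enum entries are elided in this hunk, so the sketch below assumes mainline's indices, where PERF_REG_POWERPC_MMCRA is the last fixed register at index 44 (an assumption stated here, not visible in the hunk):

    #include <stdio.h>

    enum {
            PERF_REG_POWERPC_MMCRA = 44,    /* assumed: last fixed register */
            PERF_REG_POWERPC_MMCR0,         /* 45: first extended register */
            PERF_REG_POWERPC_MMCR1,
            PERF_REG_POWERPC_MMCR2,
            PERF_REG_POWERPC_MMCR3,
            PERF_REG_POWERPC_SIER2,
            PERF_REG_POWERPC_SIER3,         /* 50 */
            PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
    };

    #define PERF_REG_PMU_MASK     ((1ULL << PERF_REG_POWERPC_MAX) - 1)
    #define PERF_REG_PMU_MASK_300 \
            (((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK)
    #define PERF_REG_PMU_MASK_31 \
            (((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK)

    int main(void)
    {
            printf("fixed regs : %#llx (bits 0-44)\n", PERF_REG_PMU_MASK);
            printf("ISA 3.0 ext: %#llx (bits 45-47, MMCR0-MMCR2)\n",
                   PERF_REG_PMU_MASK_300);
            printf("ISA 3.1 ext: %#llx (bits 45-50, up to SIER3)\n",
                   PERF_REG_PMU_MASK_31);
            return 0;
    }

So PERF_REG_PMU_MASK_300 selects MMCR0-MMCR2 and PERF_REG_PMU_MASK_31 extends that through SIER3, while PERF_REG_POWERPC_MAX deliberately stops before the extended block.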
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 3d406a9626e8..2aa89c6b2896 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -72,9 +72,6 @@ extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power9(void);
extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power10(void);
-extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -542,6 +539,25 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
+ { /* Power10 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00800000,
+ .cpu_name = "POWER10 (raw)",
+ .cpu_features = CPU_FTRS_POWER10,
+ .cpu_user_features = COMMON_USER_POWER10,
+ .cpu_user_features2 = COMMON_USER2_POWER10,
+ .mmu_features = MMU_FTRS_POWER10,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power10",
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = __setup_cpu_power10,
+ .cpu_restore = __restore_cpu_power10,
+ .machine_check_early = __machine_check_early_realmode_p10,
+ .platform = "power10",
+ },
{ /* Cell Broadband Engine */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00700000,
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 569fecd7b5b2..9053fc9d20c7 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -120,7 +120,8 @@ u64 dma_iommu_get_required_mask(struct device *dev)
if (!tbl)
return 0;
- mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
+ mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
+ tbl->it_page_shift - 1);
mask += mask - 1;
return mask;
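Two bugs are fixed in dma_iommu_get_required_mask() above: "<" where "<<" was intended (so the old mask was 0 or 1), and the window size being counted in IOMMU pages rather than bytes. Worked example, assuming a table of 2^16 entries of 64K pages starting at offset 0:

    #include <stdio.h>

    static int fls_long(unsigned long long x)  /* 1-based index of top bit */
    {
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    int main(void)
    {
            unsigned long long it_offset = 0, it_size = 1ULL << 16;
            unsigned int it_page_shift = 16;        /* 64K IOMMU pages */
            unsigned long long mask;

            /* Old, broken: "1ULL < (...)" is a comparison, yielding 0 or 1,
             * and the page size never entered the math at all. */
            mask = 1ULL < (fls_long(it_offset + it_size) - 1);
            printf("old mask: %#llx\n", mask + mask - 1);       /* 0x1 */

            /* Fixed: top bit covers the last byte of the DMA window. */
            mask = 1ULL << (fls_long(it_offset + it_size) + it_page_shift - 1);
            mask += mask - 1;
            printf("new mask: %#llx\n", mask);          /* 0x1ffffffff */
            return 0;
    }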
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 6f8c0c6b937a..f204ad79b6b5 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -64,10 +64,6 @@ struct dt_cpu_feature {
* Set up the base CPU
*/
-extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p10(struct pt_regs *regs);
-
static int hv_mode;
static struct {
@@ -657,7 +653,7 @@ static struct dt_cpu_feature_match __initdata
{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
{"processor-utilization-of-resources-register", feat_enable_purr, 0},
{"no-execute", feat_enable, 0},
- /* strong-access-ordering is unused */
+ {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
{"cache-inhibited-large-page", feat_enable_large_ci, 0},
{"coprocessor-icswx", feat_enable, 0},
{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 33a42e42c56f..733e40eba4eb 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -113,6 +113,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
ld r11,exception_marker@toc(r2)
std r11,-16(r10) /* "regshere" marker */
+BEGIN_FTR_SECTION
+ HMT_MEDIUM
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
/*
* RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
* would clobber syscall parameters. Also we always enter with IRQs
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 016bd831908e..73a57043ee66 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -548,7 +548,7 @@ void notrace restore_math(struct pt_regs *regs)
* are live for the user thread).
*/
if ((!(msr & MSR_FP)) && should_restore_fp())
- new_msr |= MSR_FP | current->thread.fpexc_mode;
+ new_msr |= MSR_FP;
if ((!(msr & MSR_VEC)) && should_restore_altivec())
new_msr |= MSR_VEC;
@@ -559,11 +559,17 @@ void notrace restore_math(struct pt_regs *regs)
}
if (new_msr) {
+ unsigned long fpexc_mode = 0;
+
msr_check_and_set(new_msr);
- if (new_msr & MSR_FP)
+ if (new_msr & MSR_FP) {
do_restore_fp();
+ // This also covers VSX, because VSX implies FP
+ fpexc_mode = current->thread.fpexc_mode;
+ }
+
if (new_msr & MSR_VEC)
do_restore_altivec();
@@ -572,7 +578,7 @@ void notrace restore_math(struct pt_regs *regs)
msr_check_and_clear(new_msr);
- regs->msr |= new_msr;
+ regs->msr |= new_msr | fpexc_mode;
}
}
#endif
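The restore_math() change keeps the thread's MSR_FE0/MSR_FE1 exception-mode bits out of the mask handed to msr_check_and_set()/msr_check_and_clear(), and merges them into regs->msr only when FP (and therefore VSX) was actually restored. A small model of the new flow (bit values are illustrative stand-ins, not the real MSR layout from asm/reg.h):

    #include <stdio.h>

    #define MSR_FP  0x2000UL        /* stand-in values for the demo */
    #define MSR_VEC 0x2000000UL
    #define FPEXC   0x900UL         /* thread's MSR_FE0|MSR_FE1 mode bits */

    static unsigned long restored_msr_bits(unsigned long new_msr)
    {
            unsigned long fpexc_mode = 0;

            /* msr_check_and_set(new_msr) no longer sees FE bits ... */
            if (new_msr & MSR_FP)
                    fpexc_mode = FPEXC;     /* ... they ride along only here */

            return new_msr | fpexc_mode;    /* what lands in regs->msr */
    }

    int main(void)
    {
            printf("VEC only: %#lx\n", restored_msr_bits(MSR_VEC));
            printf("FP + VEC: %#lx\n", restored_msr_bits(MSR_FP | MSR_VEC));
            return 0;
    }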
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index b198b0ff25bc..808ec9fab605 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -311,6 +311,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
min = pvr & 0xFF;
break;
case 0x004e: /* POWER9 bits 12-15 give chip type */
+ case 0x0080: /* POWER10 bit 12 gives SMT8/4 */
maj = (pvr >> 8) & 0x0F;
min = pvr & 0xFF;
break;
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 87ab1152d5ce..e147bbdc12cd 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -50,7 +50,7 @@ $(obj-vdso32): %.o: %.S FORCE
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
- cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
+ cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $<
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 4c985467a668..5206c2eb2a1d 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -111,7 +111,6 @@ SECTIONS
*(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
- *(.glink .iplt .plt .rela*)
}
}
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 38c317f25141..32ebb3522ea1 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -34,7 +34,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
- cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn)
+ cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4e3a8d4ee614..256fb9720298 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -30,7 +30,7 @@ SECTIONS
. = ALIGN(16);
.text : {
*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
- *(.sfpr)
+ *(.sfpr .glink)
} :text
PROVIDE(__etext = .);
PROVIDE(_etext = .);
@@ -111,7 +111,6 @@ SECTIONS
*(.branch_lt)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
- *(.glink .iplt .plt .rela*)
}
}
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 41fedec69ac3..49db50d1db04 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -834,7 +834,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index d6c1069e9954..ed0c9c43d0cf 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
return 0;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
/* kvm_unmap_hva flushes everything anyways */
kvm_unmap_hva(kvm, start);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index d66a645503eb..69a91b571845 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -39,7 +39,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
memcpy_power7.o
obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
- memcpy_64.o memcpy_mcsafe_64.o
+ memcpy_64.o copy_mc_64.o
ifndef CONFIG_PPC_QUEUED_SPINLOCKS
obj64-$(CONFIG_SMP) += locks.o
diff --git a/arch/powerpc/lib/memcpy_mcsafe_64.S b/arch/powerpc/lib/copy_mc_64.S
index cb882d9a6d8a..88d46c471493 100644
--- a/arch/powerpc/lib/memcpy_mcsafe_64.S
+++ b/arch/powerpc/lib/copy_mc_64.S
@@ -50,7 +50,7 @@ err3; stb r0,0(r3)
blr
-_GLOBAL(memcpy_mcsafe)
+_GLOBAL(copy_mc_generic)
mr r7,r5
cmpldi r5,16
blt .Lshort_copy
@@ -239,4 +239,4 @@ err1; stb r0,0(r3)
15: li r3,0
blr
-EXPORT_SYMBOL_GPL(memcpy_mcsafe);
+EXPORT_SYMBOL_GPL(copy_mc_generic);
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index c0162911f6cb..d426eaf76bb0 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -191,10 +191,17 @@ static bool is_module_segment(unsigned long addr)
{
if (!IS_ENABLED(CONFIG_MODULES))
return false;
+#ifdef MODULES_VADDR
+ if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
+ return false;
+ if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
+ return false;
+#else
if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M))
return false;
- if (addr >= ALIGN(VMALLOC_END, SZ_256M))
+ if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1)
return false;
+#endif
return true;
}
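The boundary rewrite in is_module_segment() avoids a 32-bit overflow: when the region ends at the top of the address space, ALIGN(end, SZ_256M) wraps to 0 and the old ">=" test rejected every address. That appears to be the motivation here; the wrap itself is easy to demonstrate:

    #include <stdio.h>
    #include <stdint.h>

    #define SZ_256M 0x10000000U
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            uint32_t end  = 0xffffffffU;  /* region ending at the 32-bit top */
            uint32_t addr = 0xf0000000U;  /* clearly inside the region */

            /* ALIGN(0xffffffff, 256M) wraps to 0 in 32-bit arithmetic ... */
            printf("ALIGN(end)    = %#x\n", ALIGN_UP(end, SZ_256M));
            /* ... so the old "addr >= ALIGN(end)" rejected every address. */
            printf("old test hits = %d\n", addr >= ALIGN_UP(end, SZ_256M));
            /* "addr > ALIGN(end) - 1" compares against 0xffffffff instead. */
            printf("new test hits = %d\n", addr > ALIGN_UP(end, SZ_256M) - 1);
            return 0;
    }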
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 1da9dbba9217..c663e7ba801f 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -232,6 +232,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
rflags |= HPTE_R_I;
else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
rflags |= (HPTE_R_I | HPTE_R_G);
+ else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+ rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
else
/*
* Add memory coherence if cache inhibited is not set
@@ -1115,8 +1117,10 @@ void hash__early_init_mmu_secondary(void)
&& cpu_has_feature(CPU_FTR_HVMODE))
tlbiel_all();
- if (IS_ENABLED(CONFIG_PPC_MEM_KEYS) && mmu_has_feature(MMU_FTR_PKEY))
+#ifdef CONFIG_PPC_MEM_KEYS
+ if (mmu_has_feature(MMU_FTR_PKEY))
mtspr(SPRN_UAMOR, default_uamor);
+#endif
}
#endif /* CONFIG_SMP */
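With _PAGE_CACHE_CTL redefined as a composite mask (see the pgtable.h hunk earlier), htab_convert_pte_flags() compares the masked value against each cache mode, and the restored SAO case maps to HPTE_R_W | HPTE_R_I | HPTE_R_M. A sketch of that dispatch with stand-in bit values (the real HPTE constants live in mmu-hash.h):

    #include <stdio.h>

    #define _PAGE_SAO            0x10
    #define _PAGE_NON_IDEMPOTENT 0x20
    #define _PAGE_TOLERANT       0x30
    #define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)

    #define HPTE_R_W 0x8UL          /* illustrative values only */
    #define HPTE_R_I 0x4UL
    #define HPTE_R_M 0x2UL
    #define HPTE_R_G 0x1UL

    static unsigned long cache_rflags(unsigned long pteflags)
    {
            switch (pteflags & _PAGE_CACHE_CTL) {
            case _PAGE_TOLERANT:
                    return HPTE_R_I;
            case _PAGE_NON_IDEMPOTENT:
                    return HPTE_R_I | HPTE_R_G;
            case _PAGE_SAO:                         /* the restored case */
                    return HPTE_R_W | HPTE_R_I | HPTE_R_M;
            default:                                /* normal cacheable */
                    return HPTE_R_M;
            }
    }

    int main(void)
    {
            printf("SAO rflags: %#lx\n", cache_rflags(_PAGE_SAO));
            return 0;
    }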
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 28c784976bed..d5f0c10d752a 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -734,21 +734,6 @@ void radix__mmu_cleanup_all(void)
}
}
-void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
- phys_addr_t first_memblock_size)
-{
- /*
- * We don't currently support the first MEMBLOCK not mapping 0
- * physical on those processors
- */
- BUG_ON(first_memblock_base != 0);
-
- /*
- * Radix mode is not limited by RMA / VRMA addressing.
- */
- ppc64_rma_size = ULONG_MAX;
-}
-
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 02e127fa5777..8459056cce67 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -433,9 +433,16 @@ void __init mmu_early_init_devtree(void)
if (!(mfmsr() & MSR_HV))
early_check_vec5();
- if (early_radix_enabled())
+ if (early_radix_enabled()) {
radix__early_init_devtree();
- else
+ /*
+ * We have finalized the translation we are going to use by now.
+ * Radix mode is not limited by RMA / VRMA addressing.
+ * Hence don't limit memblock allocations.
+ */
+ ppc64_rma_size = ULONG_MAX;
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+ } else
hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */
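Taken together with the mmu.h hunk at the top of this series, the flow is: the very early memory limit is always computed with hash's stricter RMA rules, and radix lifts it only once the translation mode is final in mmu_early_init_devtree(). Simplified control-flow sketch (the 1G hash cap is illustrative; the real limit comes from hash__setup_initial_memory_limit()):

    #include <stdio.h>
    #include <limits.h>
    #include <stdbool.h>

    static unsigned long ppc64_rma_size;

    /* Early: translation mode still unknown, so assume hash's limits. */
    static void setup_initial_memory_limit(unsigned long first_block_size)
    {
            ppc64_rma_size = first_block_size < 0x40000000UL
                           ? first_block_size : 0x40000000UL; /* 1G cap */
    }

    /* Later, from mmu_early_init_devtree(), once the choice is final. */
    static void mmu_early_init_devtree(bool radix)
    {
            if (radix)
                    ppc64_rma_size = ULONG_MAX;  /* radix: no RMA limit */
    }

    int main(void)
    {
            setup_initial_memory_limit(0x80000000UL);  /* 2G first memblock */
            printf("early rma size: %#lx\n", ppc64_rma_size);
            mmu_early_init_devtree(true);
            printf("final rma size: %#lx\n", ppc64_rma_size);
            return 0;
    }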
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 16d09b36fe06..e809cb5a1631 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -475,7 +475,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
true_cond = COND_NE;
- /* Fall through */
cond_branch:
/* same targets, can avoid doing the test :) */
if (filter[i].jt == filter[i].jf) {
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 78fe34986594..08643cba1494 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1557,9 +1557,16 @@ nocheck:
ret = 0;
out:
if (has_branch_stack(event)) {
- power_pmu_bhrb_enable(event);
- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
- event->attr.branch_sample_type);
+ u64 bhrb_filter = -1;
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
+ event->attr.branch_sample_type);
+
+ if (bhrb_filter != -1) {
+ cpuhw->bhrb_filter = bhrb_filter;
+ power_pmu_bhrb_enable(event);
+ }
}
perf_pmu_enable(event->pmu);
@@ -1881,7 +1888,6 @@ static int power_pmu_event_init(struct perf_event *event)
int n;
int err;
struct cpu_hw_events *cpuhw;
- u64 bhrb_filter;
if (!ppmu)
return -ENOENT;
@@ -1987,7 +1993,10 @@ static int power_pmu_event_init(struct perf_event *event)
err = power_check_constraints(cpuhw, events, cflags, n + 1);
if (has_branch_stack(event)) {
- bhrb_filter = ppmu->bhrb_filter_map(
+ u64 bhrb_filter = -1;
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
if (bhrb_filter == -1) {
@@ -2141,6 +2150,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
+ } else if (period) {
+ /* Account for interrupt in case of invalid SIAR */
+ if (perf_event_account_interrupt(event))
+ power_pmu_stop(event, 0);
}
}
@@ -2323,6 +2336,7 @@ int register_power_pmu(struct power_pmu *pmu)
pmu->name);
power_pmu.attr_groups = ppmu->attr_groups;
+ power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS);
#ifdef MSR_HV
/*
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index cdb7bfbd157e..6e7e820508df 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1128,6 +1128,15 @@ static struct bin_attribute *if_bin_attrs[] = {
NULL,
};
+static struct attribute *cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group cpumask_attr_group = {
+ .attrs = cpumask_attrs,
+};
+
static struct attribute *if_attrs[] = {
&dev_attr_catalog_len.attr,
&dev_attr_catalog_version.attr,
@@ -1135,7 +1144,6 @@ static struct attribute *if_attrs[] = {
&dev_attr_sockets.attr,
&dev_attr_chipspersocket.attr,
&dev_attr_coresperchip.attr,
- &dev_attr_cpumask.attr,
NULL,
};
@@ -1151,6 +1159,7 @@ static const struct attribute_group *attr_groups[] = {
&event_desc_group,
&event_long_desc_group,
&if_group,
+ &cpumask_attr_group,
NULL,
};
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index a45d694a5d5d..62d0b54086f8 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -1289,7 +1289,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem,
header->misc = 0;
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- switch (IMC_TRACE_RECORD_VAL_HVPR(mem->val)) {
+ switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) {
case 0:/* when MSR HV and PR not set in the trace-record */
header->misc |= PERF_RECORD_MISC_GUEST_KERNEL;
break;
@@ -1297,7 +1297,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem,
header->misc |= PERF_RECORD_MISC_GUEST_USER;
break;
case 2: /* MSR HV is 1 and PR is 0 */
- header->misc |= PERF_RECORD_MISC_HYPERVISOR;
+ header->misc |= PERF_RECORD_MISC_KERNEL;
break;
case 3: /* MSR HV is 1 and PR is 1 */
header->misc |= PERF_RECORD_MISC_USER;
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index a213a0aa5d25..8e53f2fc3fe0 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -13,9 +13,11 @@
#include <asm/ptrace.h>
#include <asm/perf_regs.h>
+u64 PERF_REG_EXTENDED_MASK;
+
#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)
-#define REG_RESERVED (~((1ULL << PERF_REG_POWERPC_MAX) - 1))
+#define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK))
static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_R0, gpr[0]),
@@ -69,10 +71,36 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
+/* Function to return the extended register values */
+static u64 get_ext_regs_value(int idx)
+{
+ switch (idx) {
+ case PERF_REG_POWERPC_MMCR0:
+ return mfspr(SPRN_MMCR0);
+ case PERF_REG_POWERPC_MMCR1:
+ return mfspr(SPRN_MMCR1);
+ case PERF_REG_POWERPC_MMCR2:
+ return mfspr(SPRN_MMCR2);
+#ifdef CONFIG_PPC64
+ case PERF_REG_POWERPC_MMCR3:
+ return mfspr(SPRN_MMCR3);
+ case PERF_REG_POWERPC_SIER2:
+ return mfspr(SPRN_SIER2);
+ case PERF_REG_POWERPC_SIER3:
+ return mfspr(SPRN_SIER3);
+#endif
+ default: return 0;
+ }
+}
+
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
- if (WARN_ON_ONCE(idx >= PERF_REG_POWERPC_MAX))
- return 0;
+ u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ perf_reg_extended_max = PERF_REG_MAX_ISA_31;
+ else if (cpu_has_feature(CPU_FTR_ARCH_300))
+ perf_reg_extended_max = PERF_REG_MAX_ISA_300;
if (idx == PERF_REG_POWERPC_SIER &&
(IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
@@ -85,6 +113,16 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
IS_ENABLED(CONFIG_PPC32)))
return 0;
+ if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max)
+ return get_ext_regs_value(idx);
+
+ /*
+ * If the idx refers to a value beyond the
+ * supported registers, return 0 with a warning
+ */
+ if (WARN_ON_ONCE(idx >= perf_reg_extended_max))
+ return 0;
+
return regs_get_register(regs, pt_regs_offset[idx]);
}
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index f7cff7f36a1c..83148656b524 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -87,6 +87,8 @@
#define POWER10_MMCRA_IFM3 0x00000000C0000000UL
#define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL
+extern u64 PERF_REG_EXTENDED_MASK;
+
/* Table of alternatives, sorted by column 0 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
{ PM_RUN_CYC_ALT, PM_RUN_CYC },
@@ -397,6 +399,7 @@ static struct power_pmu power10_pmu = {
.cache_events = &power10_cache_events,
.attr_groups = power10_pmu_attr_groups,
.bhrb_nr = 32,
+ .capabilities = PERF_PMU_CAP_EXTENDED_REGS,
};
int init_power10_pmu(void)
@@ -408,6 +411,9 @@ int init_power10_pmu(void)
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power10"))
return -ENODEV;
+ /* Set the PERF_REG_EXTENDED_MASK here */
+ PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;
+
rc = register_power_pmu(&power10_pmu);
if (rc)
return rc;
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 05dae38b969a..2a57e93a79dc 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -90,6 +90,8 @@ enum {
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
+extern u64 PERF_REG_EXTENDED_MASK;
+
/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS 0x00002000
@@ -434,6 +436,7 @@ static struct power_pmu power9_pmu = {
.cache_events = &power9_cache_events,
.attr_groups = power9_pmu_attr_groups,
.bhrb_nr = 32,
+ .capabilities = PERF_PMU_CAP_EXTENDED_REGS,
};
int init_power9_pmu(void)
@@ -457,6 +460,9 @@ int init_power9_pmu(void)
}
}
+ /* Set the PERF_REG_EXTENDED_MASK here */
+ PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_300;
+
rc = register_power_pmu(&power9_pmu);
if (rc)
return rc;
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 87737ec86d39..1dc9d3c81872 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -36,7 +36,7 @@ config PPC_BOOK3S_6xx
select PPC_HAVE_PMU_SUPPORT
select PPC_HAVE_KUEP
select PPC_HAVE_KUAP
- select HAVE_ARCH_VMAP_STACK
+ select HAVE_ARCH_VMAP_STACK if !ADB_PMU
config PPC_BOOK3S_601
bool "PowerPC 601"
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 77513a80cef9..345ab062b21a 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -1223,7 +1223,7 @@ static void __init pnv_probe_idle_states(void)
return;
}
- if (pvr_version_is(PVR_POWER9))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
pnv_power9_idle_init();
for (i = 0; i < nr_pnv_idle_states; i++)
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index c9c25fb0783c..023a4f987bb2 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2705,7 +2705,7 @@ void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
struct iommu_table *tbl = pe->table_group.tables[0];
int64_t rc;
- if (pe->dma_setup_done)
+ if (!pe->dma_setup_done)
return;
rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index c6e0d8abf75e..7a974ed6b240 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -107,22 +107,28 @@ static int pseries_cpu_disable(void)
*/
static void pseries_cpu_die(unsigned int cpu)
{
- int tries;
int cpu_status = 1;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
+ unsigned long timeout = jiffies + msecs_to_jiffies(120000);
- for (tries = 0; tries < 25; tries++) {
+ while (true) {
cpu_status = smp_query_cpu_stopped(pcpu);
if (cpu_status == QCSS_STOPPED ||
cpu_status == QCSS_HARDWARE_ERROR)
break;
- cpu_relax();
+ if (time_after(jiffies, timeout)) {
+ pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
+ cpu, pcpu);
+ timeout = jiffies + msecs_to_jiffies(120000);
+ }
+
+ cond_resched();
}
- if (cpu_status != 0) {
- printk("Querying DEAD? cpu %i (%i) shows %i\n",
- cpu, pcpu, cpu_status);
+ if (cpu_status == QCSS_HARDWARE_ERROR) {
+ pr_warn("CPU %i (hwid %i) reported error while dying\n",
+ cpu, pcpu);
}
/* Isolation and deallocation are definitely done by
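pseries_cpu_die() above trades a bounded 25-iteration cpu_relax() spin for an open-ended cond_resched() poll that warns every 120 seconds and re-arms the timeout, with time_after() handling jiffies wrap. A userspace model of the loop shape (wall-clock time stands in for jiffies, sleep() for cond_resched(), and the stop condition is faked):

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static int cpu_stopped(void) { static int n; return ++n > 5; }

    int main(void)
    {
            time_t timeout = time(NULL) + 120;

            while (!cpu_stopped()) {
                    if (time(NULL) > timeout) {
                            fprintf(stderr,
                                    "cpu didn't die after 120 seconds\n");
                            timeout = time(NULL) + 120;  /* warn again later */
                    }
                    sleep(1);   /* cond_resched() in the kernel loop */
            }
            puts("cpu offline complete");
            return 0;
    }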
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index f439f0dfea7d..a88a707a608a 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -822,7 +822,7 @@ free_stats:
kfree(stats);
return rc ? rc : seq_buf_used(&s);
}
-DEVICE_ATTR_RO(perf_stats);
+DEVICE_ATTR_ADMIN_RO(perf_stats);
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index f3736fcd98fc..13c86a292c6d 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -184,7 +184,6 @@ static void handle_system_shutdown(char event_modifier)
case EPOW_SHUTDOWN_ON_UPS:
pr_emerg("Loss of system power detected. System is running on"
" UPS/battery. Check RTAS error log for details\n");
- orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 7b5905529146..7766e1289468 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -32,6 +32,7 @@ config RISCV
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select CLONE_BACKWARDS
+ select CLINT_TIMER if !MMU
select COMMON_CLK
select EDAC_SUPPORT
select GENERIC_ARCH_TOPOLOGY if SMP
@@ -81,7 +82,7 @@ config RISCV
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
select RISCV_INTC
- select RISCV_TIMER
+ select RISCV_TIMER if RISCV_SBI
select SPARSEMEM_STATIC if 32BIT
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 6c88148f1b9b..8a55f6156661 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -12,6 +12,7 @@ config SOC_SIFIVE
config SOC_VIRT
bool "QEMU Virt Machine"
+ select CLINT_TIMER if RISCV_M_MODE
select POWER_RESET
select POWER_RESET_SYSCON
select POWER_RESET_SYSCON_POWEROFF
@@ -24,6 +25,7 @@ config SOC_VIRT
config SOC_KENDRYTE
bool "Kendryte K210 SoC"
depends on !MMU
+ select CLINT_TIMER if RISCV_M_MODE
select SERIAL_SIFIVE if TTY
select SERIAL_SIFIVE_CONSOLE if TTY
select SIFIVE_PLIC
diff --git a/arch/riscv/boot/dts/kendryte/k210.dtsi b/arch/riscv/boot/dts/kendryte/k210.dtsi
index c1df56ccb8d5..d2d0ff645632 100644
--- a/arch/riscv/boot/dts/kendryte/k210.dtsi
+++ b/arch/riscv/boot/dts/kendryte/k210.dtsi
@@ -95,10 +95,12 @@
#clock-cells = <1>;
};
- clint0: interrupt-controller@2000000 {
+ clint0: clint@2000000 {
+ #interrupt-cells = <1>;
compatible = "riscv,clint0";
reg = <0x2000000 0xC000>;
- interrupts-extended = <&cpu0_intc 3>, <&cpu1_intc 3>;
+ interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
+ &cpu1_intc 3 &cpu1_intc 7>;
clocks = <&sysctl K210_CLK_ACLK>;
};
diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig
index f27596e9663e..e046a0babde4 100644
--- a/arch/riscv/configs/nommu_virt_defconfig
+++ b/arch/riscv/configs/nommu_virt_defconfig
@@ -26,6 +26,7 @@ CONFIG_EXPERT=y
CONFIG_SLOB=y
# CONFIG_SLAB_MERGE_DEFAULT is not set
# CONFIG_MMU is not set
+CONFIG_SOC_VIRT=y
CONFIG_MAXPHYSMEM_2GB=y
CONFIG_SMP=y
CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
@@ -49,7 +50,6 @@ CONFIG_VIRTIO_BLK=y
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_LDISC_AUTOLOAD is not set
-# CONFIG_DEVMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
@@ -57,16 +57,13 @@ CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
# CONFIG_HWMON is not set
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-CONFIG_SIFIVE_PLIC=y
-# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index 3a55f0e00d6c..2c2cda6cc1c5 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -14,6 +14,7 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
+CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
@@ -62,6 +63,8 @@ CONFIG_HVC_RISCV_SBI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_SPI=y
+CONFIG_SPI_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_POWER_RESET=y
CONFIG_DRM=y
@@ -77,6 +80,8 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
CONFIG_RTC_CLASS=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
diff --git a/arch/riscv/include/asm/clint.h b/arch/riscv/include/asm/clint.h
index a279b17a6aad..0789fd37b40a 100644
--- a/arch/riscv/include/asm/clint.h
+++ b/arch/riscv/include/asm/clint.h
@@ -1,39 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
#ifndef _ASM_RISCV_CLINT_H
-#define _ASM_RISCV_CLINT_H 1
+#define _ASM_RISCV_CLINT_H
-#include <linux/io.h>
-#include <linux/smp.h>
+#include <linux/types.h>
+#include <asm/mmio.h>
#ifdef CONFIG_RISCV_M_MODE
-extern u32 __iomem *clint_ipi_base;
-
-void clint_init_boot_cpu(void);
-
-static inline void clint_send_ipi_single(unsigned long hartid)
-{
- writel(1, clint_ipi_base + hartid);
-}
-
-static inline void clint_send_ipi_mask(const struct cpumask *mask)
-{
- int cpu;
-
- for_each_cpu(cpu, mask)
- clint_send_ipi_single(cpuid_to_hartid_map(cpu));
-}
-
-static inline void clint_clear_ipi(unsigned long hartid)
-{
- writel(0, clint_ipi_base + hartid);
-}
-#else /* CONFIG_RISCV_M_MODE */
-#define clint_init_boot_cpu() do { } while (0)
-
-/* stubs for code that is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE): */
-void clint_send_ipi_single(unsigned long hartid);
-void clint_send_ipi_mask(const struct cpumask *hartid_mask);
-void clint_clear_ipi(unsigned long hartid);
-#endif /* CONFIG_RISCV_M_MODE */
-
-#endif /* _ASM_RISCV_CLINT_H */
+/*
+ * This lives in the CLINT driver, but is accessed directly by timex.h to avoid
+ * any overhead when accessing the MMIO timer.
+ *
+ * The ISA defines mtime as a 64-bit memory-mapped register that increments at
+ * a constant frequency, but it doesn't define some other constraints we depend
+ * on (most notably ordering constraints, but also some simpler stuff like the
+ * memory layout). Thus, this is called "clint_time_val" instead of something
+ * like "riscv_mtime", to signify that these non-ISA assumptions must hold.
+ */
+extern u64 __iomem *clint_time_val;
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index ace8a6e2d11d..845002cc2e57 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -66,6 +66,13 @@ do { \
* Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
*/
#define MCOUNT_INSN_SIZE 8
+
+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
#endif
#endif /* _ASM_RISCV_FTRACE_H */
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index 6dfd2a1446d5..df1f7c4cd433 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -15,6 +15,11 @@
struct seq_file;
extern unsigned long boot_cpu_hartid;
+struct riscv_ipi_ops {
+ void (*ipi_inject)(const struct cpumask *target);
+ void (*ipi_clear)(void);
+};
+
#ifdef CONFIG_SMP
/*
* Mapping between linux logical cpu index and hartid.
@@ -40,6 +45,12 @@ void arch_send_call_function_single_ipi(int cpu);
int riscv_hartid_to_cpuid(int hartid);
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
+/* Set custom IPI operations */
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops);
+
+/* Clear IPI for current CPU */
+void riscv_clear_ipi(void);
+
/* Secondary hart entry */
asmlinkage void smp_callin(void);
@@ -81,6 +92,14 @@ static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
cpumask_set_cpu(boot_cpu_hartid, out);
}
+static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+}
+
+static inline void riscv_clear_ipi(void)
+{
+}
+
#endif /* CONFIG_SMP */
#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
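IPI delivery now goes through a registered ops table so the SBI and CLINT backends can share one code path; send_ipi_mask()/send_ipi_single() in smp.c (further below) NULL-check both the table and the hook before dispatching. A standalone model of that pattern (the cpumask is reduced to a plain bitmask for brevity):

    #include <stdio.h>

    struct riscv_ipi_ops {
            void (*ipi_inject)(unsigned long target_mask);
            void (*ipi_clear)(void);
    };

    static struct riscv_ipi_ops *ipi_ops;

    static void riscv_set_ipi_ops(struct riscv_ipi_ops *ops) { ipi_ops = ops; }

    static void send_ipi_mask(unsigned long mask)
    {
            if (ipi_ops && ipi_ops->ipi_inject)
                    ipi_ops->ipi_inject(mask);
            else
                    fprintf(stderr, "SMP: IPI inject method not available\n");
    }

    /* A backend, standing in for the one registered from sbi_init(). */
    static void demo_inject(unsigned long mask)
    {
            printf("IPI -> cpus %#lx\n", mask);
    }

    static struct riscv_ipi_ops demo_ops = { .ipi_inject = demo_inject };

    int main(void)
    {
            send_ipi_mask(0x3);         /* warns: no ops registered yet */
            riscv_set_ipi_ops(&demo_ops);
            send_ipi_mask(0x3);         /* dispatches via the ops table */
            return 0;
    }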
diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h
index d95f7b2a7f37..5962f8891f06 100644
--- a/arch/riscv/include/asm/stackprotector.h
+++ b/arch/riscv/include/asm/stackprotector.h
@@ -5,7 +5,6 @@
#include <linux/random.h>
#include <linux/version.h>
-#include <asm/timex.h>
extern unsigned long __stack_chk_guard;
@@ -18,12 +17,9 @@ extern unsigned long __stack_chk_guard;
static __always_inline void boot_init_stack_canary(void)
{
unsigned long canary;
- unsigned long tsc;
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
- tsc = get_cycles();
- canary += tsc + (tsc << BITS_PER_LONG/2);
canary ^= LINUX_VERSION_CODE;
canary &= CANARY_MASK;
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
index bad2a7c2cda5..ab104905d4db 100644
--- a/arch/riscv/include/asm/timex.h
+++ b/arch/riscv/include/asm/timex.h
@@ -7,41 +7,65 @@
#define _ASM_RISCV_TIMEX_H
#include <asm/csr.h>
-#include <asm/mmio.h>
typedef unsigned long cycles_t;
-extern u64 __iomem *riscv_time_val;
-extern u64 __iomem *riscv_time_cmp;
+#ifdef CONFIG_RISCV_M_MODE
-#ifdef CONFIG_64BIT
-#define mmio_get_cycles() readq_relaxed(riscv_time_val)
-#else
-#define mmio_get_cycles() readl_relaxed(riscv_time_val)
-#define mmio_get_cycles_hi() readl_relaxed(((u32 *)riscv_time_val) + 1)
-#endif
+#include <asm/clint.h>
+#ifdef CONFIG_64BIT
static inline cycles_t get_cycles(void)
{
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- return csr_read(CSR_TIME);
- return mmio_get_cycles();
+ return readq_relaxed(clint_time_val);
+}
+#else /* !CONFIG_64BIT */
+static inline u32 get_cycles(void)
+{
+ return readl_relaxed(((u32 *)clint_time_val));
}
#define get_cycles get_cycles
-#ifdef CONFIG_64BIT
-static inline u64 get_cycles64(void)
+static inline u32 get_cycles_hi(void)
{
+ return readl_relaxed(((u32 *)clint_time_val) + 1);
+}
+#define get_cycles_hi get_cycles_hi
+#endif /* CONFIG_64BIT */
+
+/*
+ * Much like MIPS, we may not have a viable counter to use at an early point
+ * in the boot process. Unfortunately we don't have a fallback, so instead
+ * we just return 0.
+ */
+static inline unsigned long random_get_entropy(void)
+{
+ if (unlikely(clint_time_val == NULL))
+ return 0;
return get_cycles();
}
-#else /* CONFIG_64BIT */
+#define random_get_entropy() random_get_entropy()
+
+#else /* CONFIG_RISCV_M_MODE */
+
+static inline cycles_t get_cycles(void)
+{
+ return csr_read(CSR_TIME);
+}
+#define get_cycles get_cycles
+
static inline u32 get_cycles_hi(void)
{
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- return csr_read(CSR_TIMEH);
- return mmio_get_cycles_hi();
+ return csr_read(CSR_TIMEH);
}
+#define get_cycles_hi get_cycles_hi
+#ifdef CONFIG_64BIT
+static inline u64 get_cycles64(void)
+{
+ return get_cycles();
+}
+#else /* CONFIG_64BIT */
static inline u64 get_cycles64(void)
{
u32 hi, lo;
@@ -55,6 +79,8 @@ static inline u64 get_cycles64(void)
}
#endif /* CONFIG_64BIT */
+#endif /* !CONFIG_RISCV_M_MODE */
+
#define ARCH_HAS_READ_CURRENT_TIMER
static inline int read_current_timer(unsigned long *timer_val)
{
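On rv32 the S-mode get_cycles64() (its tail is elided in the hunk above) reads the 64-bit counter as two 32-bit halves and retries until the high half is stable across the low-half read. The idiom, demonstrated against a fake counter:

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint32_t counter_hi, counter_lo;  /* fake mtime halves */

    static uint32_t get_cycles(void)    { return counter_lo; }
    static uint32_t get_cycles_hi(void) { return counter_hi; }

    /* Classic split-read idiom: retry if the high word rolled over
     * between the two reads. */
    static uint64_t get_cycles64(void)
    {
            uint32_t hi, lo;

            do {
                    hi = get_cycles_hi();
                    lo = get_cycles();
            } while (hi != get_cycles_hi());

            return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
            counter_hi = 1;
            counter_lo = 0x80000000U;
            printf("cycles = %#llx\n", (unsigned long long)get_cycles64());
            return 0;
    }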
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index a5287ab9f7f2..dc93710f0b2f 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -31,7 +31,7 @@ obj-y += cacheinfo.o
obj-y += patch.o
obj-$(CONFIG_MMU) += vdso.o vdso/
-obj-$(CONFIG_RISCV_M_MODE) += clint.o traps_misaligned.o
+obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/riscv/kernel/clint.c b/arch/riscv/kernel/clint.c
deleted file mode 100644
index 3647980d14c3..000000000000
--- a/arch/riscv/kernel/clint.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019 Christoph Hellwig.
- */
-
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/types.h>
-#include <asm/clint.h>
-#include <asm/csr.h>
-#include <asm/timex.h>
-#include <asm/smp.h>
-
-/*
- * This is the layout used by the SiFive clint, which is also shared by the qemu
- * virt platform, and the Kendryte KD210 at least.
- */
-#define CLINT_IPI_OFF 0
-#define CLINT_TIME_CMP_OFF 0x4000
-#define CLINT_TIME_VAL_OFF 0xbff8
-
-u32 __iomem *clint_ipi_base;
-
-void clint_init_boot_cpu(void)
-{
- struct device_node *np;
- void __iomem *base;
-
- np = of_find_compatible_node(NULL, NULL, "riscv,clint0");
- if (!np) {
- panic("clint not found");
- return;
- }
-
- base = of_iomap(np, 0);
- if (!base)
- panic("could not map CLINT");
-
- clint_ipi_base = base + CLINT_IPI_OFF;
- riscv_time_cmp = base + CLINT_TIME_CMP_OFF;
- riscv_time_val = base + CLINT_TIME_VAL_OFF;
-
- clint_clear_ipi(boot_cpu_hartid);
-}
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 2ff63d0cbb50..99e12faa5498 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -97,6 +97,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return __ftrace_modify_call(rec->ip, addr, false);
}
+
+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
+ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ int out;
+
+ ftrace_arch_code_modify_prepare();
+ out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+ ftrace_arch_code_modify_post_process();
+
+ return out;
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index f383ef5672b2..226ccce0f9e0 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -547,6 +547,18 @@ static inline long sbi_get_firmware_version(void)
return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}
+static void sbi_send_cpumask_ipi(const struct cpumask *target)
+{
+ struct cpumask hartid_mask;
+
+ riscv_cpuid_to_hartid_mask(target, &hartid_mask);
+
+ sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static struct riscv_ipi_ops sbi_ipi_ops = {
+ .ipi_inject = sbi_send_cpumask_ipi
+};
int __init sbi_init(void)
{
@@ -587,5 +599,7 @@ int __init sbi_init(void)
__sbi_rfence = __sbi_rfence_v01;
}
+ riscv_set_ipi_ops(&sbi_ipi_ops);
+
return 0;
}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index f04373be54a6..2c6dd329312b 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -18,7 +18,6 @@
#include <linux/swiotlb.h>
#include <linux/smp.h>
-#include <asm/clint.h>
#include <asm/cpu_ops.h>
#include <asm/setup.h>
#include <asm/sections.h>
@@ -79,7 +78,6 @@ void __init setup_arch(char **cmdline_p)
#else
unflatten_device_tree();
#endif
- clint_init_boot_cpu();
#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 17ba190e84a5..e996e08f1061 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -250,7 +250,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->a0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
regs->epc -= 0x4;
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 554b0fb47060..ea028d9e0d24 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -18,7 +18,6 @@
#include <linux/delay.h>
#include <linux/irq_work.h>
-#include <asm/clint.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -86,9 +85,25 @@ static void ipi_stop(void)
wait_for_interrupt();
}
+static struct riscv_ipi_ops *ipi_ops;
+
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+ ipi_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
+
+void riscv_clear_ipi(void)
+{
+ if (ipi_ops && ipi_ops->ipi_clear)
+ ipi_ops->ipi_clear();
+
+ csr_clear(CSR_IP, IE_SIE);
+}
+EXPORT_SYMBOL_GPL(riscv_clear_ipi);
+
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
- struct cpumask hartid_mask;
int cpu;
smp_mb__before_atomic();
@@ -96,33 +111,22 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
- riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- sbi_send_ipi(cpumask_bits(&hartid_mask));
+ if (ipi_ops && ipi_ops->ipi_inject)
+ ipi_ops->ipi_inject(mask);
else
- clint_send_ipi_mask(mask);
+ pr_warn("SMP: IPI inject method not available\n");
}
static void send_ipi_single(int cpu, enum ipi_message_type op)
{
- int hartid = cpuid_to_hartid_map(cpu);
-
smp_mb__before_atomic();
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
- else
- clint_send_ipi_single(hartid);
-}
-
-static inline void clear_ipi(void)
-{
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- csr_clear(CSR_IP, IE_SIE);
+ if (ipi_ops && ipi_ops->ipi_inject)
+ ipi_ops->ipi_inject(cpumask_of(cpu));
else
- clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+ pr_warn("SMP: IPI inject method not available\n");
}
#ifdef CONFIG_IRQ_WORK
@@ -140,7 +144,7 @@ void handle_IPI(struct pt_regs *regs)
irq_enter();
- clear_ipi();
+ riscv_clear_ipi();
while (true) {
unsigned long ops;
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 356825a57551..96167d55ed98 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -24,7 +24,6 @@
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
-#include <asm/clint.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
@@ -147,8 +146,7 @@ asmlinkage __visible void smp_callin(void)
struct mm_struct *mm = &init_mm;
unsigned int curr_cpuid = smp_processor_id();
- if (!IS_ENABLED(CONFIG_RISCV_SBI))
- clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+ riscv_clear_ipi();
/* All kernel threads share the same mm context. */
mmgrab(mm);
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index f3586e31ed1e..34d00d9e6eac 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -22,13 +22,11 @@ SECTIONS
/* Beginning of code and text segment */
. = LOAD_OFFSET;
_start = .;
- _stext = .;
HEAD_TEXT_SECTION
. = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
- INIT_DATA_SECTION(16)
. = ALIGN(8);
__soc_early_init_table : {
__soc_early_init_table_start = .;
@@ -55,6 +53,7 @@ SECTIONS
. = ALIGN(SECTION_ALIGN);
.text : {
_text = .;
+ _stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
@@ -67,6 +66,8 @@ SECTIONS
_etext = .;
}
+ INIT_DATA_SECTION(16)
+
/* Start of data section */
_sdata = .;
RO_DATA(SECTION_ALIGN)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 787c75f751a5..f750e012dbe5 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -226,12 +226,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
ptep = &fixmap_pte[pte_index(addr)];
- if (pgprot_val(prot)) {
+ if (pgprot_val(prot))
set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
- } else {
+ else
pte_clear(&init_mm, addr, ptep);
- local_flush_tlb_page(addr);
- }
+ local_flush_tlb_page(addr);
}
static pte_t *__init get_pte_virt(phys_addr_t pa)
@@ -516,6 +515,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#else
dtb_early_va = (void *)dtb_pa;
#endif
+ dtb_early_pa = dtb_pa;
}
static inline void setup_vm_final(void)
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index bc5f2204693f..579575f9cdae 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -1020,7 +1020,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_zext64(dst, ctx);
break;
}
- /* Fallthrough. */
+ fallthrough;
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_SUB | BPF_X:
@@ -1079,7 +1079,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case 16:
emit(rv_slli(lo(rd), lo(rd), 16), ctx);
emit(rv_srli(lo(rd), lo(rd), 16), ctx);
- /* Fallthrough. */
+ fallthrough;
case 32:
if (!ctx->prog->aux->verifier_zext)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 3d86e12e8e3c..b29fcc66ec39 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -30,7 +30,7 @@ config GENERIC_BUG_RELATIVE_POINTERS
def_bool y
config GENERIC_LOCKBREAK
- def_bool y if PREEMPTTION
+ def_bool y if PREEMPTION
config PGSTE
def_bool y if KVM
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 0cf9a82326a8..7228aabe9da6 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -626,6 +626,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
@@ -807,6 +808,7 @@ CONFIG_DEBUG_NOTIFIERS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
# CONFIG_RCU_TRACE is not set
CONFIG_LATENCYTOP=y
@@ -818,6 +820,7 @@ CONFIG_PREEMPT_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_S390_PTDUMP=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
@@ -829,6 +832,7 @@ CONFIG_FAIL_MAKE_REQUEST=y
CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAIL_FUTEX=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 5df9759e8ff6..fab03b7a6932 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -617,6 +617,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
@@ -763,6 +764,7 @@ CONFIG_PANIC_ON_OOPS=y
CONFIG_TEST_LOCKUP=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
@@ -771,6 +773,7 @@ CONFIG_STACK_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_S390_PTDUMP=y
CONFIG_LKDTM=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 4091c50449cd..8f67c55625f9 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -74,5 +74,6 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 50b4ce8cddfd..918f0ba4f4d2 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -29,7 +29,7 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
pcp_op_T__ *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
prev__ = *ptr__; \
do { \
@@ -37,7 +37,7 @@
new__ = old__ op (val); \
prev__ = cmpxchg(ptr__, old__, new__); \
} while (prev__ != old__); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
new__; \
})
@@ -68,7 +68,7 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
if (__builtin_constant_p(val__) && \
((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
@@ -84,7 +84,7 @@
: [val__] "d" (val__) \
: "cc"); \
} \
- preempt_enable(); \
+ preempt_enable_notrace(); \
}
#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
@@ -95,14 +95,14 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
old__ + val__; \
})
@@ -114,14 +114,14 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
}
#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
@@ -136,10 +136,10 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ ret__; \
pcp_op_T__ *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = cmpxchg(ptr__, oval, nval); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
@@ -152,10 +152,10 @@
({ \
typeof(pcp) *ptr__; \
typeof(pcp) ret__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = xchg(ptr__, nval); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
@@ -171,11 +171,11 @@
typeof(pcp1) *p1__; \
typeof(pcp2) *p2__; \
int ret__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
p1__ = raw_cpu_ptr(&(pcp1)); \
p2__ = raw_cpu_ptr(&(pcp2)); \
ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
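The switch to the _notrace variants exists because preempt_disable()/preempt_enable() are themselves traceable: if tracing code uses a this_cpu op that re-enters the preemption tracepoints, the tracer recurses into itself. A hypothetical illustration of the cycle the _notrace variants break:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, hit_count);

/* Hypothetical tracer callback: the this_cpu op below must not call
 * back into the traceable preempt_disable(), or the tracer re-enters
 * itself; hence the _notrace variants in the macros above. */
static void trace_event_hit(void)
{
	this_cpu_add(hit_count, 1);
}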
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 7eb01a5459cd..b55561cc8786 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1260,26 +1260,44 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
- if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
- return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
- return (p4d_t *) pgd;
+ if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+ return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+ return (p4d_t *) pgdp;
}
+#define p4d_offset_lockless p4d_offset_lockless
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
- if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
- return (pud_t *) p4d_deref(*p4d) + pud_index(address);
- return (pud_t *) p4d;
+ return p4d_offset_lockless(pgdp, *pgdp, address);
+}
+
+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
+{
+ if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+ return (pud_t *) p4d_deref(p4d) + pud_index(address);
+ return (pud_t *) p4dp;
+}
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
+{
+ return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
+{
+ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+ return (pmd_t *) pud_deref(pud) + pmd_index(address);
+ return (pmd_t *) pudp;
+}
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
- if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
- return (pmd_t *) pud_deref(*pud) + pmd_index(address);
- return (pmd_t *) pud;
+ return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset
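The *_offset_lockless() variants let a lockless walker (gup_fast style) pass in the entry value it already loaded with READ_ONCE(), so s390's trick of folding absent upper levels into the table pointer stays consistent even if the entry changes underneath. A hedged caller sketch, with the lower levels elided:

#include <linux/pgtable.h>

static int walk_upper_levels(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	pgd_t pgd = READ_ONCE(*pgdp);	/* snapshot the entry exactly once */
	p4d_t *p4dp = p4d_offset_lockless(pgdp, pgd, addr);
	p4d_t p4d = READ_ONCE(*p4dp);
	pud_t *pudp = pud_offset_lockless(p4dp, p4d, addr);

	return pudp != NULL;	/* pmd/pte levels elided */
}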
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index faca269d5f27..a44ddc2f2dec 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -26,6 +26,7 @@ void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
void do_secure_storage_access(struct pt_regs *regs);
void do_non_secure_storage_access(struct pt_regs *regs);
+void do_secure_storage_violation(struct pt_regs *regs);
void addressing_exception(struct pt_regs *regs);
void data_exception(struct pt_regs *regs);
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 88bb42ca5008..f7f1e64e0d98 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -33,21 +33,19 @@ void enabled_wait(void)
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
- trace_cpu_idle_rcuidle(1, smp_processor_id());
local_irq_save(flags);
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
local_irq_restore(flags);
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
/* Account time spent with enabled wait psw loaded as idle time. */
- write_seqcount_begin(&idle->seqcount);
+ raw_write_seqcount_begin(&idle->seqcount);
idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
idle->idle_count++;
account_idle_time(cputime_to_nsecs(idle_time));
- write_seqcount_end(&idle->seqcount);
+ raw_write_seqcount_end(&idle->seqcount);
}
NOKPROBE_SYMBOL(enabled_wait);
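The raw_ seqcount writers skip the lockdep and instrumentation hooks, which must not run from this deep-idle path. The reader side is unchanged; for reference, a sketch of how such a counter is read (field names taken from the hunk above):

static u64 read_idle_time(struct s390_idle_data *idle)
{
	unsigned int seq;
	u64 t;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		t = idle->idle_time;
	} while (read_seqcount_retry(&idle->seqcount, seq));

	return t;
}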
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 2c27907a5ffc..9a92638360ee 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -80,7 +80,7 @@ PGM_CHECK(do_dat_exception) /* 3b */
PGM_CHECK_DEFAULT /* 3c */
PGM_CHECK(do_secure_storage_access) /* 3d */
PGM_CHECK(do_non_secure_storage_access) /* 3e */
-PGM_CHECK_DEFAULT /* 3f */
+PGM_CHECK(do_secure_storage_violation) /* 3f */
PGM_CHECK(monitor_event_exception) /* 40 */
PGM_CHECK_DEFAULT /* 41 */
PGM_CHECK_DEFAULT /* 42 */
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 11d2f7d05f91..a76dd27fb2e8 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1268,7 +1268,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
cb->pc == 1 &&
cb->qc == 0 &&
cb->reserved2 == 0 &&
- cb->key == PAGE_DEFAULT_KEY &&
cb->reserved3 == 0 &&
cb->reserved4 == 0 &&
cb->reserved5 == 0 &&
@@ -1330,7 +1329,11 @@ static int s390_runtime_instr_set(struct task_struct *target,
kfree(data);
return -EINVAL;
}
-
+ /*
+ * Override access key in any case, since user space should
+ * not be able to set it, nor should it care about it.
+ */
+ ri_cb.key = PAGE_DEFAULT_KEY >> 4;
preempt_disable();
if (!target->thread.ri_cb)
target->thread.ri_cb = data;
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 125c7f6e8715..1788a5454b6f 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
cb->k = 1;
cb->ps = 1;
cb->pc = 1;
- cb->key = PAGE_DEFAULT_KEY;
+ cb->key = PAGE_DEFAULT_KEY >> 4;
cb->v = 1;
}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e600f6953d7c..c2c1b4e723ea 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -619,7 +619,7 @@ static struct notifier_block kdump_mem_nb = {
/*
* Make sure that the area behind memory_end is protected
*/
-static void reserve_memory_end(void)
+static void __init reserve_memory_end(void)
{
if (memory_end_set)
memblock_reserve(memory_end, ULONG_MAX);
@@ -628,7 +628,7 @@ static void reserve_memory_end(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
-static void reserve_oldmem(void)
+static void __init reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
@@ -640,7 +640,7 @@ static void reserve_oldmem(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
-static void remove_oldmem(void)
+static void __init remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index fc5419ac64c8..7f1266c24f6b 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -19,7 +19,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
unwind_for_each_frame(&state, task, regs, 0) {
addr = unwind_get_return_address(&state);
- if (!addr || !consume_entry(cookie, addr, false))
+ if (!addr || !consume_entry(cookie, addr))
break;
}
}
@@ -56,7 +56,7 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
return -EINVAL;
#endif
- if (!consume_entry(cookie, addr, false))
+ if (!consume_entry(cookie, addr))
return -EINVAL;
}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 4c8c063bce5b..996884dcc9fd 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -859,6 +859,21 @@ void do_non_secure_storage_access(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);
+void do_secure_storage_violation(struct pt_regs *regs)
+{
+ /*
+ * Either KVM messed up the secure guest mapping or the same
+ * page is mapped into multiple secure guests.
+ *
+ * This exception is only triggered when a guest 2 is running
+ * and can therefore never occur in kernel context.
+ */
+ printk_ratelimited(KERN_WARNING
+ "Secure storage violation in task: %s, pid %d\n",
+ current->comm, current->pid);
+ send_sig(SIGSEGV, current, 0);
+}
+
#else
void do_secure_storage_access(struct pt_regs *regs)
{
@@ -869,4 +884,9 @@ void do_non_secure_storage_access(struct pt_regs *regs)
{
default_trap_handler(regs);
}
+
+void do_secure_storage_violation(struct pt_regs *regs)
+{
+ default_trap_handler(regs);
+}
#endif
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 1aed1a4dfc2d..eddf71c22875 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -402,6 +402,7 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
if (!pud)
goto out;
+ p4d_populate(&init_mm, p4d, pud);
}
ret = modify_pud_table(p4d, addr, next, add, direct);
if (ret)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 3902c9f6f2d6..1804230dd8d8 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -668,10 +668,27 @@ EXPORT_SYMBOL_GPL(zpci_enable_device);
int zpci_disable_device(struct zpci_dev *zdev)
{
zpci_dma_exit_device(zdev);
+ /*
+ * The zPCI function may already be disabled by the platform, this is
+ * detected in clp_disable_fh() which becomes a no-op.
+ */
return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
+void zpci_remove_device(struct zpci_dev *zdev)
+{
+ struct zpci_bus *zbus = zdev->zbus;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ if (pdev) {
+ if (pdev->is_virtfn)
+ return zpci_remove_virtfn(pdev, zdev->vfn);
+ pci_stop_and_remove_bus_device_locked(pdev);
+ }
+}
+
int zpci_create_device(struct zpci_dev *zdev)
{
int rc;
@@ -716,13 +733,8 @@ void zpci_release_device(struct kref *kref)
{
struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
- if (zdev->zbus->bus) {
- struct pci_dev *pdev;
-
- pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
- if (pdev)
- pci_stop_and_remove_bus_device_locked(pdev);
- }
+ if (zdev->zbus->bus)
+ zpci_remove_device(zdev);
switch (zdev->state) {
case ZPCI_FN_STATE_ONLINE:
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index 642a99384688..5967f3014156 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -132,13 +132,14 @@ static int zpci_bus_link_virtfn(struct pci_dev *pdev,
{
int rc;
- virtfn->physfn = pci_dev_get(pdev);
rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
- if (rc) {
- pci_dev_put(pdev);
- virtfn->physfn = NULL;
+ if (rc)
return rc;
- }
+
+ virtfn->is_virtfn = 1;
+ virtfn->multifunction = 0;
+ virtfn->physfn = pci_dev_get(pdev);
+
return 0;
}
@@ -151,9 +152,9 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/
int rc = 0;
- virtfn->is_virtfn = 1;
- virtfn->multifunction = 0;
- WARN_ON(vfid < 0);
+ if (!zbus->multifunction)
+ return 0;
+
/* If the parent PF for the given VF is also configured in the
* instance, it must be on the same zbus.
* We can then identify the parent PF by checking what
@@ -165,11 +166,17 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
zdev = zbus->function[i];
if (zdev && zdev->is_physfn) {
pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ if (!pdev)
+ continue;
cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
if (cand_devfn == virtfn->devfn) {
rc = zpci_bus_link_virtfn(pdev, virtfn, vfid);
+ /* balance pci_get_slot() */
+ pci_dev_put(pdev);
break;
}
+ /* balance pci_get_slot() */
+ pci_dev_put(pdev);
}
}
return rc;
@@ -178,12 +185,23 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
static inline int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
struct pci_dev *virtfn, int vfn)
{
- virtfn->is_virtfn = 1;
- virtfn->multifunction = 0;
return 0;
}
#endif
+void pcibios_bus_add_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ /*
+ * With pdev->no_vf_scan the common PCI probing code does not
+ * perform PF/VF linking.
+ */
+ if (zdev->vfn)
+ zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
+
+}
+
static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
struct pci_bus *bus;
@@ -214,20 +232,10 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
}
pdev = pci_scan_single_device(bus, zdev->devfn);
- if (pdev) {
- if (!zdev->is_physfn) {
- rc = zpci_bus_setup_virtfn(zbus, pdev, zdev->vfn);
- if (rc)
- goto failed_with_pdev;
- }
+ if (pdev)
pci_bus_add_device(pdev);
- }
- return 0;
-failed_with_pdev:
- pci_stop_and_remove_bus_device(pdev);
- pci_dev_put(pdev);
- return rc;
+ return 0;
}
static void zpci_bus_add_devices(struct zpci_bus *zbus)
diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
index 89be3c354b7b..4972433df458 100644
--- a/arch/s390/pci/pci_bus.h
+++ b/arch/s390/pci/pci_bus.h
@@ -29,3 +29,16 @@ static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn];
}
+
+#ifdef CONFIG_PCI_IOV
+static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn)
+{
+
+ pci_lock_rescan_remove();
+ /* Linux' vfid's start at 0 vfn at 1 */
+ pci_iov_remove_virtfn(pdev->physfn, vfn - 1);
+ pci_unlock_rescan_remove();
+}
+#else /* CONFIG_PCI_IOV */
+static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn) {}
+#endif /* CONFIG_PCI_IOV */
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index fdebd286f402..d9ae7456dd4c 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -92,6 +92,9 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
break;
}
+ /* the configuration request may be stale */
+ if (zdev->state != ZPCI_FN_STATE_STANDBY)
+ break;
zdev->fh = ccdf->fh;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
ret = zpci_enable_device(zdev);
@@ -118,7 +121,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
if (!zdev)
break;
if (pdev)
- pci_stop_and_remove_bus_device_locked(pdev);
+ zpci_remove_device(zdev);
ret = zpci_disable_device(zdev);
if (ret)
@@ -137,9 +140,11 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
/* Give the driver a hint that the function is
* already unusable. */
pdev->error_state = pci_channel_io_perm_failure;
- pci_stop_and_remove_bus_device_locked(pdev);
+ zpci_remove_device(zdev);
}
+ zdev->fh = ccdf->fh;
+ zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
if (!clp_get_state(ccdf->fid, &state) &&
state == ZPCI_FN_STATE_RESERVED) {
diff --git a/arch/sh/drivers/platform_early.c b/arch/sh/drivers/platform_early.c
index f3dc3f25b3ff..143747c45206 100644
--- a/arch/sh/drivers/platform_early.c
+++ b/arch/sh/drivers/platform_early.c
@@ -246,7 +246,7 @@ static int __init sh_early_platform_driver_probe_id(char *class_str,
case EARLY_PLATFORM_ID_ERROR:
pr_warn("%s: unable to parse %s parameter\n",
class_str, epdrv->pdrv->driver.name);
- /* fall-through */
+ fallthrough;
case EARLY_PLATFORM_ID_UNSET:
match = NULL;
break;
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 1a0d7cf71c10..100bf241340b 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -8,7 +8,6 @@
#ifdef CONFIG_SMP
-#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/percpu.h>
diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c
index 08e1af63edd9..34e25a439c81 100644
--- a/arch/sh/kernel/disassemble.c
+++ b/arch/sh/kernel/disassemble.c
@@ -486,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
pr_cont("xd%d", rn & ~1);
break;
}
- /* else, fall through */
+ fallthrough;
case D_REG_N:
pr_cont("dr%d", rn);
break;
@@ -495,7 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
pr_cont("xd%d", rm & ~1);
break;
}
- /* else, fall through */
+ fallthrough;
case D_REG_M:
pr_cont("dr%d", rm);
break;
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index ad963104d22d..91ab2607a1ff 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -370,7 +370,6 @@ syscall_trace_entry:
nop
cmp/eq #-1, r0
bt syscall_exit
- mov.l r0, @(OFF_R0,r15) ! Save return value
! Reload R0-R4 from kernel stack, where the
! parent may have modified them using
! ptrace(POKEUSR). (Note that R0-R2 are
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 0d5f3c9d52f3..e4147efa9ec6 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -266,7 +266,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->pc = addr;
- /* fallthrough */
+ fallthrough;
case 'D':
case 'k':
atomic_set(&kgdb_cpu_doing_single_step, -1);
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index b05bf92f9c32..5281685f6ad1 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -455,16 +455,11 @@ long arch_ptrace(struct task_struct *child, long request,
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
- long ret = 0;
-
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- /*
- * Tracing decided this syscall should not happen.
- * We'll return a bogus call number to get an ENOSYS
- * error, but leave the original number in regs->regs[0].
- */
- ret = -1L;
+ tracehook_report_syscall_entry(regs)) {
+ regs->regs[0] = -ENOSYS;
+ return -1;
+ }
if (secure_computing() == -1)
return -1;
@@ -475,7 +470,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
- return ret ?: regs->regs[0];
+ return 0;
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index a0fbb8427b39..4fe3f00137bc 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -418,7 +418,7 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
case -ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->regs[0] = save_r0;
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 4843f48bfe85..774a82b0c649 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -87,7 +87,6 @@ void auxio_set_lte(int on)
__auxio_sbus_set_lte(on);
break;
case AUXIO_TYPE_EBUS:
- /* FALL-THROUGH */
default:
break;
}
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index bfae98ab8638..23f8838dd96e 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -55,7 +55,7 @@ static int clock_board_calc_nslots(struct clock_board *p)
else
return 5;
}
- /* Fallthrough */
+ fallthrough;
default:
return 4;
}
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 7580775a14b9..58ad3f7de1fb 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->pc = addr;
linux_regs->npc = addr + 4;
}
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 5d6c2d287e85..177746ae2c81 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->tpc = addr;
linux_regs->tnpc = addr + 4;
}
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index c0886b400dad..2a12c86af956 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -359,7 +359,7 @@ int __init pcr_arch_init(void)
* counter overflow interrupt so we can't make use of
* their hardware currently.
*/
- /* fallthrough */
+ fallthrough;
default:
err = -ENODEV;
goto out_unregister;
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index da8902295c8c..3df960c137f7 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -224,7 +224,7 @@ void __init of_console_init(void)
case PROMDEV_TTYB:
skip = 1;
- /* FALLTHRU */
+ fallthrough;
case PROMDEV_TTYA:
type = "serial";
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index e2c6f0abda00..e9695a06492f 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -646,7 +646,7 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
@@ -686,7 +686,7 @@ void do_signal32(struct pt_regs * regs)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index f1f8c8ebe641..d0e0025ee3ba 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -440,7 +440,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->pc -= 4;
@@ -506,7 +506,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->pc -= 4;
regs->npc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 6937339a272c..255264bcb46a 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -461,7 +461,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
@@ -532,7 +532,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index 72e560ef4a09..d5beec856146 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -359,7 +359,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
@@ -380,7 +380,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
@@ -408,13 +408,13 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
- /* fall through */
+ fallthrough;
case 1:
rd = (void *)&fregs[freg];
break;
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index c8eabb973b86..b1dbf2fa8c0a 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -491,7 +491,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
} else {
emit_loadimm(K, r_A);
}
- /* Fallthrough */
+ fallthrough;
case BPF_RET | BPF_A:
if (seen_or_pass0) {
if (i != flen - 1) {
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 3d57c71c532e..88cd9b5c1b74 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -70,7 +70,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
PT_REGS_SYSCALL_RET(regs) = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
PT_REGS_RESTART_SYSCALL(regs);
PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7101ac64bb20..e876b3a087f9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -75,7 +75,7 @@ config X86
select ARCH_HAS_PTE_DEVMAP if X86_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
- select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE
+ select ARCH_HAS_COPY_MC if X86_64
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_STRICT_KERNEL_RWX
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index ee1d3c5834c6..27b5e2bc6a01 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -62,7 +62,7 @@ config EARLY_PRINTK_USB_XDBC
You should normally say N here, unless you want to debug early
crashes or need a very simple printk logging facility.
-config MCSAFE_TEST
+config COPY_MC_TEST
def_bool n
config EFI_PGT_DUMP
diff --git a/arch/x86/boot/cmdline.c b/arch/x86/boot/cmdline.c
index 4ff01176c1cc..21d56ae83cdf 100644
--- a/arch/x86/boot/cmdline.c
+++ b/arch/x86/boot/cmdline.c
@@ -54,7 +54,7 @@ int __cmdline_find_option(unsigned long cmdline_ptr, const char *option, char *b
/* else */
state = st_wordcmp;
opptr = option;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (c == '=' && !*opptr) {
@@ -129,7 +129,7 @@ int __cmdline_find_option_bool(unsigned long cmdline_ptr, const char *option)
state = st_wordcmp;
opptr = option;
wstart = pos;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (!*opptr)
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 3962f592633d..ff7894f39e0e 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -43,6 +43,8 @@ KBUILD_CFLAGS += -Wno-pointer-sign
KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += -D__DISABLE_EXPORTS
+# Disable relocation relaxation in case the link is not PIE.
+KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 0048269180d5..dde7cb3724df 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -178,7 +178,7 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size,
}
*size = 0;
}
- /* Fall through */
+ fallthrough;
default:
/*
* If w/o offset, only size specified, memmap=nn[KMG] has the
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 39e592d0e0b4..e478e40fbe5a 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -30,12 +30,9 @@
#define STATIC static
/*
- * Use normal definitions of mem*() from string.c. There are already
- * included header files which expect a definition of memset() and by
- * the time we define memset macro, it is too late.
+ * Provide definitions of memzero and memmove as some of the decompressors will
+ * try to define their own functions if these are not defined as macros.
*/
-#undef memcpy
-#undef memset
#define memzero(s, n) memset((s), 0, (n))
#define memmove memmove
diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
index 995f7b7ad512..a232da487cd2 100644
--- a/arch/x86/boot/string.h
+++ b/arch/x86/boot/string.h
@@ -11,10 +11,7 @@ void *memcpy(void *dst, const void *src, size_t len);
void *memset(void *dst, int c, size_t len);
int memcmp(const void *s1, const void *s2, size_t len);
-/*
- * Access builtin version by default. If one needs to use optimized version,
- * do "undef memcpy" in .c file and link against right string.c
- */
+/* Access builtin version by default. */
#define memcpy(d,s,l) __builtin_memcpy(d,s,l)
#define memset(d,c,l) __builtin_memset(d,c,l)
#define memcmp __builtin_memcmp
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index d7577fece9eb..78210793d357 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -19,6 +19,7 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+# CONFIG_64BIT is not set
CONFIG_SMP=y
CONFIG_X86_GENERIC=y
CONFIG_HPET_TIMER=y
@@ -186,7 +187,6 @@ CONFIG_DRM_I915=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index f85600143747..9936528e1939 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -181,7 +181,6 @@ CONFIG_DRM_I915=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 98e4d8886f11..07a9331d55e7 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -6,7 +6,6 @@
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
-#include <asm/inst.h>
/*
@@ -374,12 +373,14 @@ For 32-bit we have the following conventions - kernel is built with
* Fetch the per-CPU GSBASE value for this processor and put it in @reg.
* We normally use %gs for accessing per-CPU data, but we are setting up
* %gs here and obviously can not use %gs itself to access per-CPU data.
+ *
+ * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
+ * may not restore the host's value until the CPU returns to userspace.
+ * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
+ * while running KVM's run loop.
*/
.macro GET_PERCPU_BASE reg:req
- ALTERNATIVE \
- "LOAD_CPU_AND_NODE_SEG_LIMIT \reg", \
- "RDPID \reg", \
- X86_FEATURE_RDPID
+ LOAD_CPU_AND_NODE_SEG_LIMIT \reg
andq $VDSO_CPUNODE_MASK, \reg
movq __per_cpu_offset(, \reg, 8), \reg
.endm
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 48512c7944e7..870efeec8bda 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -60,16 +60,10 @@ __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
{
- unsigned int nr = (unsigned int)regs->orig_ax;
-
if (IS_ENABLED(CONFIG_IA32_EMULATION))
current_thread_info()->status |= TS_COMPAT;
- /*
- * Subtlety here: if ptrace pokes something larger than 2^32-1 into
- * orig_ax, the unsigned int return value truncates it. This may
- * or may not be necessary, but it matches the old asm behavior.
- */
- return (unsigned int)syscall_enter_from_user_mode(regs, nr);
+
+ return (unsigned int)regs->orig_ax;
}
/*
@@ -91,15 +85,29 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
unsigned int nr = syscall_32_enter(regs);
+ /*
+ * Subtlety here: if ptrace pokes something larger than 2^32-1 into
+ * orig_ax, the unsigned int return value truncates it. This may
+ * or may not be necessary, but it matches the old asm behavior.
+ */
+ nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+
do_syscall_32_irqs_on(regs, nr);
syscall_exit_to_user_mode(regs);
}
static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
- unsigned int nr = syscall_32_enter(regs);
+ unsigned int nr = syscall_32_enter(regs);
int res;
+ /*
+ * This cannot use syscall_enter_from_user_mode() as it has to
+ * fetch EBP before invoking any of the syscall entry work
+ * functions.
+ */
+ syscall_enter_from_user_mode_prepare(regs);
+
instrumentation_begin();
/* Fetch EBP from where the vDSO stashed it. */
if (IS_ENABLED(CONFIG_X86_64)) {
@@ -122,6 +130,9 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
return false;
}
+ /* The case truncates any ptrace induced syscall nr > 2^32 -1 */
+ nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr);
+
/* Now this is just like a normal syscall. */
do_syscall_32_irqs_on(regs, nr);
syscall_exit_to_user_mode(regs);
@@ -288,7 +299,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
old_regs = set_irq_regs(regs);
instrumentation_begin();
- run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
+ run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
instrumentation_end();
set_irq_regs(old_regs);
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 70dea9337816..748c2db3c54e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -682,6 +682,8 @@ SYM_CODE_END(.Lbad_gs)
* rdx: Function argument (can be NULL if none)
*/
SYM_FUNC_START(asm_call_on_stack)
+SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
+SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
/*
* Save the frame pointer unconditionally. This allows the ORC
* unwinder to handle the stack switch.
@@ -840,8 +842,9 @@ SYM_CODE_START_LOCAL(paranoid_entry)
* retrieve and set the current CPUs kernel GSBASE. The stored value
* has to be restored in paranoid_exit unconditionally.
*
- * The MSR write ensures that no subsequent load is based on a
- * mispredicted GSBASE. No extra FENCE required.
+ * The unconditional write to GS base below ensures that no subsequent
+ * loads based on a mispredicted GS base can happen, therefore no LFENCE
+ * is needed here.
*/
SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
ret
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index 3a07ce3ec70b..f1f96d4d8cd6 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -29,11 +29,6 @@ SYM_CODE_START_NOALIGN(\name)
SYM_CODE_END(\name)
.endm
-#ifdef CONFIG_TRACE_IRQFLAGS
- THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
- THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
-#endif
-
#ifdef CONFIG_PREEMPTION
THUNK preempt_schedule_thunk, preempt_schedule
THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 50963472ee85..31e6887d24f1 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4682,7 +4682,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_CORE2_MEROM:
x86_add_quirk(intel_clovertown_quirk);
- /* fall through */
+ fallthrough;
case INTEL_FAM6_CORE2_MEROM_L:
case INTEL_FAM6_CORE2_PENRYN:
@@ -5062,7 +5062,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_SKYLAKE_X:
pmem = true;
- /* fall through */
+ fallthrough;
case INTEL_FAM6_SKYLAKE_L:
case INTEL_FAM6_SKYLAKE:
case INTEL_FAM6_KABYLAKE_L:
@@ -5114,7 +5114,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_ICELAKE_X:
case INTEL_FAM6_ICELAKE_D:
pmem = true;
- /* fall through */
+ fallthrough;
case INTEL_FAM6_ICELAKE_L:
case INTEL_FAM6_ICELAKE:
case INTEL_FAM6_TIGERLAKE_L:
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 63f58bdf556c..8961653c5dd2 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1268,7 +1268,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
ret = X86_BR_ZERO_CALL;
break;
}
- /* fall through */
+ fallthrough;
case 0x9a: /* call far absolute */
ret = X86_BR_CALL;
break;
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index cb94ba86efd2..6a4ca27b2c9e 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -390,6 +390,18 @@ static struct uncore_event_desc snb_uncore_imc_events[] = {
INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
+ INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
+ INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),
+
+ INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
+ INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),
+
+ INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
+ INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),
+
{ /* end: all zeroes */ },
};
@@ -405,13 +417,35 @@ static struct uncore_event_desc snb_uncore_imc_events[] = {
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE
+/* BW break down- legacy counters */
+#define SNB_UNCORE_PCI_IMC_GT_REQUESTS 0x3
+#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE 0x5040
+#define SNB_UNCORE_PCI_IMC_IA_REQUESTS 0x4
+#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE 0x5044
+#define SNB_UNCORE_PCI_IMC_IO_REQUESTS 0x5
+#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE 0x5048
+
enum perf_snb_uncore_imc_freerunning_types {
- SNB_PCI_UNCORE_IMC_DATA = 0,
+ SNB_PCI_UNCORE_IMC_DATA_READS = 0,
+ SNB_PCI_UNCORE_IMC_DATA_WRITES,
+ SNB_PCI_UNCORE_IMC_GT_REQUESTS,
+ SNB_PCI_UNCORE_IMC_IA_REQUESTS,
+ SNB_PCI_UNCORE_IMC_IO_REQUESTS,
+
SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
- [SNB_PCI_UNCORE_IMC_DATA] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
+ [SNB_PCI_UNCORE_IMC_DATA_READS] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_DATA_WRITES] = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_GT_REQUESTS] = { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_IA_REQUESTS] = { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_IO_REQUESTS] = { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
+ 0x0, 0x0, 1, 32 },
};
static struct attribute *snb_uncore_imc_formats_attr[] = {
@@ -525,6 +559,18 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
idx = UNCORE_PMC_IDX_FREERUNNING;
break;
+ case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
+ base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
+ idx = UNCORE_PMC_IDX_FREERUNNING;
+ break;
+ case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
+ base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
+ idx = UNCORE_PMC_IDX_FREERUNNING;
+ break;
+ case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
+ base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
+ idx = UNCORE_PMC_IDX_FREERUNNING;
+ break;
default:
return -EINVAL;
}
@@ -598,7 +644,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = {
static struct intel_uncore_type snb_uncore_imc = {
.name = "imc",
- .num_counters = 2,
+ .num_counters = 5,
.num_boxes = 1,
.num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
.mmio_map_size = SNB_UNCORE_PCI_IMC_MAP_SIZE,
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index ca0976456a6b..6d2df1ee427b 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -159,8 +159,6 @@ static inline u64 x86_default_get_root_pointer(void)
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
-#define acpi_unlazy_tlb(x) leave_mm(x)
-
#ifdef CONFIG_ACPI_APEI
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 5a42f9206138..51e2bf27cc9b 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -5,6 +5,7 @@
#include <asm/string.h>
#include <asm/page.h>
#include <asm/checksum.h>
+#include <asm/mce.h>
#include <asm-generic/asm-prototypes.h>
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 5c15f95b1ba7..0359cbbd0f50 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -135,6 +135,9 @@
# define _ASM_EXTABLE_UA(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+# define _ASM_EXTABLE_CPY(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy)
+
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
@@ -160,6 +163,9 @@
# define _ASM_EXTABLE_UA(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+# define _ASM_EXTABLE_CPY(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy)
+
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
diff --git a/arch/x86/include/asm/copy_mc_test.h b/arch/x86/include/asm/copy_mc_test.h
new file mode 100644
index 000000000000..e4991ba96726
--- /dev/null
+++ b/arch/x86/include/asm/copy_mc_test.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _COPY_MC_TEST_H_
+#define _COPY_MC_TEST_H_
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_COPY_MC_TEST
+extern unsigned long copy_mc_test_src;
+extern unsigned long copy_mc_test_dst;
+
+static inline void copy_mc_inject_src(void *addr)
+{
+ if (addr)
+ copy_mc_test_src = (unsigned long) addr;
+ else
+ copy_mc_test_src = ~0UL;
+}
+
+static inline void copy_mc_inject_dst(void *addr)
+{
+ if (addr)
+ copy_mc_test_dst = (unsigned long) addr;
+ else
+ copy_mc_test_dst = ~0UL;
+}
+#else /* CONFIG_COPY_MC_TEST */
+static inline void copy_mc_inject_src(void *addr)
+{
+}
+
+static inline void copy_mc_inject_dst(void *addr)
+{
+}
+#endif /* CONFIG_COPY_MC_TEST */
+
+#else /* __ASSEMBLY__ */
+#include <asm/export.h>
+
+#ifdef CONFIG_COPY_MC_TEST
+.macro COPY_MC_TEST_CTL
+ .pushsection .data
+ .align 8
+ .globl copy_mc_test_src
+ copy_mc_test_src:
+ .quad 0
+ EXPORT_SYMBOL_GPL(copy_mc_test_src)
+ .globl copy_mc_test_dst
+ copy_mc_test_dst:
+ .quad 0
+ EXPORT_SYMBOL_GPL(copy_mc_test_dst)
+ .popsection
+.endm
+
+.macro COPY_MC_TEST_SRC reg count target
+ leaq \count(\reg), %r9
+ cmp copy_mc_test_src, %r9
+ ja \target
+.endm
+
+.macro COPY_MC_TEST_DST reg count target
+ leaq \count(\reg), %r9
+ cmp copy_mc_test_dst, %r9
+ ja \target
+.endm
+#else
+.macro COPY_MC_TEST_CTL
+.endm
+
+.macro COPY_MC_TEST_SRC reg count target
+.endm
+
+.macro COPY_MC_TEST_DST reg count target
+.endm
+#endif /* CONFIG_COPY_MC_TEST */
+#endif /* __ASSEMBLY__ */
+#endif /* _COPY_MC_TEST_H_ */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 4a7473ae55ac..7b0afd5e6c57 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -96,7 +96,7 @@
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
-/* free ( 3*32+17) */
+#define X86_FEATURE_SME_COHERENT ( 3*32+17) /* "" AMD hardware-enforced cache coherency */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
@@ -354,6 +354,7 @@
#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */
#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */
+#define X86_FEATURE_ENQCMD (16*32+29) /* ENQCMD and ENQCMDS instructions */
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
@@ -369,6 +370,7 @@
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
+#define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 4ea8584682f9..5861d34f9771 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -56,6 +56,12 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
+#ifdef CONFIG_IOMMU_SUPPORT
+# define DISABLE_ENQCMD 0
+#else
+# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
+#endif
+
/*
* Make sure to add features to the correct mask
*/
@@ -75,7 +81,8 @@
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
-#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
+#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \
+ DISABLE_ENQCMD)
#define DISABLED_MASK17 0
#define DISABLED_MASK18 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
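Adding DISABLE_ENQCMD to DISABLED_MASK16 makes cpu_feature_enabled(X86_FEATURE_ENQCMD) constant-fold to false when CONFIG_IOMMU_SUPPORT is off, so all ENQCMD/PASID paths compile out. A consumer-side sketch — setup_pasid_msr() is a hypothetical hook, not a real kernel function:

static void maybe_enable_enqcmd(void)
{
	/* Folds to false at compile time if the feature is disabled. */
	if (cpu_feature_enabled(X86_FEATURE_ENQCMD))
		setup_pasid_msr();	/* hypothetical setup hook */
}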
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index b9c2667ac46c..bc9758ef292e 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -81,11 +81,8 @@ extern unsigned long efi_fw_vendor, efi_config_table;
kernel_fpu_end(); \
})
-
#define arch_efi_call_virt(p, f, args...) p->f(args)
-#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
-
#else /* !CONFIG_X86_32 */
#define EFI_LOADER_SIGNATURE "EL64"
@@ -125,9 +122,6 @@ struct efi_scratch {
kernel_fpu_end(); \
})
-extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
- u32 type, u64 attribute);
-
#ifdef CONFIG_KASAN
/*
* CONFIG_KASAN may redefine memset to __memset. __memset function is present
@@ -143,17 +137,13 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
#endif /* CONFIG_X86_32 */
extern struct efi_scratch efi_scratch;
-extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
-extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
-extern void __init old_map_region(efi_memory_desc_t *md);
-extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index a8f9315b9eae..6fe54b2813c1 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -18,8 +18,16 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
* state, not the interrupt state as imagined by Xen.
*/
unsigned long flags = native_save_fl();
- WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
- X86_EFLAGS_NT));
+ unsigned long mask = X86_EFLAGS_DF | X86_EFLAGS_NT;
+
+ /*
+ * For !SMAP hardware we patch out CLAC on entry.
+ */
+ if (boot_cpu_has(X86_FEATURE_SMAP) ||
+ (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
+ mask |= X86_EFLAGS_AC;
+
+ WARN_ON_ONCE(flags & mask);
/* We think we came from user mode. Make sure pt_regs agrees. */
WARN_ON_ONCE(!user_mode(regs));
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
index d8c2198d543b..1f0cbc52937c 100644
--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -29,10 +29,17 @@ struct pt_regs;
(b)->handler = (tmp).handler - (delta); \
} while (0)
+enum handler_type {
+ EX_HANDLER_NONE,
+ EX_HANDLER_FAULT,
+ EX_HANDLER_UACCESS,
+ EX_HANDLER_OTHER
+};
+
extern int fixup_exception(struct pt_regs *regs, int trapnr,
unsigned long error_code, unsigned long fault_addr);
extern int fixup_bug(struct pt_regs *regs, int trapnr);
-extern bool ex_has_fault_handler(unsigned long ip);
+extern enum handler_type ex_get_fault_handler_type(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
#endif
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index b774c52e5411..dcd9503b1098 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -62,4 +62,16 @@ extern void switch_fpu_return(void);
*/
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
+/*
+ * Tasks that are not using SVA have mm->pasid set to zero to note that they
+ * will not have the valid bit set in MSR_IA32_PASID while they are running.
+ */
+#define PASID_DISABLED 0
+
+#ifdef CONFIG_IOMMU_SUPPORT
+/* Update current's PASID MSR/state by mm's PASID. */
+void update_pasid(void);
+#else
+static inline void update_pasid(void) { }
+#endif
#endif /* _ASM_X86_FPU_API_H */
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 0a460f2a3f90..eb1ed3bd8d96 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -583,6 +583,13 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
pkru_val = pk->pkru;
}
__write_pkru(pkru_val);
+
+ /*
+ * Expensive PASID MSR write will be avoided in update_pasid() because
+ * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
+ * unless it's different from mm->pasid to reduce overhead.
+ */
+ update_pasid();
}
/*
@@ -602,9 +609,7 @@ static inline u64 xgetbv(u32 index)
{
u32 eax, edx;
- asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
- : "=a" (eax), "=d" (edx)
- : "c" (index));
+ asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (index));
return eax + ((u64)edx << 32);
}
@@ -613,8 +618,7 @@ static inline void xsetbv(u32 index, u64 value)
u32 eax = value;
u32 edx = value >> 32;
- asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
- : : "a" (eax), "d" (edx), "c" (index));
+ asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}
#endif /* _ASM_X86_FPU_INTERNAL_H */
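With the minimum binutils now recent enough to assemble the mnemonics, the hand-coded .byte sequences can go; callers are unaffected. For example, a sketch of reading XCR0 (xgetbv index 0) to test an xfeature bit:

#include <linux/bits.h>

/* Sketch: bit 2 of XCR0 is the AVX (YMM) state-enable bit. */
static bool avx_state_enabled(void)
{
	return xgetbv(0) & BIT_ULL(2);
}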
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index c87364ea6446..f5a38a5f3ae1 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -114,7 +114,7 @@ enum xfeature {
XFEATURE_Hi16_ZMM,
XFEATURE_PT_UNIMPLEMENTED_SO_FAR,
XFEATURE_PKRU,
- XFEATURE_RSRVD_COMP_10,
+ XFEATURE_PASID,
XFEATURE_RSRVD_COMP_11,
XFEATURE_RSRVD_COMP_12,
XFEATURE_RSRVD_COMP_13,
@@ -134,6 +134,7 @@ enum xfeature {
#define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM)
#define XFEATURE_MASK_PT (1 << XFEATURE_PT_UNIMPLEMENTED_SO_FAR)
#define XFEATURE_MASK_PKRU (1 << XFEATURE_PKRU)
+#define XFEATURE_MASK_PASID (1 << XFEATURE_PASID)
#define XFEATURE_MASK_LBR (1 << XFEATURE_LBR)
#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
@@ -256,6 +257,14 @@ struct arch_lbr_state {
struct lbr_entry entries[];
} __packed;
+/*
+ * State component 10 is supervisor state used for context-switching the
+ * PASID state.
+ */
+struct ia32_pasid_state {
+ u64 pasid;
+} __packed;
+
struct xstate_header {
u64 xfeatures;
u64 xcomp_bv;
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 14ab815132d4..47a92232d595 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -35,7 +35,7 @@
XFEATURE_MASK_BNDCSR)
/* All currently supported supervisor features */
-#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (0)
+#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (XFEATURE_MASK_PASID)
/*
* A supervisor state component may not always contain valuable information,
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 296b346184b2..fb42659f6e98 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -60,12 +60,26 @@
#define FRAME_END "pop %" _ASM_BP "\n"
#ifdef CONFIG_X86_64
+
#define ENCODE_FRAME_POINTER \
"lea 1(%rsp), %rbp\n\t"
+
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+ return (unsigned long)regs + 1;
+}
+
#else /* !CONFIG_X86_64 */
+
#define ENCODE_FRAME_POINTER \
"movl %esp, %ebp\n\t" \
"andl $0x7fffffff, %ebp\n\t"
+
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+ return (unsigned long)regs & 0x7fffffff;
+}
+
#endif /* CONFIG_X86_64 */
#endif /* __ASSEMBLY__ */
@@ -83,6 +97,11 @@
#define ENCODE_FRAME_POINTER
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+ return 0;
+}
+
#endif
#define FRAME_BEGIN
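The C helper mirrors the asm macro: on x86-64 the encoded value is regs + 1,
so the low bit tells an unwinder that rbp points at a pt_regs rather than a
normal frame. A decoding sketch for the 64-bit scheme (the 32-bit variant
clears the top bit instead, so its decode differs):

/* Sketch: undo encode_frame_pointer() on x86-64. A set low bit marks
 * an encoded pt_regs pointer; subtracting 1 recovers the regs.
 */
static struct pt_regs *sketch_decode_frame_pointer(unsigned long bp)
{
	if (!(bp & 0x1))
		return NULL;		/* ordinary frame pointer */
	return (struct pt_regs *)(bp - 1);
}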
diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h
index d552646411a9..35cff5f2becf 100644
--- a/arch/x86/include/asm/fsgsbase.h
+++ b/arch/x86/include/asm/fsgsbase.h
@@ -57,7 +57,7 @@ static inline unsigned long x86_fsbase_read_cpu(void)
{
unsigned long fsbase;
- if (static_cpu_has(X86_FEATURE_FSGSBASE))
+ if (boot_cpu_has(X86_FEATURE_FSGSBASE))
fsbase = rdfsbase();
else
rdmsrl(MSR_FS_BASE, fsbase);
@@ -67,7 +67,7 @@ static inline unsigned long x86_fsbase_read_cpu(void)
static inline void x86_fsbase_write_cpu(unsigned long fsbase)
{
- if (static_cpu_has(X86_FEATURE_FSGSBASE))
+ if (boot_cpu_has(X86_FEATURE_FSGSBASE))
wrfsbase(fsbase);
else
wrmsrl(MSR_FS_BASE, fsbase);
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index a43366191212..df4dc975e8fd 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -242,7 +242,7 @@ __visible noinstr void func(struct pt_regs *regs) \
instrumentation_begin(); \
irq_enter_rcu(); \
kvm_set_cpu_l1tf_flush_l1d(); \
- run_on_irqstack_cond(__##func, regs, regs); \
+ run_sysvec_on_irqstack_cond(__##func, regs); \
irq_exit_rcu(); \
instrumentation_end(); \
irqentry_exit(regs, state); \
@@ -591,10 +591,6 @@ DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
-# ifdef CONFIG_X86_UV
-DECLARE_IDTENTRY_SYSVEC(UV_BAU_MESSAGE, sysvec_uv_bau_message);
-# endif
-
# ifdef CONFIG_X86_MCE_THRESHOLD
DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold);
# endif
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e1aa17a468a8..d726459d08e5 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -401,7 +401,7 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr,
/**
* iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
- * @__dst: destination, in MMIO space (must be 512-bit aligned)
+ * @dst: destination, in MMIO space (must be 512-bit aligned)
* @src: source
* @count: number of 512 bits quantities to submit
*
@@ -412,25 +412,14 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr,
* Warning: Do not use this helper unless your driver has checked that the CPU
* instruction is supported on the platform.
*/
-static inline void iosubmit_cmds512(void __iomem *__dst, const void *src,
+static inline void iosubmit_cmds512(void __iomem *dst, const void *src,
size_t count)
{
-	/*
-	 * Note that this isn't an "on-stack copy", just a definition of
-	 * "dst" as a pointer to 64 bytes of stuff that is going to be
-	 * overwritten. In the MOVDIR64B case that may be needed, as you
-	 * can use the MOVDIR64B instruction to copy arbitrary memory
-	 * around. This trick lets the compiler know how much gets
-	 * clobbered.
-	 */
-	volatile struct { char _[64]; } *dst = __dst;
const u8 *from = src;
const u8 *end = from + count * 64;
while (from < end) {
- /* MOVDIR64B [rdx], rax */
- asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
- : "=m" (dst)
- : "d" (from), "a" (dst));
+ movdir64b(dst, from);
from += 64;
}
}
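With the open-coded MOVDIR64B bytes replaced by the movdir64b() helper
(added in special_insns.h later in this patch), callers are unchanged. A
usage sketch, with the portal pointer assumed:

/* Sketch: post four 512-bit commands to a device portal that the
 * driver has already verified supports MOVDIR64B.
 */
static void sketch_post_cmds(void __iomem *portal, const u8 cmds[4][64])
{
	iosubmit_cmds512(portal, cmds, 4);
}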
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 4ae66f097101..775816965c6a 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -12,20 +12,50 @@ static __always_inline bool irqstack_active(void)
return __this_cpu_read(irq_count) != -1;
}
-void asm_call_on_stack(void *sp, void *func, void *arg);
+void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
+void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
+ struct pt_regs *regs);
+void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
+ struct irq_desc *desc);
-static __always_inline void __run_on_irqstack(void *func, void *arg)
+static __always_inline void __run_on_irqstack(void (*func)(void))
{
void *tos = __this_cpu_read(hardirq_stack_ptr);
__this_cpu_add(irq_count, 1);
- asm_call_on_stack(tos - 8, func, arg);
+ asm_call_on_stack(tos - 8, func, NULL);
+ __this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+ struct pt_regs *regs)
+{
+ void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+ __this_cpu_add(irq_count, 1);
+ asm_call_sysvec_on_stack(tos - 8, func, regs);
+ __this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+ struct irq_desc *desc)
+{
+ void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+ __this_cpu_add(irq_count, 1);
+ asm_call_irq_on_stack(tos - 8, func, desc);
__this_cpu_sub(irq_count, 1);
}
#else /* CONFIG_X86_64 */
static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void *func, void *arg) { }
+static inline void __run_on_irqstack(void (*func)(void)) { }
+static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+ struct pt_regs *regs) { }
+static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+ struct irq_desc *desc) { }
#endif /* !CONFIG_X86_64 */
static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
@@ -37,17 +67,40 @@ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
return !user_mode(regs) && !irqstack_active();
}
-static __always_inline void run_on_irqstack_cond(void *func, void *arg,
+
+static __always_inline void run_on_irqstack_cond(void (*func)(void),
struct pt_regs *regs)
{
- void (*__func)(void *arg) = func;
+ lockdep_assert_irqs_disabled();
+
+ if (irq_needs_irq_stack(regs))
+ __run_on_irqstack(func);
+ else
+ func();
+}
+
+static __always_inline void
+run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
+ struct pt_regs *regs)
+{
+ lockdep_assert_irqs_disabled();
+ if (irq_needs_irq_stack(regs))
+ __run_sysvec_on_irqstack(func, regs);
+ else
+ func(regs);
+}
+
+static __always_inline void
+run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
+ struct pt_regs *regs)
+{
lockdep_assert_irqs_disabled();
if (irq_needs_irq_stack(regs))
- __run_on_irqstack(__func, arg);
+ __run_irq_on_irqstack(func, desc);
else
- __func(arg);
+ func(desc);
}
#endif
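The typed helpers replace the old void-pointer interface, so the compiler
now checks the handler signature at the call site. Roughly how the sysvec
variant is used, as a sketch; the real call site is the idtentry macro
changed earlier in this patch:

/* Sketch: run the handler on the per-CPU hard IRQ stack unless we are
 * already on it or the interrupt arrived from user mode.
 */
static void __sysvec_example(struct pt_regs *regs)
{
	/* ... service the vector ... */
}

static void sysvec_example_entry(struct pt_regs *regs)
{
	run_sysvec_on_irqstack_cond(__sysvec_example, regs);
}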
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5ab3af7275d8..5303dbc5c9bc 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1596,7 +1596,8 @@ asmlinkage void kvm_spurious_fault(void);
_ASM_EXTABLE(666b, 667b)
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index cf503824529c..a0f147893a04 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -136,9 +136,24 @@
#define MCE_HANDLED_NFIT BIT_ULL(3)
#define MCE_HANDLED_EDAC BIT_ULL(4)
#define MCE_HANDLED_MCELOG BIT_ULL(5)
+
+/*
+ * Indicates an MCE which has happened in kernel space but from
+ * which the kernel can recover simply by executing fixup_exception()
+ * so that an error is returned to the caller of the function that
+ * hit the machine check.
+ */
#define MCE_IN_KERNEL_RECOV BIT_ULL(6)
/*
+ * Indicates an MCE that happened in kernel space while copying data
+ * from user space. In this case fixup_exception() gets the kernel to
+ * the error exit of the copy function, and the machine check handler
+ * can then treat it like a fault taken in user mode.
+ */
+#define MCE_IN_KERNEL_COPYIN BIT_ULL(7)
+
+/*
* This structure contains all data related to the MCE log. Also
* carries a signature to make it easier to find from external
* debugging tools. Each entry is only valid when its finished flag
@@ -174,6 +189,15 @@ extern void mce_unregister_decode_chain(struct notifier_block *nb);
extern int mce_p5_enabled;
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+extern void enable_copy_mc_fragile(void);
+unsigned long __must_check copy_mc_fragile(void *dst, const void *src, unsigned cnt);
+#else
+static inline void enable_copy_mc_fragile(void)
+{
+}
+#endif
+
#ifdef CONFIG_X86_MCE
int mcheck_init(void);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
@@ -200,12 +224,8 @@ void mce_setup(struct mce *m);
void mce_log(struct mce *m);
DECLARE_PER_CPU(struct device *, mce_device);
-/*
- * Maximum banks number.
- * This is the limit of the current register layout on
- * Intel CPUs.
- */
-#define MAX_NR_BANKS 32
+/* Maximum number of MCA banks per CPU. */
+#define MAX_NR_BANKS 64
#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
@@ -328,7 +348,6 @@ enum smca_bank_types {
struct smca_hwid {
unsigned int bank_type; /* Use with smca_bank_types for easy indexing. */
u32 hwid_mcatype; /* (hwid,mcatype) tuple */
- u32 xec_bitmap; /* Bitmap of valid ExtErrorCodes; current max is 21. */
u8 count; /* Number of instances. */
};
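A sketch of how a machine-check handler might branch on the two in-kernel
recovery flags. This is simplified; the real logic lives in
do_machine_check(), and the mce fields used here are assumptions based on
the flag descriptions above:

/* Sketch: MCE_IN_KERNEL_COPYIN means a copy-from-user tripped poison;
 * fixup_exception() routes execution to the copy routine's error exit,
 * after which the event can be handled like a user-mode fault.
 */
static bool sketch_recover_kernel_mce(struct pt_regs *regs, struct mce *m)
{
	if (m->kflags & (MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV))
		return fixup_exception(regs, X86_TRAP_MC, 0, m->addr);
	return false;
}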
diff --git a/arch/x86/include/asm/mcsafe_test.h b/arch/x86/include/asm/mcsafe_test.h
deleted file mode 100644
index eb59804b6201..000000000000
--- a/arch/x86/include/asm/mcsafe_test.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MCSAFE_TEST_H_
-#define _MCSAFE_TEST_H_
-
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_MCSAFE_TEST
-extern unsigned long mcsafe_test_src;
-extern unsigned long mcsafe_test_dst;
-
-static inline void mcsafe_inject_src(void *addr)
-{
- if (addr)
- mcsafe_test_src = (unsigned long) addr;
- else
- mcsafe_test_src = ~0UL;
-}
-
-static inline void mcsafe_inject_dst(void *addr)
-{
- if (addr)
- mcsafe_test_dst = (unsigned long) addr;
- else
- mcsafe_test_dst = ~0UL;
-}
-#else /* CONFIG_MCSAFE_TEST */
-static inline void mcsafe_inject_src(void *addr)
-{
-}
-
-static inline void mcsafe_inject_dst(void *addr)
-{
-}
-#endif /* CONFIG_MCSAFE_TEST */
-
-#else /* __ASSEMBLY__ */
-#include <asm/export.h>
-
-#ifdef CONFIG_MCSAFE_TEST
-.macro MCSAFE_TEST_CTL
- .pushsection .data
- .align 8
- .globl mcsafe_test_src
- mcsafe_test_src:
- .quad 0
- EXPORT_SYMBOL_GPL(mcsafe_test_src)
- .globl mcsafe_test_dst
- mcsafe_test_dst:
- .quad 0
- EXPORT_SYMBOL_GPL(mcsafe_test_dst)
- .popsection
-.endm
-
-.macro MCSAFE_TEST_SRC reg count target
- leaq \count(\reg), %r9
- cmp mcsafe_test_src, %r9
- ja \target
-.endm
-
-.macro MCSAFE_TEST_DST reg count target
- leaq \count(\reg), %r9
- cmp mcsafe_test_dst, %r9
- ja \target
-.endm
-#else
-.macro MCSAFE_TEST_CTL
-.endm
-
-.macro MCSAFE_TEST_SRC reg count target
-.endm
-
-.macro MCSAFE_TEST_DST reg count target
-.endm
-#endif /* CONFIG_MCSAFE_TEST */
-#endif /* __ASSEMBLY__ */
-#endif /* _MCSAFE_TEST_H_ */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 0a301ad0b02f..9257667d13c5 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -59,5 +59,6 @@ typedef struct {
}
void leave_mm(int cpu);
+#define leave_mm leave_mm
#endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 2859ee4f39a8..aaddc6a9e237 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -257,6 +257,9 @@
#define MSR_IA32_LASTINTFROMIP 0x000001dd
#define MSR_IA32_LASTINTTOIP 0x000001de
+#define MSR_IA32_PASID 0x00000d93
+#define MSR_IA32_PASID_VALID BIT_ULL(31)
+
/* DEBUGCTLMSR bits (others vary by model): */
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
#define DEBUGCTLMSR_BTF_SHIFT 1
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 40aa69d04862..d8324a236696 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -327,8 +327,8 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
static const unsigned int argument_offs[] = {
#ifdef __i386__
offsetof(struct pt_regs, ax),
- offsetof(struct pt_regs, cx),
offsetof(struct pt_regs, dx),
+ offsetof(struct pt_regs, cx),
#define NR_REG_ARGUMENTS 3
#else
offsetof(struct pt_regs, di),
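The swap above matters because the 32-bit kernel is built with -mregparm=3,
and GCC's regparm convention passes the first three arguments in %eax, %edx
and %ecx in that order, so argument 1 lives in dx, not cx. A tiny
illustration with a hypothetical function:

/* Illustration: with regparm(3), a arrives in %eax, b in %edx and
 * c in %ecx, matching the corrected offset table.
 */
static int __attribute__((regparm(3))) sketch_args(int a, int b, int c)
{
	return a + b + c;
}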
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 59a3e13204c3..94624fb06fac 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -234,6 +234,76 @@ static inline void clwb(volatile void *__p)
#define nop() asm volatile ("nop")
+static inline void serialize(void)
+{
+	/* SERIALIZE opcode bytes; the mnemonic needs binutils >= 2.35. */
+ asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
+}
+
+/* The dst parameter must be 64-bytes aligned */
+static inline void movdir64b(void *dst, const void *src)
+{
+ const struct { char _[64]; } *__src = src;
+ struct { char _[64]; } *__dst = dst;
+
+ /*
+	 * MOVDIR64B (%rdx), %rax.
+ *
+ * Both __src and __dst must be memory constraints in order to tell the
+ * compiler that no other memory accesses should be reordered around
+ * this one.
+ *
+	 * Also, both must be supplied as lvalues: this tells the compiler
+	 * what object (and what size) the instruction accesses, i.e. not
+	 * the pointers themselves but what they point to, hence the
+	 * dereferencing '*'.
+ */
+ asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+ : "+m" (*__dst)
+ : "m" (*__src), "a" (__dst), "d" (__src));
+}
+
+/**
+ * enqcmds - Enqueue a command in supervisor (CPL0) mode
+ * @dst: destination, in MMIO space (must be 512-bit aligned)
+ * @src: 512 bits memory operand
+ *
+ * The ENQCMDS instruction allows software to write a 512-bit command to
+ * a 512-bit-aligned special MMIO region that supports the instruction.
+ * A return status is loaded into the ZF flag in the RFLAGS register.
+ * ZF = 0 equates to success, and ZF = 1 indicates retry or error.
+ *
+ * This function issues the ENQCMDS instruction to submit data from
+ * kernel space to MMIO space, in a unit of 512 bits. Order of data access
+ * is not guaranteed, nor is a memory barrier performed afterwards. It
+ * returns 0 on success and -EAGAIN on failure.
+ *
+ * Warning: Do not use this helper unless your driver has checked that the
+ * ENQCMDS instruction is supported on the platform and the device accepts
+ * ENQCMDS.
+ */
+static inline int enqcmds(void __iomem *dst, const void *src)
+{
+ const struct { char _[64]; } *__src = src;
+ struct { char _[64]; } *__dst = dst;
+ int zf;
+
+ /*
+	 * ENQCMDS (%rdx), %rax
+ *
+ * See movdir64b()'s comment on operand specification.
+ */
+ asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90"
+ CC_SET(z)
+ : CC_OUT(z) (zf), "+m" (*__dst)
+ : "m" (*__src), "a" (__dst), "d" (__src));
+
+ /* Submission failure is indicated via EFLAGS.ZF=1 */
+ if (zf)
+ return -EAGAIN;
+
+ return 0;
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_X86_SPECIAL_INSNS_H */
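A driver-side sketch of the enqcmds() contract: 0 means the device accepted
the descriptor, -EAGAIN means retry. The portal pointer and the retry bound
here are assumptions for illustration only:

/* Sketch: submit one 64-byte descriptor with a bounded retry loop. */
static int sketch_submit_desc(void __iomem *portal, const void *desc)
{
	int retries = 10;	/* arbitrary bound, for illustration */
	int ret;

	do {
		ret = enqcmds(portal, desc);
		if (!ret)
			return 0;
		cpu_relax();	/* device signalled retry via ZF */
	} while (--retries);

	return ret;		/* still -EAGAIN: portal likely full */
}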
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 75314c3dbe47..6e450827f677 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -82,38 +82,6 @@ int strcmp(const char *cs, const char *ct);
#endif
-#define __HAVE_ARCH_MEMCPY_MCSAFE 1
-__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
- size_t cnt);
-DECLARE_STATIC_KEY_FALSE(mcsafe_key);
-
-/**
- * memcpy_mcsafe - copy memory with indication if a machine check happened
- *
- * @dst: destination address
- * @src: source address
- * @cnt: number of bytes to copy
- *
- * Low level memory copy function that catches machine checks
- * We only call into the "safe" function on systems that can
- * actually do machine check recovery. Everyone else can just
- * use memcpy().
- *
- * Return 0 for success, or number of bytes not copied if there was an
- * exception.
- */
-static __always_inline __must_check unsigned long
-memcpy_mcsafe(void *dst, const void *src, size_t cnt)
-{
-#ifdef CONFIG_X86_MCE
- if (static_branch_unlikely(&mcsafe_key))
- return __memcpy_mcsafe(dst, src, cnt);
- else
-#endif
- memcpy(dst, src, cnt);
- return 0;
-}
-
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
index fdb5b356e59b..0fd4a9dfb29c 100644
--- a/arch/x86/include/asm/sync_core.h
+++ b/arch/x86/include/asm/sync_core.h
@@ -5,6 +5,7 @@
#include <linux/preempt.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
+#include <asm/special_insns.h>
#ifdef CONFIG_X86_32
static inline void iret_to_self(void)
@@ -46,22 +47,34 @@ static inline void iret_to_self(void)
*
* b) Text was modified on a different CPU, may subsequently be
* executed on this CPU, and you want to make sure the new version
- * gets executed. This generally means you're calling this in a IPI.
+ * gets executed. This generally means you're calling this in an IPI.
*
* If you're calling this for a different reason, you're probably doing
* it wrong.
+ *
+ * Like all of Linux's memory ordering operations, this is a
+ * compiler barrier as well.
*/
static inline void sync_core(void)
{
/*
- * There are quite a few ways to do this. IRET-to-self is nice
- * because it works on every CPU, at any CPL (so it's compatible
- * with paravirtualization), and it never exits to a hypervisor.
- * The only down sides are that it's a bit slow (it seems to be
- * a bit more than 2x slower than the fastest options) and that
- * it unmasks NMIs. The "push %cs" is needed because, in
- * paravirtual environments, __KERNEL_CS may not be a valid CS
- * value when we do IRET directly.
+ * The SERIALIZE instruction is the most straightforward way to
+ * do this, but it is not universally available.
+ */
+ if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
+ serialize();
+ return;
+ }
+
+ /*
+ * For all other processors, there are quite a few ways to do this.
+ * IRET-to-self is nice because it works on every CPU, at any CPL
+ * (so it's compatible with paravirtualization), and it never exits
+ * to a hypervisor. The only downsides are that it's a bit slow
+ * (it seems to be a bit more than 2x slower than the fastest
+ * options) and that it unmasks NMIs. The "push %cs" is needed,
+ * because in paravirtual environments __KERNEL_CS may not be a
+ * valid CS value when we do IRET directly.
*
* In case NMI unmasking or performance ever becomes a problem,
* the next best option appears to be MOV-to-CR2 and an
@@ -71,9 +84,6 @@ static inline void sync_core(void)
* CPUID is the conventional way, but it's nasty: it doesn't
* exist on some 486-like CPUs, and it usually exits to a
* hypervisor.
- *
- * Like all of Linux's memory ordering operations, this is a
- * compiler barrier as well.
*/
iret_to_self();
}
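A typical cross-modification use, sketched below. The IPI plumbing is an
assumption; the kernel's own text-poke code is more involved. After patching
kernel text, every other CPU must pass through a serializing event before it
may execute the new bytes:

/* Sketch: force a serializing event on all CPUs after modifying text. */
static void sketch_sync_one(void *unused)
{
	sync_core();
}

static void sketch_text_poke_sync(void)
{
	on_each_cpu(sketch_sync_one, NULL, 1);
}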
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 714b1a30e7b0..df0b7bfc1234 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -35,6 +35,8 @@ extern int panic_on_unrecovered_nmi;
void math_emulate(struct math_emu_info *);
+bool fault_in_kernel_space(unsigned long address);
+
#ifdef CONFIG_VMAP_STACK
void __noreturn handle_stack_overflow(const char *message,
struct pt_regs *regs,
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index ecefaffd15d4..eff7fb847149 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -455,6 +455,15 @@ extern __must_check long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned len);
+#define copy_mc_to_kernel copy_mc_to_kernel
+
+unsigned long __must_check
+copy_mc_to_user(void *to, const void *from, unsigned len);
+#endif
+
/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/
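copy_mc_to_kernel() keeps the old memcpy_mcsafe() return convention, bytes
not copied, so callers translate a nonzero remainder into an error. A
sketch, with buffer names purely illustrative:

/* Sketch: machine-check-aware read; a nonzero return means the copy
 * stopped early because poison was consumed in the source range.
 */
static int sketch_read_pmem(void *dst, const void *pmem_src, unsigned len)
{
	unsigned long rem = copy_mc_to_kernel(dst, pmem_src, len);

	return rem ? -EIO : 0;
}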
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index bc10e3dc64fe..e7265a552f4f 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -47,22 +47,6 @@ copy_user_generic(void *to, const void *from, unsigned len)
}
static __always_inline __must_check unsigned long
-copy_to_user_mcsafe(void *to, const void *from, unsigned len)
-{
- unsigned long ret;
-
- __uaccess_begin();
- /*
- * Note, __memcpy_mcsafe() is explicitly used since it can
- * handle exceptions / faults. memcpy_mcsafe() may fall back to
- * memcpy() which lacks this handling.
- */
- ret = __memcpy_mcsafe(to, from, len);
- __uaccess_end();
- return ret;
-}
-
-static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
return copy_user_generic(dst, (__force void *)src, size);
@@ -102,8 +86,4 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
kasan_check_write(dst, size);
return __copy_user_flushcache(dst, src, size);
}
-
-unsigned long
-mcsafe_handle_tail(char *to, char *from, unsigned len);
-
#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 70050d0136c3..08b3d810dfba 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -5,8 +5,9 @@
/*
* UV BIOS layer definitions.
*
- * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (c) Russ Anderson <rja@sgi.com>
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
+ * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) Russ Anderson <rja@sgi.com>
*/
#include <linux/rtc.h>
@@ -71,6 +72,11 @@ struct uv_gam_range_entry {
u32 limit; /* PA bits 56:26 (UV_GAM_RANGE_SHFT) */
};
+#define UV_AT_SIZE	8	/* 7-character arch type + NUL terminator */
+struct uv_arch_type_entry {
+ char archtype[UV_AT_SIZE];
+};
+
#define UV_SYSTAB_SIG "UVST"
#define UV_SYSTAB_VERSION_1 1 /* UV2/3 BIOS version */
#define UV_SYSTAB_VERSION_UV4 0x400 /* UV4 BIOS base version */
@@ -79,10 +85,14 @@ struct uv_gam_range_entry {
#define UV_SYSTAB_VERSION_UV4_3 0x403 /* - GAM Range PXM Value */
#define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_3
+#define UV_SYSTAB_VERSION_UV5 0x500 /* UV5 GAM base version */
+#define UV_SYSTAB_VERSION_UV5_LATEST UV_SYSTAB_VERSION_UV5
+
#define UV_SYSTAB_TYPE_UNUSED 0 /* End of table (offset == 0) */
#define UV_SYSTAB_TYPE_GAM_PARAMS 1 /* GAM PARAM conversions */
#define UV_SYSTAB_TYPE_GAM_RNG_TBL 2 /* GAM entry table */
-#define UV_SYSTAB_TYPE_MAX 3
+#define UV_SYSTAB_TYPE_ARCH_TYPE 3 /* UV arch type */
+#define UV_SYSTAB_TYPE_MAX 4
/*
* The UV system table describes specific firmware
@@ -133,6 +143,7 @@ extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *);
extern int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus);
extern int uv_bios_init(void);
+extern unsigned long get_uv_systab_phys(bool msg);
extern unsigned long sn_rtc_cycles_per_second;
extern int uv_type;
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index e48aea9ba47d..172d3e4a9e4b 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -35,10 +35,8 @@ extern int is_uv_hubbed(int uvtype);
extern void uv_cpu_init(void);
extern void uv_nmi_init(void);
extern void uv_system_init(void);
-extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info);
-#else /* X86_UV */
+#else /* !X86_UV */
static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
static inline bool is_early_uv_system(void) { return 0; }
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
deleted file mode 100644
index cd24804955d7..000000000000
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ /dev/null
@@ -1,755 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * SGI UV Broadcast Assist Unit definitions
- *
- * Copyright (C) 2008-2011 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_X86_UV_UV_BAU_H
-#define _ASM_X86_UV_UV_BAU_H
-
-#include <linux/bitmap.h>
-#include <asm/idtentry.h>
-
-#define BITSPERBYTE 8
-
-/*
- * Broadcast Assist Unit messaging structures
- *
- * Selective Broadcast activations are induced by software action
- * specifying a particular 8-descriptor "set" via a 6-bit index written
- * to an MMR.
- * Thus there are 64 unique 512-byte sets of SB descriptors - one set for
- * each 6-bit index value. These descriptor sets are mapped in sequence
- * starting with set 0 located at the address specified in the
- * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
- * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
- *
- * We will use one set for sending BAU messages from each of the
- * cpu's on the uvhub.
- *
- * TLB shootdown will use the first of the 8 descriptors of each set.
- * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
- */
-
-#define MAX_CPUS_PER_UVHUB 128
-#define MAX_CPUS_PER_SOCKET 64
-#define ADP_SZ 64 /* hardware-provided max. */
-#define UV_CPUS_PER_AS 32 /* hardware-provided max. */
-#define ITEMS_PER_DESC 8
-/* the 'throttle' to prevent the hardware stay-busy bug */
-#define MAX_BAU_CONCURRENT 3
-#define UV_ACT_STATUS_MASK 0x3
-#define UV_ACT_STATUS_SIZE 2
-#define UV_DISTRIBUTION_SIZE 256
-#define UV_SW_ACK_NPENDING 8
-#define UV_NET_ENDPOINT_INTD 0x28
-#define UV_PAYLOADQ_GNODE_SHIFT 49
-#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
-#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
-#define UV_BAU_TUNABLES_DIR "sgi_uv"
-#define UV_BAU_TUNABLES_FILE "bau_tunables"
-#define WHITESPACE " \t\n"
-#define cpubit_isset(cpu, bau_local_cpumask) \
- test_bit((cpu), (bau_local_cpumask).bits)
-
-/* [19:16] SOFT_ACK timeout period 19: 1 is urgency 7 17:16 1 is multiplier */
-/*
- * UV2: Bit 19 selects between
- * (0): 10 microsecond timebase and
- * (1): 80 microseconds
- * we're using 560us
- */
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
-/* assuming UV3 is the same */
-
-#define BAU_MISC_CONTROL_MULT_MASK 3
-
-#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
-/* [30:28] URGENCY_7 an index into a table of times */
-#define BAU_URGENCY_7_SHIFT 28
-#define BAU_URGENCY_7_MASK 7
-
-#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL
-/* [45:40] BAU - BAU transaction timeout select - a multiplier */
-#define BAU_TRANS_SHIFT 40
-#define BAU_TRANS_MASK 0x3f
-
-/*
- * shorten some awkward names
- */
-#define AS_PUSH_SHIFT UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT
-#define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT
-#define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
-#define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD
-#define PREFETCH_HINT_SHFT UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT
-#define SB_STATUS_SHFT UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
-#define write_gmmr uv_write_global_mmr64
-#define write_lmmr uv_write_local_mmr
-#define read_lmmr uv_read_local_mmr
-#define read_gmmr uv_read_global_mmr64
-
-/*
- * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
- */
-#define DS_IDLE 0
-#define DS_ACTIVE 1
-#define DS_DESTINATION_TIMEOUT 2
-#define DS_SOURCE_TIMEOUT 3
-/*
- * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
- * values 1 and 3 will not occur
- * Decoded meaning ERROR BUSY AUX ERR
- * ------------------------------- ---- ----- -------
- * IDLE 0 0 0
- * BUSY (active) 0 1 0
- * SW Ack Timeout (destination) 1 0 0
- * SW Ack INTD rejected (strong NACK) 1 0 1
- * Source Side Time Out Detected 1 1 0
- * Destination Side PUT Failed 1 1 1
- */
-#define UV2H_DESC_IDLE 0
-#define UV2H_DESC_BUSY 2
-#define UV2H_DESC_DEST_TIMEOUT 4
-#define UV2H_DESC_DEST_STRONG_NACK 5
-#define UV2H_DESC_SOURCE_TIMEOUT 6
-#define UV2H_DESC_DEST_PUT_ERR 7
-
-/*
- * delay for 'plugged' timeout retries, in microseconds
- */
-#define PLUGGED_DELAY 10
-
-/*
- * threshholds at which to use IPI to free resources
- */
-/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
-#define PLUGSB4RESET 100
-/* after this many consecutive timeouts, use IPI to release resources */
-#define TIMEOUTSB4RESET 1
-/* at this number uses of IPI to release resources, giveup the request */
-#define IPI_RESET_LIMIT 1
-/* after this # consecutive successes, bump up the throttle if it was lowered */
-#define COMPLETE_THRESHOLD 5
-/* after this # of giveups (fall back to kernel IPI's) disable the use of
- the BAU for a period of time */
-#define GIVEUP_LIMIT 100
-
-#define UV_LB_SUBNODEID 0x10
-
-#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
-#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
-/* 4 bits of software ack period */
-#define UV2_ACK_MASK 0x7UL
-#define UV2_ACK_UNITS_SHFT 3
-#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
-
-/*
- * number of entries in the destination side payload queue
- */
-#define DEST_Q_SIZE 20
-/*
- * number of destination side software ack resources
- */
-#define DEST_NUM_RESOURCES 8
-/*
- * completion statuses for sending a TLB flush message
- */
-#define FLUSH_RETRY_PLUGGED 1
-#define FLUSH_RETRY_TIMEOUT 2
-#define FLUSH_GIVEUP 3
-#define FLUSH_COMPLETE 4
-
-/*
- * tuning the action when the numalink network is extremely delayed
- */
-#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in
- microseconds */
-#define CONGESTED_REPS 10 /* long delays averaged over
- this many broadcasts */
-#define DISABLED_PERIOD 10 /* time for the bau to be
- disabled, in seconds */
-/* see msg_type: */
-#define MSG_NOOP 0
-#define MSG_REGULAR 1
-#define MSG_RETRY 2
-
-#define BAU_DESC_QUALIFIER 0x534749
-
-enum uv_bau_version {
- UV_BAU_V2 = 2,
- UV_BAU_V3,
- UV_BAU_V4,
-};
-
-/*
- * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
- * If the 'multilevel' flag in the header portion of the descriptor
- * has been set to 0, then endpoint multi-unicast mode is selected.
- * The distribution specification (32 bytes) is interpreted as a 256-bit
- * distribution vector. Adjacent bits correspond to consecutive even numbered
- * nodeIDs. The result of adding the index of a given bit to the 15-bit
- * 'base_dest_nasid' field of the header corresponds to the
- * destination nodeID associated with that specified bit.
- */
-struct pnmask {
- unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
-};
-
-/*
- * mask of cpu's on a uvhub
- * (during initialization we need to check that unsigned long has
- * enough bits for max. cpu's per uvhub)
- */
-struct bau_local_cpumask {
- unsigned long bits;
-};
-
-/*
- * Payload: 16 bytes (128 bits) (bytes 0x20-0x2f of descriptor)
- * only 12 bytes (96 bits) of the payload area are usable.
- * An additional 3 bytes (bits 27:4) of the header address are carried
- * to the next bytes of the destination payload queue.
- * And an additional 2 bytes of the header Suppl_A field are also
- * carried to the destination payload queue.
- * But the first byte of the Suppl_A becomes bits 127:120 (the 16th byte)
- * of the destination payload queue, which is written by the hardware
- * with the s/w ack resource bit vector.
- * [ effective message contents (16 bytes (128 bits) maximum), not counting
- * the s/w ack bit vector ]
- */
-
-/**
- * struct uv2_3_bau_msg_payload - defines payload for INTD transactions
- * @address: Signifies a page or all TLB's of the cpu
- * @sending_cpu: CPU from which the message originates
- * @acknowledge_count: CPUs on the destination Hub that received the interrupt
- */
-struct uv2_3_bau_msg_payload {
- u64 address;
- u16 sending_cpu;
- u16 acknowledge_count;
-};
-
-/**
- * struct uv4_bau_msg_payload - defines payload for INTD transactions
- * @address: Signifies a page or all TLB's of the cpu
- * @sending_cpu: CPU from which the message originates
- * @acknowledge_count: CPUs on the destination Hub that received the interrupt
- * @qualifier: Set by source to verify origin of INTD broadcast
- */
-struct uv4_bau_msg_payload {
- u64 address;
- u16 sending_cpu;
- u16 acknowledge_count;
- u32 reserved:8;
- u32 qualifier:24;
-};
-
-/*
- * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
- * see figure 9-2 of harp_sys.pdf
- * assuming UV3 is the same
- */
-struct uv2_3_bau_msg_header {
- unsigned int base_dest_nasid:15; /* nasid of the first bit */
- /* bits 14:0 */ /* in uvhub map */
- unsigned int dest_subnodeid:5; /* must be 0x10, for the LB */
- /* bits 19:15 */
- unsigned int rsvd_1:1; /* must be zero */
- /* bit 20 */
- /* Address bits 59:21 */
- /* bits 25:2 of address (44:21) are payload */
- /* these next 24 bits become bytes 12-14 of msg */
- /* bits 28:21 land in byte 12 */
- unsigned int replied_to:1; /* sent as 0 by the source to
- byte 12 */
- /* bit 21 */
- unsigned int msg_type:3; /* software type of the
- message */
- /* bits 24:22 */
- unsigned int canceled:1; /* message canceled, resource
- is to be freed*/
- /* bit 25 */
- unsigned int payload_1:3; /* not currently used */
- /* bits 28:26 */
-
- /* bits 36:29 land in byte 13 */
- unsigned int payload_2a:3; /* not currently used */
- unsigned int payload_2b:5; /* not currently used */
- /* bits 36:29 */
-
- /* bits 44:37 land in byte 14 */
- unsigned int payload_3:8; /* not currently used */
- /* bits 44:37 */
-
- unsigned int rsvd_2:7; /* reserved */
- /* bits 51:45 */
- unsigned int swack_flag:1; /* software acknowledge flag */
- /* bit 52 */
- unsigned int rsvd_3a:3; /* must be zero */
- unsigned int rsvd_3b:8; /* must be zero */
- unsigned int rsvd_3c:8; /* must be zero */
- unsigned int rsvd_3d:3; /* must be zero */
- /* bits 74:53 */
- unsigned int fairness:3; /* usually zero */
- /* bits 77:75 */
-
- unsigned int sequence:16; /* message sequence number */
- /* bits 93:78 Suppl_A */
- unsigned int chaining:1; /* next descriptor is part of
- this activation*/
- /* bit 94 */
- unsigned int multilevel:1; /* multi-level multicast
- format */
- /* bit 95 */
- unsigned int rsvd_4:24; /* ordered / source node /
- source subnode / aging
- must be zero */
- /* bits 119:96 */
- unsigned int command:8; /* message type */
- /* bits 127:120 */
-};
-
-/*
- * The activation descriptor:
- * The format of the message to send, plus all accompanying control
- * Should be 64 bytes
- */
-struct bau_desc {
- struct pnmask distribution;
- /*
- * message template, consisting of header and payload:
- */
- union bau_msg_header {
- struct uv2_3_bau_msg_header uv2_3_hdr;
- } header;
-
- union bau_payload_header {
- struct uv2_3_bau_msg_payload uv2_3;
- struct uv4_bau_msg_payload uv4;
- } payload;
-};
-/* UV2:
- * -payload-- ---------header------
- * bytes 0-11 bits 70-78 bits 21-44
- * A B (2) C (3)
- *
- * A/B/C are moved to:
- * A C B
- * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
- * ------------payload queue-----------
- */
-
-/*
- * The payload queue on the destination side is an array of these.
- * With BAU_MISC_CONTROL set for software acknowledge mode, the messages
- * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
- * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
- * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
- * swack_vec and payload_2)
- * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
- * Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
- * operation."
- */
-struct bau_pq_entry {
- unsigned long address; /* signifies a page or all TLB's
- of the cpu */
- /* 64 bits, bytes 0-7 */
- unsigned short sending_cpu; /* cpu that sent the message */
- /* 16 bits, bytes 8-9 */
- unsigned short acknowledge_count; /* filled in by destination */
- /* 16 bits, bytes 10-11 */
- /* these next 3 bytes come from bits 58-81 of the message header */
- unsigned short replied_to:1; /* sent as 0 by the source */
- unsigned short msg_type:3; /* software message type */
- unsigned short canceled:1; /* sent as 0 by the source */
- unsigned short unused1:3; /* not currently using */
- /* byte 12 */
- unsigned char unused2a; /* not currently using */
- /* byte 13 */
- unsigned char unused2; /* not currently using */
- /* byte 14 */
- unsigned char swack_vec; /* filled in by the hardware */
- /* byte 15 (bits 127:120) */
- unsigned short sequence; /* message sequence number */
- /* bytes 16-17 */
- unsigned char unused4[2]; /* not currently using bytes 18-19 */
- /* bytes 18-19 */
- int number_of_cpus; /* filled in at destination */
- /* 32 bits, bytes 20-23 (aligned) */
- unsigned char unused5[8]; /* not using */
- /* bytes 24-31 */
-};
-
-struct msg_desc {
- struct bau_pq_entry *msg;
- int msg_slot;
- struct bau_pq_entry *queue_first;
- struct bau_pq_entry *queue_last;
-};
-
-struct reset_args {
- int sender;
-};
-
-/*
- * This structure is allocated per_cpu for UV TLB shootdown statistics.
- */
-struct ptc_stats {
- /* sender statistics */
- unsigned long s_giveup; /* number of fall backs to
- IPI-style flushes */
- unsigned long s_requestor; /* number of shootdown
- requests */
- unsigned long s_stimeout; /* source side timeouts */
- unsigned long s_dtimeout; /* destination side timeouts */
- unsigned long s_strongnacks; /* number of strong nack's */
- unsigned long s_time; /* time spent in sending side */
- unsigned long s_retriesok; /* successful retries */
- unsigned long s_ntargcpu; /* total number of cpu's
- targeted */
- unsigned long s_ntargself; /* times the sending cpu was
- targeted */
- unsigned long s_ntarglocals; /* targets of cpus on the local
- blade */
- unsigned long s_ntargremotes; /* targets of cpus on remote
- blades */
- unsigned long s_ntarglocaluvhub; /* targets of the local hub */
- unsigned long s_ntargremoteuvhub; /* remotes hubs targeted */
- unsigned long s_ntarguvhub; /* total number of uvhubs
- targeted */
- unsigned long s_ntarguvhub16; /* number of times target
- hubs >= 16*/
- unsigned long s_ntarguvhub8; /* number of times target
- hubs >= 8 */
- unsigned long s_ntarguvhub4; /* number of times target
- hubs >= 4 */
- unsigned long s_ntarguvhub2; /* number of times target
- hubs >= 2 */
- unsigned long s_ntarguvhub1; /* number of times target
- hubs == 1 */
- unsigned long s_resets_plug; /* ipi-style resets from plug
- state */
- unsigned long s_resets_timeout; /* ipi-style resets from
- timeouts */
- unsigned long s_busy; /* status stayed busy past
- s/w timer */
- unsigned long s_throttles; /* waits in throttle */
- unsigned long s_retry_messages; /* retry broadcasts */
- unsigned long s_bau_reenabled; /* for bau enable/disable */
- unsigned long s_bau_disabled; /* for bau enable/disable */
- unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
- unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
- unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
- unsigned long s_overipilimit; /* over the ipi reset limit */
- unsigned long s_giveuplimit; /* disables, over giveup limit*/
- unsigned long s_enters; /* entries to the driver */
- unsigned long s_ipifordisabled; /* fall back to IPI; disabled */
- unsigned long s_plugged; /* plugged by h/w bug*/
- unsigned long s_congested; /* giveup on long wait */
- /* destination statistics */
- unsigned long d_alltlb; /* times all tlb's on this
- cpu were flushed */
- unsigned long d_onetlb; /* times just one tlb on this
- cpu was flushed */
- unsigned long d_multmsg; /* interrupts with multiple
- messages */
- unsigned long d_nomsg; /* interrupts with no message */
- unsigned long d_time; /* time spent on destination
- side */
- unsigned long d_requestee; /* number of messages
- processed */
- unsigned long d_retries; /* number of retry messages
- processed */
- unsigned long d_canceled; /* number of messages canceled
- by retries */
- unsigned long d_nocanceled; /* retries that found nothing
- to cancel */
- unsigned long d_resets; /* number of ipi-style requests
- processed */
- unsigned long d_rcanceled; /* number of messages canceled
- by resets */
-};
-
-struct tunables {
- int *tunp;
- int deflt;
-};
-
-struct hub_and_pnode {
- short uvhub;
- short pnode;
-};
-
-struct socket_desc {
- short num_cpus;
- short cpu_number[MAX_CPUS_PER_SOCKET];
-};
-
-struct uvhub_desc {
- unsigned short socket_mask;
- short num_cpus;
- short uvhub;
- short pnode;
- struct socket_desc socket[2];
-};
-
-/**
- * struct bau_control
- * @status_mmr: location of status mmr, determined by uvhub_cpu
- * @status_index: index of ERR|BUSY bits in status mmr, determined by uvhub_cpu
- *
- * Per-cpu control struct containing CPU topology information and BAU tuneables.
- */
-struct bau_control {
- struct bau_desc *descriptor_base;
- struct bau_pq_entry *queue_first;
- struct bau_pq_entry *queue_last;
- struct bau_pq_entry *bau_msg_head;
- struct bau_control *uvhub_master;
- struct bau_control *socket_master;
- struct ptc_stats *statp;
- cpumask_t *cpumask;
- unsigned long timeout_interval;
- unsigned long set_bau_on_time;
- atomic_t active_descriptor_count;
- int plugged_tries;
- int timeout_tries;
- int ipi_attempts;
- int conseccompletes;
- u64 status_mmr;
- int status_index;
- bool nobau;
- short baudisabled;
- short cpu;
- short osnode;
- short uvhub_cpu;
- short uvhub;
- short uvhub_version;
- short cpus_in_socket;
- short cpus_in_uvhub;
- short partition_base_pnode;
- short busy; /* all were busy (war) */
- unsigned short message_number;
- unsigned short uvhub_quiesce;
- short socket_acknowledge_count[DEST_Q_SIZE];
- cycles_t send_message;
- cycles_t period_end;
- cycles_t period_time;
- spinlock_t uvhub_lock;
- spinlock_t queue_lock;
- spinlock_t disable_lock;
- /* tunables */
- int max_concurr;
- int max_concurr_const;
- int plugged_delay;
- int plugsb4reset;
- int timeoutsb4reset;
- int ipi_reset_limit;
- int complete_threshold;
- int cong_response_us;
- int cong_reps;
- cycles_t disabled_period;
- int period_giveups;
- int giveup_limit;
- long period_requests;
- struct hub_and_pnode *thp;
-};
-
-/* Abstracted BAU functions */
-struct bau_operations {
- unsigned long (*read_l_sw_ack)(void);
- unsigned long (*read_g_sw_ack)(int pnode);
- unsigned long (*bau_gpa_to_offset)(unsigned long vaddr);
- void (*write_l_sw_ack)(unsigned long mmr);
- void (*write_g_sw_ack)(int pnode, unsigned long mmr);
- void (*write_payload_first)(int pnode, unsigned long mmr);
- void (*write_payload_last)(int pnode, unsigned long mmr);
- int (*wait_completion)(struct bau_desc*,
- struct bau_control*, long try);
-};
-
-static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
-}
-
-static inline void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
-}
-
-static inline void write_mmr_activation(unsigned long index)
-{
- write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
-}
-
-static inline void write_gmmr_activation(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
-}
-
-static inline void write_mmr_proc_payload_first(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UV4H_LB_PROC_INTD_QUEUE_FIRST, mmr_image);
-}
-
-static inline void write_mmr_proc_payload_last(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UV4H_LB_PROC_INTD_QUEUE_LAST, mmr_image);
-}
-
-static inline void write_mmr_payload_first(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
-}
-
-static inline void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
-}
-
-static inline void write_mmr_payload_last(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
-}
-
-static inline void write_mmr_misc_control(int pnode, unsigned long mmr_image)
-{
- write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
-}
-
-static inline unsigned long read_mmr_misc_control(int pnode)
-{
- return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
-}
-
-static inline void write_mmr_sw_ack(unsigned long mr)
-{
- uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
-}
-
-static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
-{
- write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
-}
-
-static inline unsigned long read_mmr_sw_ack(void)
-{
- return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
-}
-
-static inline unsigned long read_gmmr_sw_ack(int pnode)
-{
- return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
-}
-
-static inline void write_mmr_proc_sw_ack(unsigned long mr)
-{
- uv_write_local_mmr(UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR, mr);
-}
-
-static inline void write_gmmr_proc_sw_ack(int pnode, unsigned long mr)
-{
- write_gmmr(pnode, UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR, mr);
-}
-
-static inline unsigned long read_mmr_proc_sw_ack(void)
-{
- return read_lmmr(UV4H_LB_PROC_INTD_SOFT_ACK_PENDING);
-}
-
-static inline unsigned long read_gmmr_proc_sw_ack(int pnode)
-{
- return read_gmmr(pnode, UV4H_LB_PROC_INTD_SOFT_ACK_PENDING);
-}
-
-static inline void write_mmr_data_config(int pnode, unsigned long mr)
-{
- uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
-}
-
-static inline int bau_uvhub_isset(int uvhub, struct pnmask *dstp)
-{
- return constant_test_bit(uvhub, &dstp->bits[0]);
-}
-static inline void bau_uvhub_set(int pnode, struct pnmask *dstp)
-{
- __set_bit(pnode, &dstp->bits[0]);
-}
-static inline void bau_uvhubs_clear(struct pnmask *dstp,
- int nbits)
-{
- bitmap_zero(&dstp->bits[0], nbits);
-}
-static inline int bau_uvhub_weight(struct pnmask *dstp)
-{
- return bitmap_weight((unsigned long *)&dstp->bits[0],
- UV_DISTRIBUTION_SIZE);
-}
-
-static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
-{
- bitmap_zero(&dstp->bits, nbits);
-}
-
-struct atomic_short {
- short counter;
-};
-
-/*
- * atomic_read_short - read a short atomic variable
- * @v: pointer of type atomic_short
- *
- * Atomically reads the value of @v.
- */
-static inline int atomic_read_short(const struct atomic_short *v)
-{
- return v->counter;
-}
-
-/*
- * atom_asr - add and return a short int
- * @i: short value to add
- * @v: pointer of type atomic_short
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static inline int atom_asr(short i, struct atomic_short *v)
-{
- short __i = i;
- asm volatile(LOCK_PREFIX "xaddw %0, %1"
- : "+r" (i), "+m" (v->counter)
- : : "memory");
- return i + __i;
-}
-
-/*
- * conditionally add 1 to *v, unless *v is >= u
- * return 0 if we cannot add 1 to *v because it is >= u
- * return 1 if we can add 1 to *v because it is < u
- * the add is atomic
- *
- * This is close to atomic_add_unless(), but this allows the 'u' value
- * to be lowered below the current 'v'. atomic_add_unless can only stop
- * on equal.
- */
-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
-{
- spin_lock(lock);
- if (atomic_read(v) >= u) {
- spin_unlock(lock);
- return 0;
- }
- atomic_inc(v);
- spin_unlock(lock);
- return 1;
-}
-
-void uv_bau_message_interrupt(struct pt_regs *regs);
-
-#endif /* _ASM_X86_UV_UV_BAU_H */
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 100d66806503..5002f52be332 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -5,6 +5,7 @@
*
* SGI UV architectural definitions
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
*/
@@ -129,17 +130,6 @@
*/
#define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_BLADES * 2)
-/* System Controller Interface Reg info */
-struct uv_scir_s {
- struct timer_list timer;
- unsigned long offset;
- unsigned long last;
- unsigned long idle_on;
- unsigned long idle_off;
- unsigned char state;
- unsigned char enabled;
-};
-
/* GAM (globally addressed memory) range table */
struct uv_gam_range_s {
u32 limit; /* PA bits 56:26 (GAM_RANGE_SHFT) */
@@ -155,6 +145,8 @@ struct uv_gam_range_s {
* available in the L3 cache on the cpu socket for the node.
*/
struct uv_hub_info_s {
+ unsigned int hub_type;
+ unsigned char hub_revision;
unsigned long global_mmr_base;
unsigned long global_mmr_shift;
unsigned long gpa_mask;
@@ -167,9 +159,9 @@ struct uv_hub_info_s {
unsigned char m_val;
unsigned char n_val;
unsigned char gr_table_len;
- unsigned char hub_revision;
unsigned char apic_pnode_shift;
unsigned char gpa_shift;
+ unsigned char nasid_shift;
unsigned char m_shift;
unsigned char n_lshift;
unsigned int gnode_extra;
@@ -191,16 +183,13 @@ struct uv_hub_info_s {
struct uv_cpu_info_s {
void *p_uv_hub_info;
unsigned char blade_cpu_id;
- struct uv_scir_s scir;
+ void *reserved;
};
DECLARE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);
#define uv_cpu_info this_cpu_ptr(&__uv_cpu_info)
#define uv_cpu_info_per(cpu) (&per_cpu(__uv_cpu_info, cpu))
-#define uv_scir_info (&uv_cpu_info->scir)
-#define uv_cpu_scir_info(cpu) (&uv_cpu_info_per(cpu)->scir)
-
/* Node specific hub common info struct */
extern void **__uv_hub_info_list;
static inline struct uv_hub_info_s *uv_hub_info_list(int node)
@@ -219,6 +208,17 @@ static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
}
+static inline int uv_hub_type(void)
+{
+ return uv_hub_info->hub_type;
+}
+
+static inline __init void uv_hub_type_set(int uvmask)
+{
+ uv_hub_info->hub_type = uvmask;
+}
+
/*
* HUB revision ranges for each UV HUB architecture.
* This is a software convention - NOT the hardware revision numbers in
@@ -228,39 +228,31 @@ static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
#define UV3_HUB_REVISION_BASE 5
#define UV4_HUB_REVISION_BASE 7
#define UV4A_HUB_REVISION_BASE 8 /* UV4 (fixed) rev 2 */
+#define UV5_HUB_REVISION_BASE 9
-static inline int is_uv2_hub(void)
-{
- return is_uv_hubbed(uv(2));
-}
-
-static inline int is_uv3_hub(void)
-{
- return is_uv_hubbed(uv(3));
-}
+static inline int is_uv(int uvmask) { return uv_hub_type() & uvmask; }
+static inline int is_uv1_hub(void) { return 0; }
+static inline int is_uv2_hub(void) { return is_uv(UV2); }
+static inline int is_uv3_hub(void) { return is_uv(UV3); }
+static inline int is_uv4a_hub(void) { return is_uv(UV4A); }
+static inline int is_uv4_hub(void) { return is_uv(UV4); }
+static inline int is_uv5_hub(void) { return is_uv(UV5); }
-/* First test "is UV4A", then "is UV4" */
-static inline int is_uv4a_hub(void)
-{
- if (is_uv_hubbed(uv(4)))
- return (uv_hub_info->hub_revision == UV4A_HUB_REVISION_BASE);
- return 0;
-}
+/*
+ * UV4A is a revision of UV4. On UV4A, both is_uv4_hub() and
+ * is_uv4a_hub() return true, while on UV4 only is_uv4_hub()
+ * returns true. So to distinguish the two, test is_uv4a_hub()
+ * first, then is_uv4_hub().
+ */
-static inline int is_uv4_hub(void)
-{
- return is_uv_hubbed(uv(4));
-}
+/* UVX class: UV2,3,4 */
+static inline int is_uvx_hub(void) { return is_uv(UVX); }
-static inline int is_uvx_hub(void)
-{
- return (is_uv_hubbed(-2) >= uv(2));
-}
+/* UVY class: UV5,..? */
+static inline int is_uvy_hub(void) { return is_uv(UVY); }
-static inline int is_uv_hub(void)
-{
- return is_uvx_hub();
-}
+/* Any UV Hubbed System */
+static inline int is_uv_hub(void) { return is_uv(UV_ANY); }
union uvh_apicid {
unsigned long v;
@@ -282,9 +274,11 @@ union uvh_apicid {
* g - GNODE (full 15-bit global nasid, right shifted 1)
* p - PNODE (local part of nsids, right shifted 1)
*/
-#define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
+#define UV_NASID_TO_PNODE(n) \
+ (((n) >> uv_hub_info->nasid_shift) & uv_hub_info->pnode_mask)
#define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
-#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1)
+#define UV_PNODE_TO_NASID(p) \
+ (UV_PNODE_TO_GNODE(p) << uv_hub_info->nasid_shift)
#define UV2_LOCAL_MMR_BASE 0xfa000000UL
#define UV2_GLOBAL_MMR32_BASE 0xfc000000UL
@@ -297,29 +291,42 @@ union uvh_apicid {
#define UV3_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
#define UV4_LOCAL_MMR_BASE 0xfa000000UL
-#define UV4_GLOBAL_MMR32_BASE 0xfc000000UL
+#define UV4_GLOBAL_MMR32_BASE 0
#define UV4_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
-#define UV4_GLOBAL_MMR32_SIZE (16UL * 1024 * 1024)
+#define UV4_GLOBAL_MMR32_SIZE 0
+
+#define UV5_LOCAL_MMR_BASE 0xfa000000UL
+#define UV5_GLOBAL_MMR32_BASE 0
+#define UV5_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
+#define UV5_GLOBAL_MMR32_SIZE 0
#define UV_LOCAL_MMR_BASE ( \
- is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
- is_uv3_hub() ? UV3_LOCAL_MMR_BASE : \
- /*is_uv4_hub*/ UV4_LOCAL_MMR_BASE)
+ is_uv(UV2) ? UV2_LOCAL_MMR_BASE : \
+ is_uv(UV3) ? UV3_LOCAL_MMR_BASE : \
+ is_uv(UV4) ? UV4_LOCAL_MMR_BASE : \
+ is_uv(UV5) ? UV5_LOCAL_MMR_BASE : \
+ 0)
#define UV_GLOBAL_MMR32_BASE ( \
- is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE : \
- is_uv3_hub() ? UV3_GLOBAL_MMR32_BASE : \
- /*is_uv4_hub*/ UV4_GLOBAL_MMR32_BASE)
+ is_uv(UV2) ? UV2_GLOBAL_MMR32_BASE : \
+ is_uv(UV3) ? UV3_GLOBAL_MMR32_BASE : \
+ is_uv(UV4) ? UV4_GLOBAL_MMR32_BASE : \
+ is_uv(UV5) ? UV5_GLOBAL_MMR32_BASE : \
+ 0)
#define UV_LOCAL_MMR_SIZE ( \
- is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
- is_uv3_hub() ? UV3_LOCAL_MMR_SIZE : \
- /*is_uv4_hub*/ UV4_LOCAL_MMR_SIZE)
+ is_uv(UV2) ? UV2_LOCAL_MMR_SIZE : \
+ is_uv(UV3) ? UV3_LOCAL_MMR_SIZE : \
+ is_uv(UV4) ? UV4_LOCAL_MMR_SIZE : \
+ is_uv(UV5) ? UV5_LOCAL_MMR_SIZE : \
+ 0)
#define UV_GLOBAL_MMR32_SIZE ( \
- is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE : \
- is_uv3_hub() ? UV3_GLOBAL_MMR32_SIZE : \
- /*is_uv4_hub*/ UV4_GLOBAL_MMR32_SIZE)
+ is_uv(UV2) ? UV2_GLOBAL_MMR32_SIZE : \
+ is_uv(UV3) ? UV3_GLOBAL_MMR32_SIZE : \
+ is_uv(UV4) ? UV4_GLOBAL_MMR32_SIZE : \
+ is_uv(UV5) ? UV5_GLOBAL_MMR32_SIZE : \
+ 0)
#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
@@ -720,7 +727,7 @@ extern void uv_nmi_setup_hubless(void);
#define UVH_TSC_SYNC_SHIFT_UV2K 16 /* UV2/3k have different bits */
#define UVH_TSC_SYNC_MASK 3 /* 0011 */
#define UVH_TSC_SYNC_VALID 3 /* 0011 */
-#define UVH_TSC_SYNC_INVALID 2 /* 0010 */
+#define UVH_TSC_SYNC_UNKNOWN 0 /* 0000 */
/* BMC sets a bit this MMR non-zero before sending an NMI */
#define UVH_NMI_MMR UVH_BIOS_KERNEL_MMR
@@ -728,19 +735,6 @@ extern void uv_nmi_setup_hubless(void);
#define UVH_NMI_MMR_SHIFT 63
#define UVH_NMI_MMR_TYPE "SCRATCH5"
-/* Newer SMM NMI handler, not present in all systems */
-#define UVH_NMI_MMRX UVH_EVENT_OCCURRED0
-#define UVH_NMI_MMRX_CLEAR UVH_EVENT_OCCURRED0_ALIAS
-#define UVH_NMI_MMRX_SHIFT UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT
-#define UVH_NMI_MMRX_TYPE "EXTIO_INT0"
-
-/* Non-zero indicates newer SMM NMI handler present */
-#define UVH_NMI_MMRX_SUPPORTED UVH_EXTIO_INT0_BROADCAST
-
-/* Indicates to BIOS that we want to use the newer SMM NMI handler */
-#define UVH_NMI_MMRX_REQ UVH_BIOS_KERNEL_MMR_ALIAS_2
-#define UVH_NMI_MMRX_REQ_SHIFT 62
-
struct uv_hub_nmi_s {
raw_spinlock_t nmi_lock;
atomic_t in_nmi; /* flag this node in UV NMI IRQ */
@@ -772,29 +766,6 @@ DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
#define UV_NMI_STATE_DUMP 2
#define UV_NMI_STATE_DUMP_DONE 3
-/* Update SCIR state */
-static inline void uv_set_scir_bits(unsigned char value)
-{
- if (uv_scir_info->state != value) {
- uv_scir_info->state = value;
- uv_write_local_mmr8(uv_scir_info->offset, value);
- }
-}
-
-static inline unsigned long uv_scir_offset(int apicid)
-{
- return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
-}
-
-static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
-{
- if (uv_cpu_scir_info(cpu)->state != value) {
- uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
- uv_cpu_scir_info(cpu)->offset, value);
- uv_cpu_scir_info(cpu)->state = value;
- }
-}
-
/*
* Get the minimum revision number of the hub chips within the partition.
* (See UVx_HUB_REVISION_BASE above for specific values.)
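Because is_uv() now takes a bitmask, one runtime test can cover several hub
generations, which is exactly what the UV_LOCAL_MMR_BASE-style macros above
rely on. Two illustrative helpers with hypothetical names:

/* Sketch: mask-based hub tests compose without chained comparisons. */
static bool sketch_is_uv4_or_newer(void)
{
	return is_uv(UV4 | UV4A | UV5);
}

static bool sketch_uses_uvy_layout(void)
{
	return is_uvy_hub();	/* UV5 and any future UVY-class hub */
}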
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 775bf143a072..57fa67373262 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -3,8 +3,9 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * SGI UV MMR definitions
+ * HPE UV MMR definitions
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (C) 2007-2016 Silicon Graphics, Inc. All rights reserved.
*/
@@ -18,42 +19,43 @@
* grouped by architecture types.
*
* UVH - definitions common to all UV hub types.
- * UVXH - definitions common to all UV eXtended hub types (currently 2, 3, 4).
- * UV2H - definitions specific to UV type 2 hub.
- * UV3H - definitions specific to UV type 3 hub.
+ * UVXH - definitions common to UVX class (2, 3, 4).
+ * UVYH - definitions common to UVY class (5).
+ * UV5H - definitions specific to UV type 5 hub.
+ * UV4AH - definitions specific to UV type 4A hub.
* UV4H - definitions specific to UV type 4 hub.
- *
- * So in general, MMR addresses and structures are identical on all hubs types.
- * These MMRs are identified as:
- * #define UVH_xxx <address>
- * union uvh_xxx {
- * unsigned long v;
- * struct uvh_int_cmpd_s {
- * } s;
- * };
+ * UV3H - definitions specific to UV type 3 hub.
+ * UV2H - definitions specific to UV type 2 hub.
*
* If the MMR exists on all hub types but has different addresses,
- * use a conditional operator to define the value at runtime.
- * #define UV2Hxxx b
- * #define UV3Hxxx c
- * #define UV4Hxxx d
- * #define UV4AHxxx e
- * #define UVHxxx (is_uv2_hub() ? UV2Hxxx :
- * (is_uv3_hub() ? UV3Hxxx :
- * (is_uv4a_hub() ? UV4AHxxx :
- * UV4Hxxx))
+ * use a conditional operator to define the value at runtime. Hub
+ * types for which the MMR is not defined are left blank.
+ * (UV4A variations are only generated if different from UV4.)
+ * #define UVHxxx (
+ * is_uv(UV5) ? UV5Hxxx value :
+ * is_uv(UV4A) ? UV4AHxxx value :
+ * is_uv(UV4) ? UV4Hxxx value :
+ * is_uv(UV3) ? UV3Hxxx value :
+ * is_uv(UV2) ? UV2Hxxx value :
+ * <common value> or <undefined value>)
+ *
+ * Class UVX has UVs (2|3|4|4A).
+ * Class UVY has UVs (5).
*
* union uvh_xxx {
* unsigned long v;
* struct uvh_xxx_s { # Common fields only
* } s;
- * struct uv2h_xxx_s { # Full UV2 definition (*)
- * } s2;
- * struct uv3h_xxx_s { # Full UV3 definition (*)
- * } s3;
- * (NOTE: No struct uv4ah_xxx_s members exist)
+ * struct uv5h_xxx_s { # Full UV5 definition (*)
+ * } s5;
+ * struct uv4ah_xxx_s { # Full UV4A definition (*)
+ * } s4a;
* struct uv4h_xxx_s { # Full UV4 definition (*)
* } s4;
+ * struct uv3h_xxx_s { # Full UV3 definition (*)
+ * } s3;
+ * struct uv2h_xxx_s { # Full UV2 definition (*)
+ * } s2;
* };
* (* - if present and different than the common struct)
*
@@ -62,429 +64,499 @@
* if the contents are the same for all hubs, only the "s" structure is
* generated.
*
- * If the MMR exists on ONLY 1 type of hub, no generic definition is
- * generated:
- * #define UVnH_xxx <uvn address>
- * union uvnh_xxx {
- * unsigned long v;
- * struct uvh_int_cmpd_s {
- * } sn;
- * };
- *
- * (GEN Flags: mflags_opt= undefs=function UV234=UVXH)
+ * (GEN Flags: undefs=function)
*/
+/* UV bit masks */
+#define UV2 (1 << 0)
+#define UV3 (1 << 1)
+#define UV4 (1 << 2)
+#define UV4A (1 << 3)
+#define UV5 (1 << 4)
+#define UVX (UV2|UV3|UV4)
+#define UVY (UV5)
+#define UV_ANY (~0)
+
+
+
+
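/*
 * The masks compose, so one is_uv() test can match a single hub, a
 * class, or any UV system. A sketch, assuming is_uv(m) is nonzero when
 * the booted hub's bit is in m (its definition lives in uv_hub.h):
 *
 *	if (is_uv(UVX))                            # UV2, UV3 or UV4
 *		...legacy UVX path...
 *	else if (is_uv(UV5))                       # UVY class so far
 *		...UV5 path...
 */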
#define UV_MMR_ENABLE (1UL << 63)
+#define UV1_HUB_PART_NUMBER 0x88a5
#define UV2_HUB_PART_NUMBER 0x8eb8
#define UV2_HUB_PART_NUMBER_X 0x1111
#define UV3_HUB_PART_NUMBER 0x9578
#define UV3_HUB_PART_NUMBER_X 0x4321
#define UV4_HUB_PART_NUMBER 0x99a1
+#define UV5_HUB_PART_NUMBER 0xa171
/* Error function to catch undefined references */
extern unsigned long uv_undefined(char *str);
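/*
 * For a hypothetical MMR that only UV5 implements, a generated macro
 * can fall through to uv_undefined(), turning a stray reference on an
 * older hub into a run-time catch rather than a silent 0:
 *
 *	#define UVH_FOO ( \
 *		is_uv(UV5) ? UV5H_FOO : \
 *		uv_undefined("UVH_FOO"))
 */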
/* ========================================================================= */
-/* UVH_BAU_DATA_BROADCAST */
-/* ========================================================================= */
-#define UVH_BAU_DATA_BROADCAST 0x61688UL
-
-#define UV2H_BAU_DATA_BROADCAST_32 0x440
-#define UV3H_BAU_DATA_BROADCAST_32 0x440
-#define UV4H_BAU_DATA_BROADCAST_32 0x360
-#define UVH_BAU_DATA_BROADCAST_32 ( \
- is_uv2_hub() ? UV2H_BAU_DATA_BROADCAST_32 : \
- is_uv3_hub() ? UV3H_BAU_DATA_BROADCAST_32 : \
- /*is_uv4_hub*/ UV4H_BAU_DATA_BROADCAST_32)
-
-#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
-#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
-
-
-union uvh_bau_data_broadcast_u {
- unsigned long v;
- struct uvh_bau_data_broadcast_s {
- unsigned long enable:1; /* RW */
- unsigned long rsvd_1_63:63;
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_BAU_DATA_CONFIG */
-/* ========================================================================= */
-#define UVH_BAU_DATA_CONFIG 0x61680UL
-
-#define UV2H_BAU_DATA_CONFIG_32 0x438
-#define UV3H_BAU_DATA_CONFIG_32 0x438
-#define UV4H_BAU_DATA_CONFIG_32 0x358
-#define UVH_BAU_DATA_CONFIG_32 ( \
- is_uv2_hub() ? UV2H_BAU_DATA_CONFIG_32 : \
- is_uv3_hub() ? UV3H_BAU_DATA_CONFIG_32 : \
- /*is_uv4_hub*/ UV4H_BAU_DATA_CONFIG_32)
-
-#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
-#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
-#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
-#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
-#define UVH_BAU_DATA_CONFIG_P_SHFT 13
-#define UVH_BAU_DATA_CONFIG_T_SHFT 15
-#define UVH_BAU_DATA_CONFIG_M_SHFT 16
-#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
-#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
-
-
-union uvh_bau_data_config_u {
- unsigned long v;
- struct uvh_bau_data_config_s {
- unsigned long vector_:8; /* RW */
- unsigned long dm:3; /* RW */
- unsigned long destmode:1; /* RW */
- unsigned long status:1; /* RO */
- unsigned long p:1; /* RO */
- unsigned long rsvd_14:1;
- unsigned long t:1; /* RO */
- unsigned long m:1; /* RW */
- unsigned long rsvd_17_31:15;
- unsigned long apic_id:32; /* RW */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_EVENT_OCCURRED0 */
/* ========================================================================= */
#define UVH_EVENT_OCCURRED0 0x70000UL
-#define UVH_EVENT_OCCURRED0_32 0x5e8
+/* UVH common defines */
#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
-#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
-#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+/* UVXH common defines */
#define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT 2
-#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
-#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
-#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
-#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
-#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
-#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
-#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
-#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
-#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
-#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
-#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
-#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
#define UVXH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
+#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
#define UVXH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
+#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
#define UVXH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
+#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
#define UVXH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
+#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
#define UVXH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
+#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
#define UVXH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
+#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
#define UVXH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
+#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
#define UVXH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
+#define UVXH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UVXH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
+#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
#define UVXH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
-#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
-#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
-
-#define UV3H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
-#define UV3H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
-#define UV3H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
-#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
-#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
-#define UV3H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
-#define UV3H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
-#define UV3H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
-#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
-#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
-#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
-#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
-#define UV3H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
-#define UV3H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
-#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
-#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
-#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
-#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
-#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
-#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
-#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
-#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
-#define UV3H_EVENT_OCCURRED0_IPI_INT_SHFT 53
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
-#define UV3H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
-#define UV3H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
-#define UV3H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
-#define UV3H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
-#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
-#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
-#define UV3H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
-#define UV3H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
-#define UV3H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
-#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
-#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
-#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
-#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
-#define UV3H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
-#define UV3H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
-#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
-#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
-#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
-#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
-#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
-#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
-#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
-#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
-#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
-#define UV3H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
-#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
-#define UV3H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
-
+/* UVYH common defines */
+#define UVYH_EVENT_OCCURRED0_KT_HCERR_SHFT 1
+#define UVYH_EVENT_OCCURRED0_KT_HCERR_MASK 0x0000000000000002UL
+#define UVYH_EVENT_OCCURRED0_RH0_HCERR_SHFT 2
+#define UVYH_EVENT_OCCURRED0_RH0_HCERR_MASK 0x0000000000000004UL
+#define UVYH_EVENT_OCCURRED0_RH1_HCERR_SHFT 3
+#define UVYH_EVENT_OCCURRED0_RH1_HCERR_MASK 0x0000000000000008UL
+#define UVYH_EVENT_OCCURRED0_LH0_HCERR_SHFT 4
+#define UVYH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000010UL
+#define UVYH_EVENT_OCCURRED0_LH1_HCERR_SHFT 5
+#define UVYH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000020UL
+#define UVYH_EVENT_OCCURRED0_LH2_HCERR_SHFT 6
+#define UVYH_EVENT_OCCURRED0_LH2_HCERR_MASK 0x0000000000000040UL
+#define UVYH_EVENT_OCCURRED0_LH3_HCERR_SHFT 7
+#define UVYH_EVENT_OCCURRED0_LH3_HCERR_MASK 0x0000000000000080UL
+#define UVYH_EVENT_OCCURRED0_XB_HCERR_SHFT 8
+#define UVYH_EVENT_OCCURRED0_XB_HCERR_MASK 0x0000000000000100UL
+#define UVYH_EVENT_OCCURRED0_RDM_HCERR_SHFT 9
+#define UVYH_EVENT_OCCURRED0_RDM_HCERR_MASK 0x0000000000000200UL
+#define UVYH_EVENT_OCCURRED0_NI0_HCERR_SHFT 10
+#define UVYH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000400UL
+#define UVYH_EVENT_OCCURRED0_NI1_HCERR_SHFT 11
+#define UVYH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000800UL
+#define UVYH_EVENT_OCCURRED0_LB_AOERR0_SHFT 12
+#define UVYH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000001000UL
+#define UVYH_EVENT_OCCURRED0_KT_AOERR0_SHFT 13
+#define UVYH_EVENT_OCCURRED0_KT_AOERR0_MASK 0x0000000000002000UL
+#define UVYH_EVENT_OCCURRED0_RH0_AOERR0_SHFT 14
+#define UVYH_EVENT_OCCURRED0_RH0_AOERR0_MASK 0x0000000000004000UL
+#define UVYH_EVENT_OCCURRED0_RH1_AOERR0_SHFT 15
+#define UVYH_EVENT_OCCURRED0_RH1_AOERR0_MASK 0x0000000000008000UL
+#define UVYH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 16
+#define UVYH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000010000UL
+#define UVYH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 17
+#define UVYH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000020000UL
+#define UVYH_EVENT_OCCURRED0_LH2_AOERR0_SHFT 18
+#define UVYH_EVENT_OCCURRED0_LH2_AOERR0_MASK 0x0000000000040000UL
+#define UVYH_EVENT_OCCURRED0_LH3_AOERR0_SHFT 19
+#define UVYH_EVENT_OCCURRED0_LH3_AOERR0_MASK 0x0000000000080000UL
+#define UVYH_EVENT_OCCURRED0_XB_AOERR0_SHFT 20
+#define UVYH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000100000UL
+#define UVYH_EVENT_OCCURRED0_RDM_AOERR0_SHFT 21
+#define UVYH_EVENT_OCCURRED0_RDM_AOERR0_MASK 0x0000000000200000UL
+#define UVYH_EVENT_OCCURRED0_RT0_AOERR0_SHFT 22
+#define UVYH_EVENT_OCCURRED0_RT0_AOERR0_MASK 0x0000000000400000UL
+#define UVYH_EVENT_OCCURRED0_RT1_AOERR0_SHFT 23
+#define UVYH_EVENT_OCCURRED0_RT1_AOERR0_MASK 0x0000000000800000UL
+#define UVYH_EVENT_OCCURRED0_NI0_AOERR0_SHFT 24
+#define UVYH_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000001000000UL
+#define UVYH_EVENT_OCCURRED0_NI1_AOERR0_SHFT 25
+#define UVYH_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000002000000UL
+#define UVYH_EVENT_OCCURRED0_LB_AOERR1_SHFT 26
+#define UVYH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000004000000UL
+#define UVYH_EVENT_OCCURRED0_KT_AOERR1_SHFT 27
+#define UVYH_EVENT_OCCURRED0_KT_AOERR1_MASK 0x0000000008000000UL
+#define UVYH_EVENT_OCCURRED0_RH0_AOERR1_SHFT 28
+#define UVYH_EVENT_OCCURRED0_RH0_AOERR1_MASK 0x0000000010000000UL
+#define UVYH_EVENT_OCCURRED0_RH1_AOERR1_SHFT 29
+#define UVYH_EVENT_OCCURRED0_RH1_AOERR1_MASK 0x0000000020000000UL
+#define UVYH_EVENT_OCCURRED0_LH0_AOERR1_SHFT 30
+#define UVYH_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000040000000UL
+#define UVYH_EVENT_OCCURRED0_LH1_AOERR1_SHFT 31
+#define UVYH_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000080000000UL
+#define UVYH_EVENT_OCCURRED0_LH2_AOERR1_SHFT 32
+#define UVYH_EVENT_OCCURRED0_LH2_AOERR1_MASK 0x0000000100000000UL
+#define UVYH_EVENT_OCCURRED0_LH3_AOERR1_SHFT 33
+#define UVYH_EVENT_OCCURRED0_LH3_AOERR1_MASK 0x0000000200000000UL
+#define UVYH_EVENT_OCCURRED0_XB_AOERR1_SHFT 34
+#define UVYH_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000400000000UL
+#define UVYH_EVENT_OCCURRED0_RDM_AOERR1_SHFT 35
+#define UVYH_EVENT_OCCURRED0_RDM_AOERR1_MASK 0x0000000800000000UL
+#define UVYH_EVENT_OCCURRED0_RT0_AOERR1_SHFT 36
+#define UVYH_EVENT_OCCURRED0_RT0_AOERR1_MASK 0x0000001000000000UL
+#define UVYH_EVENT_OCCURRED0_RT1_AOERR1_SHFT 37
+#define UVYH_EVENT_OCCURRED0_RT1_AOERR1_MASK 0x0000002000000000UL
+#define UVYH_EVENT_OCCURRED0_NI0_AOERR1_SHFT 38
+#define UVYH_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000004000000000UL
+#define UVYH_EVENT_OCCURRED0_NI1_AOERR1_SHFT 39
+#define UVYH_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000008000000000UL
+#define UVYH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 40
+#define UVYH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000010000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 41
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000020000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 42
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000040000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 43
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000080000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 44
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000100000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 45
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000200000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 46
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000400000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 47
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000800000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 48
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0001000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 49
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0002000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 50
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0004000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 51
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0008000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 52
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0010000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 53
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0020000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 54
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0040000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 55
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0080000000000000UL
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 56
+#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0100000000000000UL
+#define UVYH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 57
+#define UVYH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0200000000000000UL
+#define UVYH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 58
+#define UVYH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0400000000000000UL
+#define UVYH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 59
+#define UVYH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0800000000000000UL
+#define UVYH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 60
+#define UVYH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x1000000000000000UL
+#define UVYH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 61
+#define UVYH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x2000000000000000UL
+
+/* UV4 unique defines */
#define UV4H_EVENT_OCCURRED0_KT_HCERR_SHFT 1
-#define UV4H_EVENT_OCCURRED0_KT_AOERR0_SHFT 10
-#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR0_SHFT 17
-#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR0_SHFT 18
-#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR0_SHFT 19
-#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR0_SHFT 20
-#define UV4H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 21
-#define UV4H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 22
-#define UV4H_EVENT_OCCURRED0_LB_AOERR1_SHFT 23
-#define UV4H_EVENT_OCCURRED0_KT_AOERR1_SHFT 24
-#define UV4H_EVENT_OCCURRED0_RH_AOERR1_SHFT 25
-#define UV4H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 26
-#define UV4H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 27
-#define UV4H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 28
-#define UV4H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 29
-#define UV4H_EVENT_OCCURRED0_XB_AOERR1_SHFT 30
-#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR1_SHFT 31
-#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR1_SHFT 32
-#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR1_SHFT 33
-#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR1_SHFT 34
-#define UV4H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 35
-#define UV4H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 36
-#define UV4H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 37
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 38
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 39
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 40
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 41
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 42
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 43
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 44
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 45
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 46
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 47
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 48
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 49
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 50
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 51
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 52
-#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 53
-#define UV4H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 54
-#define UV4H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 55
-#define UV4H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 56
-#define UV4H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 57
-#define UV4H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 58
-#define UV4H_EVENT_OCCURRED0_IPI_INT_SHFT 59
-#define UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 60
-#define UV4H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 61
-#define UV4H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 62
-#define UV4H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 63
#define UV4H_EVENT_OCCURRED0_KT_HCERR_MASK 0x0000000000000002UL
+#define UV4H_EVENT_OCCURRED0_KT_AOERR0_SHFT 10
#define UV4H_EVENT_OCCURRED0_KT_AOERR0_MASK 0x0000000000000400UL
+#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR0_SHFT 17
#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR0_MASK 0x0000000000020000UL
+#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR0_SHFT 18
#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR0_MASK 0x0000000000040000UL
+#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR0_SHFT 19
#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR0_MASK 0x0000000000080000UL
+#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR0_SHFT 20
#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR0_MASK 0x0000000000100000UL
+#define UV4H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 21
#define UV4H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000200000UL
+#define UV4H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 22
#define UV4H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000400000UL
+#define UV4H_EVENT_OCCURRED0_LB_AOERR1_SHFT 23
#define UV4H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000800000UL
+#define UV4H_EVENT_OCCURRED0_KT_AOERR1_SHFT 24
#define UV4H_EVENT_OCCURRED0_KT_AOERR1_MASK 0x0000000001000000UL
+#define UV4H_EVENT_OCCURRED0_RH_AOERR1_SHFT 25
#define UV4H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000002000000UL
+#define UV4H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 26
#define UV4H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000004000000UL
+#define UV4H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 27
#define UV4H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000008000000UL
+#define UV4H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 28
#define UV4H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000010000000UL
+#define UV4H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 29
#define UV4H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000020000000UL
+#define UV4H_EVENT_OCCURRED0_XB_AOERR1_SHFT 30
#define UV4H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000040000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR1_SHFT 31
#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR1_MASK 0x0000000080000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR1_SHFT 32
#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR1_MASK 0x0000000100000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR1_SHFT 33
#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR1_MASK 0x0000000200000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR1_SHFT 34
#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR1_MASK 0x0000000400000000UL
+#define UV4H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 35
#define UV4H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000800000000UL
+#define UV4H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 36
#define UV4H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000001000000000UL
+#define UV4H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 37
#define UV4H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000002000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 38
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000004000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 39
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000008000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 40
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000010000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 41
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000020000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 42
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000040000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 43
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000080000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 44
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000100000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 45
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000200000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 46
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000400000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 47
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000800000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 48
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0001000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 49
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0002000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 50
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0004000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 51
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0008000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 52
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0010000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 53
#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0020000000000000UL
+#define UV4H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 54
#define UV4H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0040000000000000UL
+#define UV4H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 55
#define UV4H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0080000000000000UL
+#define UV4H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 56
#define UV4H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0100000000000000UL
+#define UV4H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 57
#define UV4H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0200000000000000UL
+#define UV4H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 58
#define UV4H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0400000000000000UL
+#define UV4H_EVENT_OCCURRED0_IPI_INT_SHFT 59
#define UV4H_EVENT_OCCURRED0_IPI_INT_MASK 0x0800000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 60
#define UV4H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x1000000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 61
#define UV4H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x2000000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 62
#define UV4H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x4000000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 63
#define UV4H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x8000000000000000UL
+/* UV3 unique defines */
+#define UV3H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
+#define UV3H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
+#define UV3H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
+#define UV3H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
+#define UV3H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
+#define UV3H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
+#define UV3H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
+#define UV3H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
+#define UV3H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
+#define UV3H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
+#define UV3H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
+#define UV3H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
+#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
+#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
+#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
+#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
+#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
+#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
+#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
+#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
+#define UV3H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
+#define UV3H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
+#define UV3H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
+#define UV3H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
+#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
+#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
+#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
+#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
+#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
+#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
+#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
+#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
+#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
+#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
+#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
+#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
+#define UV3H_EVENT_OCCURRED0_IPI_INT_SHFT 53
+#define UV3H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
+#define UV3H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
+#define UV3H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
+
+/* UV2 unique defines */
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
+#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
+#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
+
+#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK ( \
+ is_uv(UV4) ? 0x1000000000000000UL : \
+ is_uv(UV3) ? 0x0040000000000000UL : \
+ is_uv(UV2) ? 0x0040000000000000UL : \
+ 0)
#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT ( \
- is_uv2_hub() ? UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT : \
- is_uv3_hub() ? UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT : \
- /*is_uv4_hub*/ UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
+ is_uv(UV4) ? 60 : \
+ is_uv(UV3) ? 54 : \
+ is_uv(UV2) ? 54 : \
+ -1)
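/*
 * Since the per-hub values fold into one UVH_ macro at run time, the
 * NMI path can stay hub-agnostic. A hedged sketch (uv_read_local_mmr()
 * is the existing accessor; the surrounding check is illustrative):
 *
 *	if (uv_read_local_mmr(UVH_EVENT_OCCURRED0) &
 *	    UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK)   # 0 on unrecognized hubs
 *		...handle extio_int0...
 */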
union uvh_event_occurred0_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_event_occurred0_s {
- unsigned long lb_hcerr:1; /* RW, W1C */
- unsigned long rsvd_1_10:10;
- unsigned long rh_aoerr0:1; /* RW, W1C */
- unsigned long rsvd_12_63:52;
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long rsvd_1_63:63;
} s;
+
+ /* UVXH common struct */
struct uvxh_event_occurred0_s {
unsigned long lb_hcerr:1; /* RW */
unsigned long rsvd_1:1;
@@ -505,6 +577,142 @@ union uvh_event_occurred0_u {
unsigned long xb_aoerr0:1; /* RW */
unsigned long rsvd_17_63:47;
} sx;
+
+ /* UVYH common struct */
+ struct uvyh_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long kt_hcerr:1; /* RW */
+ unsigned long rh0_hcerr:1; /* RW */
+ unsigned long rh1_hcerr:1; /* RW */
+ unsigned long lh0_hcerr:1; /* RW */
+ unsigned long lh1_hcerr:1; /* RW */
+ unsigned long lh2_hcerr:1; /* RW */
+ unsigned long lh3_hcerr:1; /* RW */
+ unsigned long xb_hcerr:1; /* RW */
+ unsigned long rdm_hcerr:1; /* RW */
+ unsigned long ni0_hcerr:1; /* RW */
+ unsigned long ni1_hcerr:1; /* RW */
+ unsigned long lb_aoerr0:1; /* RW */
+ unsigned long kt_aoerr0:1; /* RW */
+ unsigned long rh0_aoerr0:1; /* RW */
+ unsigned long rh1_aoerr0:1; /* RW */
+ unsigned long lh0_aoerr0:1; /* RW */
+ unsigned long lh1_aoerr0:1; /* RW */
+ unsigned long lh2_aoerr0:1; /* RW */
+ unsigned long lh3_aoerr0:1; /* RW */
+ unsigned long xb_aoerr0:1; /* RW */
+ unsigned long rdm_aoerr0:1; /* RW */
+ unsigned long rt0_aoerr0:1; /* RW */
+ unsigned long rt1_aoerr0:1; /* RW */
+ unsigned long ni0_aoerr0:1; /* RW */
+ unsigned long ni1_aoerr0:1; /* RW */
+ unsigned long lb_aoerr1:1; /* RW */
+ unsigned long kt_aoerr1:1; /* RW */
+ unsigned long rh0_aoerr1:1; /* RW */
+ unsigned long rh1_aoerr1:1; /* RW */
+ unsigned long lh0_aoerr1:1; /* RW */
+ unsigned long lh1_aoerr1:1; /* RW */
+ unsigned long lh2_aoerr1:1; /* RW */
+ unsigned long lh3_aoerr1:1; /* RW */
+ unsigned long xb_aoerr1:1; /* RW */
+ unsigned long rdm_aoerr1:1; /* RW */
+ unsigned long rt0_aoerr1:1; /* RW */
+ unsigned long rt1_aoerr1:1; /* RW */
+ unsigned long ni0_aoerr1:1; /* RW */
+ unsigned long ni1_aoerr1:1; /* RW */
+ unsigned long system_shutdown_int:1; /* RW */
+ unsigned long lb_irq_int_0:1; /* RW */
+ unsigned long lb_irq_int_1:1; /* RW */
+ unsigned long lb_irq_int_2:1; /* RW */
+ unsigned long lb_irq_int_3:1; /* RW */
+ unsigned long lb_irq_int_4:1; /* RW */
+ unsigned long lb_irq_int_5:1; /* RW */
+ unsigned long lb_irq_int_6:1; /* RW */
+ unsigned long lb_irq_int_7:1; /* RW */
+ unsigned long lb_irq_int_8:1; /* RW */
+ unsigned long lb_irq_int_9:1; /* RW */
+ unsigned long lb_irq_int_10:1; /* RW */
+ unsigned long lb_irq_int_11:1; /* RW */
+ unsigned long lb_irq_int_12:1; /* RW */
+ unsigned long lb_irq_int_13:1; /* RW */
+ unsigned long lb_irq_int_14:1; /* RW */
+ unsigned long lb_irq_int_15:1; /* RW */
+ unsigned long l1_nmi_int:1; /* RW */
+ unsigned long stop_clock:1; /* RW */
+ unsigned long asic_to_l1:1; /* RW */
+ unsigned long l1_to_asic:1; /* RW */
+ unsigned long la_seq_trigger:1; /* RW */
+ unsigned long rsvd_62_63:2;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long kt_hcerr:1; /* RW */
+ unsigned long rh0_hcerr:1; /* RW */
+ unsigned long rh1_hcerr:1; /* RW */
+ unsigned long lh0_hcerr:1; /* RW */
+ unsigned long lh1_hcerr:1; /* RW */
+ unsigned long lh2_hcerr:1; /* RW */
+ unsigned long lh3_hcerr:1; /* RW */
+ unsigned long xb_hcerr:1; /* RW */
+ unsigned long rdm_hcerr:1; /* RW */
+ unsigned long ni0_hcerr:1; /* RW */
+ unsigned long ni1_hcerr:1; /* RW */
+ unsigned long lb_aoerr0:1; /* RW */
+ unsigned long kt_aoerr0:1; /* RW */
+ unsigned long rh0_aoerr0:1; /* RW */
+ unsigned long rh1_aoerr0:1; /* RW */
+ unsigned long lh0_aoerr0:1; /* RW */
+ unsigned long lh1_aoerr0:1; /* RW */
+ unsigned long lh2_aoerr0:1; /* RW */
+ unsigned long lh3_aoerr0:1; /* RW */
+ unsigned long xb_aoerr0:1; /* RW */
+ unsigned long rdm_aoerr0:1; /* RW */
+ unsigned long rt0_aoerr0:1; /* RW */
+ unsigned long rt1_aoerr0:1; /* RW */
+ unsigned long ni0_aoerr0:1; /* RW */
+ unsigned long ni1_aoerr0:1; /* RW */
+ unsigned long lb_aoerr1:1; /* RW */
+ unsigned long kt_aoerr1:1; /* RW */
+ unsigned long rh0_aoerr1:1; /* RW */
+ unsigned long rh1_aoerr1:1; /* RW */
+ unsigned long lh0_aoerr1:1; /* RW */
+ unsigned long lh1_aoerr1:1; /* RW */
+ unsigned long lh2_aoerr1:1; /* RW */
+ unsigned long lh3_aoerr1:1; /* RW */
+ unsigned long xb_aoerr1:1; /* RW */
+ unsigned long rdm_aoerr1:1; /* RW */
+ unsigned long rt0_aoerr1:1; /* RW */
+ unsigned long rt1_aoerr1:1; /* RW */
+ unsigned long ni0_aoerr1:1; /* RW */
+ unsigned long ni1_aoerr1:1; /* RW */
+ unsigned long system_shutdown_int:1; /* RW */
+ unsigned long lb_irq_int_0:1; /* RW */
+ unsigned long lb_irq_int_1:1; /* RW */
+ unsigned long lb_irq_int_2:1; /* RW */
+ unsigned long lb_irq_int_3:1; /* RW */
+ unsigned long lb_irq_int_4:1; /* RW */
+ unsigned long lb_irq_int_5:1; /* RW */
+ unsigned long lb_irq_int_6:1; /* RW */
+ unsigned long lb_irq_int_7:1; /* RW */
+ unsigned long lb_irq_int_8:1; /* RW */
+ unsigned long lb_irq_int_9:1; /* RW */
+ unsigned long lb_irq_int_10:1; /* RW */
+ unsigned long lb_irq_int_11:1; /* RW */
+ unsigned long lb_irq_int_12:1; /* RW */
+ unsigned long lb_irq_int_13:1; /* RW */
+ unsigned long lb_irq_int_14:1; /* RW */
+ unsigned long lb_irq_int_15:1; /* RW */
+ unsigned long l1_nmi_int:1; /* RW */
+ unsigned long stop_clock:1; /* RW */
+ unsigned long asic_to_l1:1; /* RW */
+ unsigned long l1_to_asic:1; /* RW */
+ unsigned long la_seq_trigger:1; /* RW */
+ unsigned long rsvd_62_63:2;
+ } s5;
+
+ /* UV4 unique struct */
struct uv4h_event_occurred0_s {
unsigned long lb_hcerr:1; /* RW */
unsigned long kt_hcerr:1; /* RW */
@@ -571,13 +779,1355 @@ union uvh_event_occurred0_u {
unsigned long extio_int2:1; /* RW */
unsigned long extio_int3:1; /* RW */
} s4;
+
+ /* UV3 unique struct */
+ struct uv3h_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long qp_hcerr:1; /* RW */
+ unsigned long rh_hcerr:1; /* RW */
+ unsigned long lh0_hcerr:1; /* RW */
+ unsigned long lh1_hcerr:1; /* RW */
+ unsigned long gr0_hcerr:1; /* RW */
+ unsigned long gr1_hcerr:1; /* RW */
+ unsigned long ni0_hcerr:1; /* RW */
+ unsigned long ni1_hcerr:1; /* RW */
+ unsigned long lb_aoerr0:1; /* RW */
+ unsigned long qp_aoerr0:1; /* RW */
+ unsigned long rh_aoerr0:1; /* RW */
+ unsigned long lh0_aoerr0:1; /* RW */
+ unsigned long lh1_aoerr0:1; /* RW */
+ unsigned long gr0_aoerr0:1; /* RW */
+ unsigned long gr1_aoerr0:1; /* RW */
+ unsigned long xb_aoerr0:1; /* RW */
+ unsigned long rt_aoerr0:1; /* RW */
+ unsigned long ni0_aoerr0:1; /* RW */
+ unsigned long ni1_aoerr0:1; /* RW */
+ unsigned long lb_aoerr1:1; /* RW */
+ unsigned long qp_aoerr1:1; /* RW */
+ unsigned long rh_aoerr1:1; /* RW */
+ unsigned long lh0_aoerr1:1; /* RW */
+ unsigned long lh1_aoerr1:1; /* RW */
+ unsigned long gr0_aoerr1:1; /* RW */
+ unsigned long gr1_aoerr1:1; /* RW */
+ unsigned long xb_aoerr1:1; /* RW */
+ unsigned long rt_aoerr1:1; /* RW */
+ unsigned long ni0_aoerr1:1; /* RW */
+ unsigned long ni1_aoerr1:1; /* RW */
+ unsigned long system_shutdown_int:1; /* RW */
+ unsigned long lb_irq_int_0:1; /* RW */
+ unsigned long lb_irq_int_1:1; /* RW */
+ unsigned long lb_irq_int_2:1; /* RW */
+ unsigned long lb_irq_int_3:1; /* RW */
+ unsigned long lb_irq_int_4:1; /* RW */
+ unsigned long lb_irq_int_5:1; /* RW */
+ unsigned long lb_irq_int_6:1; /* RW */
+ unsigned long lb_irq_int_7:1; /* RW */
+ unsigned long lb_irq_int_8:1; /* RW */
+ unsigned long lb_irq_int_9:1; /* RW */
+ unsigned long lb_irq_int_10:1; /* RW */
+ unsigned long lb_irq_int_11:1; /* RW */
+ unsigned long lb_irq_int_12:1; /* RW */
+ unsigned long lb_irq_int_13:1; /* RW */
+ unsigned long lb_irq_int_14:1; /* RW */
+ unsigned long lb_irq_int_15:1; /* RW */
+ unsigned long l1_nmi_int:1; /* RW */
+ unsigned long stop_clock:1; /* RW */
+ unsigned long asic_to_l1:1; /* RW */
+ unsigned long l1_to_asic:1; /* RW */
+ unsigned long la_seq_trigger:1; /* RW */
+ unsigned long ipi_int:1; /* RW */
+ unsigned long extio_int0:1; /* RW */
+ unsigned long extio_int1:1; /* RW */
+ unsigned long extio_int2:1; /* RW */
+ unsigned long extio_int3:1; /* RW */
+ unsigned long profile_int:1; /* RW */
+ unsigned long rsvd_59_63:5;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long qp_hcerr:1; /* RW */
+ unsigned long rh_hcerr:1; /* RW */
+ unsigned long lh0_hcerr:1; /* RW */
+ unsigned long lh1_hcerr:1; /* RW */
+ unsigned long gr0_hcerr:1; /* RW */
+ unsigned long gr1_hcerr:1; /* RW */
+ unsigned long ni0_hcerr:1; /* RW */
+ unsigned long ni1_hcerr:1; /* RW */
+ unsigned long lb_aoerr0:1; /* RW */
+ unsigned long qp_aoerr0:1; /* RW */
+ unsigned long rh_aoerr0:1; /* RW */
+ unsigned long lh0_aoerr0:1; /* RW */
+ unsigned long lh1_aoerr0:1; /* RW */
+ unsigned long gr0_aoerr0:1; /* RW */
+ unsigned long gr1_aoerr0:1; /* RW */
+ unsigned long xb_aoerr0:1; /* RW */
+ unsigned long rt_aoerr0:1; /* RW */
+ unsigned long ni0_aoerr0:1; /* RW */
+ unsigned long ni1_aoerr0:1; /* RW */
+ unsigned long lb_aoerr1:1; /* RW */
+ unsigned long qp_aoerr1:1; /* RW */
+ unsigned long rh_aoerr1:1; /* RW */
+ unsigned long lh0_aoerr1:1; /* RW */
+ unsigned long lh1_aoerr1:1; /* RW */
+ unsigned long gr0_aoerr1:1; /* RW */
+ unsigned long gr1_aoerr1:1; /* RW */
+ unsigned long xb_aoerr1:1; /* RW */
+ unsigned long rt_aoerr1:1; /* RW */
+ unsigned long ni0_aoerr1:1; /* RW */
+ unsigned long ni1_aoerr1:1; /* RW */
+ unsigned long system_shutdown_int:1; /* RW */
+ unsigned long lb_irq_int_0:1; /* RW */
+ unsigned long lb_irq_int_1:1; /* RW */
+ unsigned long lb_irq_int_2:1; /* RW */
+ unsigned long lb_irq_int_3:1; /* RW */
+ unsigned long lb_irq_int_4:1; /* RW */
+ unsigned long lb_irq_int_5:1; /* RW */
+ unsigned long lb_irq_int_6:1; /* RW */
+ unsigned long lb_irq_int_7:1; /* RW */
+ unsigned long lb_irq_int_8:1; /* RW */
+ unsigned long lb_irq_int_9:1; /* RW */
+ unsigned long lb_irq_int_10:1; /* RW */
+ unsigned long lb_irq_int_11:1; /* RW */
+ unsigned long lb_irq_int_12:1; /* RW */
+ unsigned long lb_irq_int_13:1; /* RW */
+ unsigned long lb_irq_int_14:1; /* RW */
+ unsigned long lb_irq_int_15:1; /* RW */
+ unsigned long l1_nmi_int:1; /* RW */
+ unsigned long stop_clock:1; /* RW */
+ unsigned long asic_to_l1:1; /* RW */
+ unsigned long l1_to_asic:1; /* RW */
+ unsigned long la_seq_trigger:1; /* RW */
+ unsigned long ipi_int:1; /* RW */
+ unsigned long extio_int0:1; /* RW */
+ unsigned long extio_int1:1; /* RW */
+ unsigned long extio_int2:1; /* RW */
+ unsigned long extio_int3:1; /* RW */
+ unsigned long profile_int:1; /* RW */
+ unsigned long rsvd_59_63:5;
+ } s2;
};
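/*
 * The per-hub structs let one 64-bit read be decoded by whichever
 * layout matches the booted hub. A sketch (accessor as above; the
 * branch itself is illustrative):
 *
 *	union uvh_event_occurred0_u ev;
 *
 *	ev.v = uv_read_local_mmr(UVH_EVENT_OCCURRED0);
 *	if (ev.s.lb_hcerr)                         # bit 0, common to all
 *		...report LB HCERR...
 *	if (is_uv(UV5) && ev.s5.rdm_hcerr)         # UV5-only field
 *		...report RDM HCERR...
 */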
/* ========================================================================= */
/* UVH_EVENT_OCCURRED0_ALIAS */
/* ========================================================================= */
#define UVH_EVENT_OCCURRED0_ALIAS 0x70008UL
-#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
+
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED1 */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED1 0x70080UL
+
+
+
+/* UVYH common defines */
+#define UVYH_EVENT_OCCURRED1_IPI_INT_SHFT 0
+#define UVYH_EVENT_OCCURRED1_IPI_INT_MASK 0x0000000000000001UL
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT0_SHFT 1
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT0_MASK 0x0000000000000002UL
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT1_SHFT 2
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT1_MASK 0x0000000000000004UL
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT2_SHFT 3
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT2_MASK 0x0000000000000008UL
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT3_SHFT 4
+#define UVYH_EVENT_OCCURRED1_EXTIO_INT3_MASK 0x0000000000000010UL
+#define UVYH_EVENT_OCCURRED1_PROFILE_INT_SHFT 5
+#define UVYH_EVENT_OCCURRED1_PROFILE_INT_MASK 0x0000000000000020UL
+#define UVYH_EVENT_OCCURRED1_BAU_DATA_SHFT 6
+#define UVYH_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000040UL
+#define UVYH_EVENT_OCCURRED1_PROC_GENERAL_SHFT 7
+#define UVYH_EVENT_OCCURRED1_PROC_GENERAL_MASK 0x0000000000000080UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT0_SHFT 8
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT0_MASK 0x0000000000000100UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT1_SHFT 9
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT1_MASK 0x0000000000000200UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT2_SHFT 10
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT2_MASK 0x0000000000000400UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT3_SHFT 11
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT3_MASK 0x0000000000000800UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT4_SHFT 12
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT4_MASK 0x0000000000001000UL
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT5_SHFT 13
+#define UVYH_EVENT_OCCURRED1_XH_TLB_INT5_MASK 0x0000000000002000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT0_SHFT 14
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT0_MASK 0x0000000000004000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT1_SHFT 15
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT1_MASK 0x0000000000008000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT2_SHFT 16
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT2_MASK 0x0000000000010000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT3_SHFT 17
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT3_MASK 0x0000000000020000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT4_SHFT 18
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT4_MASK 0x0000000000040000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT5_SHFT 19
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT5_MASK 0x0000000000080000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT6_SHFT 20
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT6_MASK 0x0000000000100000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT7_SHFT 21
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT7_MASK 0x0000000000200000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT8_SHFT 22
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT8_MASK 0x0000000000400000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT9_SHFT 23
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT9_MASK 0x0000000000800000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT10_SHFT 24
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT10_MASK 0x0000000001000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT11_SHFT 25
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT11_MASK 0x0000000002000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT12_SHFT 26
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT12_MASK 0x0000000004000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT13_SHFT 27
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT13_MASK 0x0000000008000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT14_SHFT 28
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT14_MASK 0x0000000010000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT15_SHFT 29
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT15_MASK 0x0000000020000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT16_SHFT 30
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT16_MASK 0x0000000040000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT17_SHFT 31
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT17_MASK 0x0000000080000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT18_SHFT 32
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT18_MASK 0x0000000100000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT19_SHFT 33
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT19_MASK 0x0000000200000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT20_SHFT 34
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT20_MASK 0x0000000400000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT21_SHFT 35
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT21_MASK 0x0000000800000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT22_SHFT 36
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT22_MASK 0x0000001000000000UL
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT23_SHFT 37
+#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT23_MASK 0x0000002000000000UL
+
+/* UV4 unique defines */
+#define UV4H_EVENT_OCCURRED1_PROFILE_INT_SHFT 0
+#define UV4H_EVENT_OCCURRED1_PROFILE_INT_MASK 0x0000000000000001UL
+#define UV4H_EVENT_OCCURRED1_BAU_DATA_SHFT 1
+#define UV4H_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000002UL
+#define UV4H_EVENT_OCCURRED1_PROC_GENERAL_SHFT 2
+#define UV4H_EVENT_OCCURRED1_PROC_GENERAL_MASK 0x0000000000000004UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT0_SHFT 3
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT0_MASK 0x0000000000000008UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT1_SHFT 4
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT1_MASK 0x0000000000000010UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT2_SHFT 5
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT2_MASK 0x0000000000000020UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT3_SHFT 6
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT3_MASK 0x0000000000000040UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT4_SHFT 7
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT4_MASK 0x0000000000000080UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT5_SHFT 8
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT5_MASK 0x0000000000000100UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT6_SHFT 9
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT6_MASK 0x0000000000000200UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT7_SHFT 10
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT7_MASK 0x0000000000000400UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT8_SHFT 11
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT8_MASK 0x0000000000000800UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT9_SHFT 12
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT9_MASK 0x0000000000001000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT10_SHFT 13
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT10_MASK 0x0000000000002000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT11_SHFT 14
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT11_MASK 0x0000000000004000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT12_SHFT 15
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT12_MASK 0x0000000000008000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT13_SHFT 16
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT13_MASK 0x0000000000010000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT14_SHFT 17
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT14_MASK 0x0000000000020000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT15_SHFT 18
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT15_MASK 0x0000000000040000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT16_SHFT 19
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT16_MASK 0x0000000000080000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT17_SHFT 20
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT17_MASK 0x0000000000100000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT18_SHFT 21
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT18_MASK 0x0000000000200000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT19_SHFT 22
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT19_MASK 0x0000000000400000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT20_SHFT 23
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT20_MASK 0x0000000000800000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT21_SHFT 24
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT21_MASK 0x0000000001000000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT22_SHFT 25
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT22_MASK 0x0000000002000000UL
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT23_SHFT 26
+#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT23_MASK 0x0000000004000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT0_SHFT 27
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT0_MASK 0x0000000008000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT1_SHFT 28
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT1_MASK 0x0000000010000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT2_SHFT 29
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT2_MASK 0x0000000020000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT3_SHFT 30
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT3_MASK 0x0000000040000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT4_SHFT 31
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT4_MASK 0x0000000080000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT5_SHFT 32
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT5_MASK 0x0000000100000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT6_SHFT 33
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT6_MASK 0x0000000200000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT7_SHFT 34
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT7_MASK 0x0000000400000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT8_SHFT 35
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT8_MASK 0x0000000800000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT9_SHFT 36
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT9_MASK 0x0000001000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT10_SHFT 37
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT10_MASK 0x0000002000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT11_SHFT 38
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT11_MASK 0x0000004000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT12_SHFT 39
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT12_MASK 0x0000008000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT13_SHFT 40
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT13_MASK 0x0000010000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT14_SHFT 41
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT14_MASK 0x0000020000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT15_SHFT 42
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT15_MASK 0x0000040000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT16_SHFT 43
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT16_MASK 0x0000080000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT17_SHFT 44
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT17_MASK 0x0000100000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT18_SHFT 45
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT18_MASK 0x0000200000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT19_SHFT 46
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT19_MASK 0x0000400000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT20_SHFT 47
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT20_MASK 0x0000800000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT21_SHFT 48
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT21_MASK 0x0001000000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT22_SHFT 49
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT22_MASK 0x0002000000000000UL
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT23_SHFT 50
+#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT23_MASK 0x0004000000000000UL
+
+/* UV3 unique defines */
+#define UV3H_EVENT_OCCURRED1_BAU_DATA_SHFT 0
+#define UV3H_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000001UL
+#define UV3H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_SHFT 1
+#define UV3H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_MASK 0x0000000000000002UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_SHFT 2
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000004UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_SHFT 3
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000008UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_SHFT 4
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000010UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_SHFT 5
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000020UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_SHFT 6
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000040UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_SHFT 7
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000080UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_SHFT 8
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000100UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_SHFT 9
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000200UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_SHFT 10
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000400UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_SHFT 11
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000800UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_SHFT 12
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000001000UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_SHFT 13
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000002000UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_SHFT 14
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000004000UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_SHFT 15
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000008000UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_SHFT 16
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000010000UL
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_SHFT 17
+#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000020000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT0_SHFT 18
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT0_MASK 0x0000000000040000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT1_SHFT 19
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT1_MASK 0x0000000000080000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT2_SHFT 20
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT2_MASK 0x0000000000100000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT3_SHFT 21
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT3_MASK 0x0000000000200000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT4_SHFT 22
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT4_MASK 0x0000000000400000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT5_SHFT 23
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT5_MASK 0x0000000000800000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT6_SHFT 24
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT6_MASK 0x0000000001000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT7_SHFT 25
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT7_MASK 0x0000000002000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT8_SHFT 26
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT8_MASK 0x0000000004000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT9_SHFT 27
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT9_MASK 0x0000000008000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT10_SHFT 28
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT10_MASK 0x0000000010000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT11_SHFT 29
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT11_MASK 0x0000000020000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT12_SHFT 30
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT12_MASK 0x0000000040000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT13_SHFT 31
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT13_MASK 0x0000000080000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT14_SHFT 32
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT14_MASK 0x0000000100000000UL
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT15_SHFT 33
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT15_MASK 0x0000000200000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT0_SHFT 34
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT0_MASK 0x0000000400000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT1_SHFT 35
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT1_MASK 0x0000000800000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT2_SHFT 36
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT2_MASK 0x0000001000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT3_SHFT 37
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT3_MASK 0x0000002000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT4_SHFT 38
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT4_MASK 0x0000004000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT5_SHFT 39
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT5_MASK 0x0000008000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT6_SHFT 40
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT6_MASK 0x0000010000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT7_SHFT 41
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT7_MASK 0x0000020000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT8_SHFT 42
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT8_MASK 0x0000040000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT9_SHFT 43
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT9_MASK 0x0000080000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT10_SHFT 44
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT10_MASK 0x0000100000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT11_SHFT 45
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT11_MASK 0x0000200000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT12_SHFT 46
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT12_MASK 0x0000400000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT13_SHFT 47
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT13_MASK 0x0000800000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT14_SHFT 48
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT14_MASK 0x0001000000000000UL
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT15_SHFT 49
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT15_MASK 0x0002000000000000UL
+#define UV3H_EVENT_OCCURRED1_RTC_INTERVAL_INT_SHFT 50
+#define UV3H_EVENT_OCCURRED1_RTC_INTERVAL_INT_MASK 0x0004000000000000UL
+#define UV3H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_SHFT 51
+#define UV3H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_MASK 0x0008000000000000UL
+
+/* UV2 unique defines */
+#define UV2H_EVENT_OCCURRED1_BAU_DATA_SHFT 0
+#define UV2H_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_SHFT 1
+#define UV2H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_SHFT 2
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_SHFT 3
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000008UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_SHFT 4
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000010UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_SHFT 5
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000020UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_SHFT 6
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000040UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_SHFT 7
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000080UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_SHFT 8
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000100UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_SHFT 9
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000200UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_SHFT 10
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_SHFT 11
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000800UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_SHFT 12
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000001000UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_SHFT 13
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000002000UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_SHFT 14
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000004000UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_SHFT 15
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000008000UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_SHFT 16
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000010000UL
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_SHFT 17
+#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT0_SHFT 18
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT0_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT1_SHFT 19
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT1_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT2_SHFT 20
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT2_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT3_SHFT 21
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT3_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT4_SHFT 22
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT4_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT5_SHFT 23
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT5_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT6_SHFT 24
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT6_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT7_SHFT 25
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT7_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT8_SHFT 26
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT8_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT9_SHFT 27
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT9_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT10_SHFT 28
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT10_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT11_SHFT 29
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT11_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT12_SHFT 30
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT12_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT13_SHFT 31
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT13_MASK 0x0000000080000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT14_SHFT 32
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT14_MASK 0x0000000100000000UL
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT15_SHFT 33
+#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT15_MASK 0x0000000200000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT0_SHFT 34
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT0_MASK 0x0000000400000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT1_SHFT 35
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT1_MASK 0x0000000800000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT2_SHFT 36
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT2_MASK 0x0000001000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT3_SHFT 37
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT3_MASK 0x0000002000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT4_SHFT 38
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT4_MASK 0x0000004000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT5_SHFT 39
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT5_MASK 0x0000008000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT6_SHFT 40
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT6_MASK 0x0000010000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT7_SHFT 41
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT7_MASK 0x0000020000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT8_SHFT 42
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT8_MASK 0x0000040000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT9_SHFT 43
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT9_MASK 0x0000080000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT10_SHFT 44
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT10_MASK 0x0000100000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT11_SHFT 45
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT11_MASK 0x0000200000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT12_SHFT 46
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT12_MASK 0x0000400000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT13_SHFT 47
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT13_MASK 0x0000800000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT14_SHFT 48
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT14_MASK 0x0001000000000000UL
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT15_SHFT 49
+#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT15_MASK 0x0002000000000000UL
+#define UV2H_EVENT_OCCURRED1_RTC_INTERVAL_INT_SHFT 50
+#define UV2H_EVENT_OCCURRED1_RTC_INTERVAL_INT_MASK 0x0004000000000000UL
+#define UV2H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_SHFT 51
+#define UV2H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_MASK 0x0008000000000000UL
+
+#define UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK ( \
+ is_uv(UV5) ? 0x0000000000000002UL : \
+ 0)
+#define UVH_EVENT_OCCURRED1_EXTIO_INT0_SHFT ( \
+ is_uv(UV5) ? 1 : \
+ -1)
+
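/*
 * A minimal usage sketch (not part of this patch) of how the selector
 * macros above are meant to be consumed: generic code tests the UVH_
 * name, and the is_uv() chain resolves the per-hub bit at run time; a
 * shift of -1 (or a mask of 0) means the event does not exist on that
 * hub generation. The uv_read_local_mmr()/uv_write_local_mmr()
 * accessors are assumed from <asm/uv/uv_hub.h>, and the ALIAS register
 * (defined below) is used by existing UV code as a write-to-clear view
 * of EVENT_OCCURRED1.
 */
static inline void uv_ack_extio_int0(void)
{
	unsigned long ev = uv_read_local_mmr(UVH_EVENT_OCCURRED1);

	if (ev & UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK)
		/* Writing the set bit to the ALIAS clears the event. */
		uv_write_local_mmr(UVH_EVENT_OCCURRED1_ALIAS,
				   UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK);
}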
+union uvyh_event_occurred1_u {
+ unsigned long v;
+
+ /* UVYH common struct */
+ struct uvyh_event_occurred1_s {
+ unsigned long ipi_int:1; /* RW */
+ unsigned long extio_int0:1; /* RW */
+ unsigned long extio_int1:1; /* RW */
+ unsigned long extio_int2:1; /* RW */
+ unsigned long extio_int3:1; /* RW */
+ unsigned long profile_int:1; /* RW */
+ unsigned long bau_data:1; /* RW */
+ unsigned long proc_general:1; /* RW */
+ unsigned long xh_tlb_int0:1; /* RW */
+ unsigned long xh_tlb_int1:1; /* RW */
+ unsigned long xh_tlb_int2:1; /* RW */
+ unsigned long xh_tlb_int3:1; /* RW */
+ unsigned long xh_tlb_int4:1; /* RW */
+ unsigned long xh_tlb_int5:1; /* RW */
+ unsigned long rdm_tlb_int0:1; /* RW */
+ unsigned long rdm_tlb_int1:1; /* RW */
+ unsigned long rdm_tlb_int2:1; /* RW */
+ unsigned long rdm_tlb_int3:1; /* RW */
+ unsigned long rdm_tlb_int4:1; /* RW */
+ unsigned long rdm_tlb_int5:1; /* RW */
+ unsigned long rdm_tlb_int6:1; /* RW */
+ unsigned long rdm_tlb_int7:1; /* RW */
+ unsigned long rdm_tlb_int8:1; /* RW */
+ unsigned long rdm_tlb_int9:1; /* RW */
+ unsigned long rdm_tlb_int10:1; /* RW */
+ unsigned long rdm_tlb_int11:1; /* RW */
+ unsigned long rdm_tlb_int12:1; /* RW */
+ unsigned long rdm_tlb_int13:1; /* RW */
+ unsigned long rdm_tlb_int14:1; /* RW */
+ unsigned long rdm_tlb_int15:1; /* RW */
+ unsigned long rdm_tlb_int16:1; /* RW */
+ unsigned long rdm_tlb_int17:1; /* RW */
+ unsigned long rdm_tlb_int18:1; /* RW */
+ unsigned long rdm_tlb_int19:1; /* RW */
+ unsigned long rdm_tlb_int20:1; /* RW */
+ unsigned long rdm_tlb_int21:1; /* RW */
+ unsigned long rdm_tlb_int22:1; /* RW */
+ unsigned long rdm_tlb_int23:1; /* RW */
+ unsigned long rsvd_38_63:26;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_event_occurred1_s {
+ unsigned long ipi_int:1; /* RW */
+ unsigned long extio_int0:1; /* RW */
+ unsigned long extio_int1:1; /* RW */
+ unsigned long extio_int2:1; /* RW */
+ unsigned long extio_int3:1; /* RW */
+ unsigned long profile_int:1; /* RW */
+ unsigned long bau_data:1; /* RW */
+ unsigned long proc_general:1; /* RW */
+ unsigned long xh_tlb_int0:1; /* RW */
+ unsigned long xh_tlb_int1:1; /* RW */
+ unsigned long xh_tlb_int2:1; /* RW */
+ unsigned long xh_tlb_int3:1; /* RW */
+ unsigned long xh_tlb_int4:1; /* RW */
+ unsigned long xh_tlb_int5:1; /* RW */
+ unsigned long rdm_tlb_int0:1; /* RW */
+ unsigned long rdm_tlb_int1:1; /* RW */
+ unsigned long rdm_tlb_int2:1; /* RW */
+ unsigned long rdm_tlb_int3:1; /* RW */
+ unsigned long rdm_tlb_int4:1; /* RW */
+ unsigned long rdm_tlb_int5:1; /* RW */
+ unsigned long rdm_tlb_int6:1; /* RW */
+ unsigned long rdm_tlb_int7:1; /* RW */
+ unsigned long rdm_tlb_int8:1; /* RW */
+ unsigned long rdm_tlb_int9:1; /* RW */
+ unsigned long rdm_tlb_int10:1; /* RW */
+ unsigned long rdm_tlb_int11:1; /* RW */
+ unsigned long rdm_tlb_int12:1; /* RW */
+ unsigned long rdm_tlb_int13:1; /* RW */
+ unsigned long rdm_tlb_int14:1; /* RW */
+ unsigned long rdm_tlb_int15:1; /* RW */
+ unsigned long rdm_tlb_int16:1; /* RW */
+ unsigned long rdm_tlb_int17:1; /* RW */
+ unsigned long rdm_tlb_int18:1; /* RW */
+ unsigned long rdm_tlb_int19:1; /* RW */
+ unsigned long rdm_tlb_int20:1; /* RW */
+ unsigned long rdm_tlb_int21:1; /* RW */
+ unsigned long rdm_tlb_int22:1; /* RW */
+ unsigned long rdm_tlb_int23:1; /* RW */
+ unsigned long rsvd_38_63:26;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_event_occurred1_s {
+ unsigned long profile_int:1; /* RW */
+ unsigned long bau_data:1; /* RW */
+ unsigned long proc_general:1; /* RW */
+ unsigned long gr0_tlb_int0:1; /* RW */
+ unsigned long gr0_tlb_int1:1; /* RW */
+ unsigned long gr0_tlb_int2:1; /* RW */
+ unsigned long gr0_tlb_int3:1; /* RW */
+ unsigned long gr0_tlb_int4:1; /* RW */
+ unsigned long gr0_tlb_int5:1; /* RW */
+ unsigned long gr0_tlb_int6:1; /* RW */
+ unsigned long gr0_tlb_int7:1; /* RW */
+ unsigned long gr0_tlb_int8:1; /* RW */
+ unsigned long gr0_tlb_int9:1; /* RW */
+ unsigned long gr0_tlb_int10:1; /* RW */
+ unsigned long gr0_tlb_int11:1; /* RW */
+ unsigned long gr0_tlb_int12:1; /* RW */
+ unsigned long gr0_tlb_int13:1; /* RW */
+ unsigned long gr0_tlb_int14:1; /* RW */
+ unsigned long gr0_tlb_int15:1; /* RW */
+ unsigned long gr0_tlb_int16:1; /* RW */
+ unsigned long gr0_tlb_int17:1; /* RW */
+ unsigned long gr0_tlb_int18:1; /* RW */
+ unsigned long gr0_tlb_int19:1; /* RW */
+ unsigned long gr0_tlb_int20:1; /* RW */
+ unsigned long gr0_tlb_int21:1; /* RW */
+ unsigned long gr0_tlb_int22:1; /* RW */
+ unsigned long gr0_tlb_int23:1; /* RW */
+ unsigned long gr1_tlb_int0:1; /* RW */
+ unsigned long gr1_tlb_int1:1; /* RW */
+ unsigned long gr1_tlb_int2:1; /* RW */
+ unsigned long gr1_tlb_int3:1; /* RW */
+ unsigned long gr1_tlb_int4:1; /* RW */
+ unsigned long gr1_tlb_int5:1; /* RW */
+ unsigned long gr1_tlb_int6:1; /* RW */
+ unsigned long gr1_tlb_int7:1; /* RW */
+ unsigned long gr1_tlb_int8:1; /* RW */
+ unsigned long gr1_tlb_int9:1; /* RW */
+ unsigned long gr1_tlb_int10:1; /* RW */
+ unsigned long gr1_tlb_int11:1; /* RW */
+ unsigned long gr1_tlb_int12:1; /* RW */
+ unsigned long gr1_tlb_int13:1; /* RW */
+ unsigned long gr1_tlb_int14:1; /* RW */
+ unsigned long gr1_tlb_int15:1; /* RW */
+ unsigned long gr1_tlb_int16:1; /* RW */
+ unsigned long gr1_tlb_int17:1; /* RW */
+ unsigned long gr1_tlb_int18:1; /* RW */
+ unsigned long gr1_tlb_int19:1; /* RW */
+ unsigned long gr1_tlb_int20:1; /* RW */
+ unsigned long gr1_tlb_int21:1; /* RW */
+ unsigned long gr1_tlb_int22:1; /* RW */
+ unsigned long gr1_tlb_int23:1; /* RW */
+ unsigned long rsvd_51_63:13;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_event_occurred1_s {
+ unsigned long bau_data:1; /* RW */
+ unsigned long power_management_req:1; /* RW */
+ unsigned long message_accelerator_int0:1; /* RW */
+ unsigned long message_accelerator_int1:1; /* RW */
+ unsigned long message_accelerator_int2:1; /* RW */
+ unsigned long message_accelerator_int3:1; /* RW */
+ unsigned long message_accelerator_int4:1; /* RW */
+ unsigned long message_accelerator_int5:1; /* RW */
+ unsigned long message_accelerator_int6:1; /* RW */
+ unsigned long message_accelerator_int7:1; /* RW */
+ unsigned long message_accelerator_int8:1; /* RW */
+ unsigned long message_accelerator_int9:1; /* RW */
+ unsigned long message_accelerator_int10:1; /* RW */
+ unsigned long message_accelerator_int11:1; /* RW */
+ unsigned long message_accelerator_int12:1; /* RW */
+ unsigned long message_accelerator_int13:1; /* RW */
+ unsigned long message_accelerator_int14:1; /* RW */
+ unsigned long message_accelerator_int15:1; /* RW */
+ unsigned long gr0_tlb_int0:1; /* RW */
+ unsigned long gr0_tlb_int1:1; /* RW */
+ unsigned long gr0_tlb_int2:1; /* RW */
+ unsigned long gr0_tlb_int3:1; /* RW */
+ unsigned long gr0_tlb_int4:1; /* RW */
+ unsigned long gr0_tlb_int5:1; /* RW */
+ unsigned long gr0_tlb_int6:1; /* RW */
+ unsigned long gr0_tlb_int7:1; /* RW */
+ unsigned long gr0_tlb_int8:1; /* RW */
+ unsigned long gr0_tlb_int9:1; /* RW */
+ unsigned long gr0_tlb_int10:1; /* RW */
+ unsigned long gr0_tlb_int11:1; /* RW */
+ unsigned long gr0_tlb_int12:1; /* RW */
+ unsigned long gr0_tlb_int13:1; /* RW */
+ unsigned long gr0_tlb_int14:1; /* RW */
+ unsigned long gr0_tlb_int15:1; /* RW */
+ unsigned long gr1_tlb_int0:1; /* RW */
+ unsigned long gr1_tlb_int1:1; /* RW */
+ unsigned long gr1_tlb_int2:1; /* RW */
+ unsigned long gr1_tlb_int3:1; /* RW */
+ unsigned long gr1_tlb_int4:1; /* RW */
+ unsigned long gr1_tlb_int5:1; /* RW */
+ unsigned long gr1_tlb_int6:1; /* RW */
+ unsigned long gr1_tlb_int7:1; /* RW */
+ unsigned long gr1_tlb_int8:1; /* RW */
+ unsigned long gr1_tlb_int9:1; /* RW */
+ unsigned long gr1_tlb_int10:1; /* RW */
+ unsigned long gr1_tlb_int11:1; /* RW */
+ unsigned long gr1_tlb_int12:1; /* RW */
+ unsigned long gr1_tlb_int13:1; /* RW */
+ unsigned long gr1_tlb_int14:1; /* RW */
+ unsigned long gr1_tlb_int15:1; /* RW */
+ unsigned long rtc_interval_int:1; /* RW */
+ unsigned long bau_dashboard_int:1; /* RW */
+ unsigned long rsvd_52_63:12;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_event_occurred1_s {
+ unsigned long bau_data:1; /* RW */
+ unsigned long power_management_req:1; /* RW */
+ unsigned long message_accelerator_int0:1; /* RW */
+ unsigned long message_accelerator_int1:1; /* RW */
+ unsigned long message_accelerator_int2:1; /* RW */
+ unsigned long message_accelerator_int3:1; /* RW */
+ unsigned long message_accelerator_int4:1; /* RW */
+ unsigned long message_accelerator_int5:1; /* RW */
+ unsigned long message_accelerator_int6:1; /* RW */
+ unsigned long message_accelerator_int7:1; /* RW */
+ unsigned long message_accelerator_int8:1; /* RW */
+ unsigned long message_accelerator_int9:1; /* RW */
+ unsigned long message_accelerator_int10:1; /* RW */
+ unsigned long message_accelerator_int11:1; /* RW */
+ unsigned long message_accelerator_int12:1; /* RW */
+ unsigned long message_accelerator_int13:1; /* RW */
+ unsigned long message_accelerator_int14:1; /* RW */
+ unsigned long message_accelerator_int15:1; /* RW */
+ unsigned long gr0_tlb_int0:1; /* RW */
+ unsigned long gr0_tlb_int1:1; /* RW */
+ unsigned long gr0_tlb_int2:1; /* RW */
+ unsigned long gr0_tlb_int3:1; /* RW */
+ unsigned long gr0_tlb_int4:1; /* RW */
+ unsigned long gr0_tlb_int5:1; /* RW */
+ unsigned long gr0_tlb_int6:1; /* RW */
+ unsigned long gr0_tlb_int7:1; /* RW */
+ unsigned long gr0_tlb_int8:1; /* RW */
+ unsigned long gr0_tlb_int9:1; /* RW */
+ unsigned long gr0_tlb_int10:1; /* RW */
+ unsigned long gr0_tlb_int11:1; /* RW */
+ unsigned long gr0_tlb_int12:1; /* RW */
+ unsigned long gr0_tlb_int13:1; /* RW */
+ unsigned long gr0_tlb_int14:1; /* RW */
+ unsigned long gr0_tlb_int15:1; /* RW */
+ unsigned long gr1_tlb_int0:1; /* RW */
+ unsigned long gr1_tlb_int1:1; /* RW */
+ unsigned long gr1_tlb_int2:1; /* RW */
+ unsigned long gr1_tlb_int3:1; /* RW */
+ unsigned long gr1_tlb_int4:1; /* RW */
+ unsigned long gr1_tlb_int5:1; /* RW */
+ unsigned long gr1_tlb_int6:1; /* RW */
+ unsigned long gr1_tlb_int7:1; /* RW */
+ unsigned long gr1_tlb_int8:1; /* RW */
+ unsigned long gr1_tlb_int9:1; /* RW */
+ unsigned long gr1_tlb_int10:1; /* RW */
+ unsigned long gr1_tlb_int11:1; /* RW */
+ unsigned long gr1_tlb_int12:1; /* RW */
+ unsigned long gr1_tlb_int13:1; /* RW */
+ unsigned long gr1_tlb_int14:1; /* RW */
+ unsigned long gr1_tlb_int15:1; /* RW */
+ unsigned long rtc_interval_int:1; /* RW */
+ unsigned long bau_dashboard_int:1; /* RW */
+ unsigned long rsvd_52_63:12;
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED1_ALIAS */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED1_ALIAS 0x70088UL
+
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED2 */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED2 0x70100UL
+
+
+
+/* UVYH common defines */
+#define UVYH_EVENT_OCCURRED2_RTC_INTERVAL_INT_SHFT 0
+#define UVYH_EVENT_OCCURRED2_RTC_INTERVAL_INT_MASK 0x0000000000000001UL
+#define UVYH_EVENT_OCCURRED2_BAU_DASHBOARD_INT_SHFT 1
+#define UVYH_EVENT_OCCURRED2_BAU_DASHBOARD_INT_MASK 0x0000000000000002UL
+#define UVYH_EVENT_OCCURRED2_RTC_0_SHFT 2
+#define UVYH_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000004UL
+#define UVYH_EVENT_OCCURRED2_RTC_1_SHFT 3
+#define UVYH_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000008UL
+#define UVYH_EVENT_OCCURRED2_RTC_2_SHFT 4
+#define UVYH_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000010UL
+#define UVYH_EVENT_OCCURRED2_RTC_3_SHFT 5
+#define UVYH_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000020UL
+#define UVYH_EVENT_OCCURRED2_RTC_4_SHFT 6
+#define UVYH_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000040UL
+#define UVYH_EVENT_OCCURRED2_RTC_5_SHFT 7
+#define UVYH_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000080UL
+#define UVYH_EVENT_OCCURRED2_RTC_6_SHFT 8
+#define UVYH_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000100UL
+#define UVYH_EVENT_OCCURRED2_RTC_7_SHFT 9
+#define UVYH_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000200UL
+#define UVYH_EVENT_OCCURRED2_RTC_8_SHFT 10
+#define UVYH_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000400UL
+#define UVYH_EVENT_OCCURRED2_RTC_9_SHFT 11
+#define UVYH_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000800UL
+#define UVYH_EVENT_OCCURRED2_RTC_10_SHFT 12
+#define UVYH_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000001000UL
+#define UVYH_EVENT_OCCURRED2_RTC_11_SHFT 13
+#define UVYH_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000002000UL
+#define UVYH_EVENT_OCCURRED2_RTC_12_SHFT 14
+#define UVYH_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000004000UL
+#define UVYH_EVENT_OCCURRED2_RTC_13_SHFT 15
+#define UVYH_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000008000UL
+#define UVYH_EVENT_OCCURRED2_RTC_14_SHFT 16
+#define UVYH_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000010000UL
+#define UVYH_EVENT_OCCURRED2_RTC_15_SHFT 17
+#define UVYH_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000020000UL
+#define UVYH_EVENT_OCCURRED2_RTC_16_SHFT 18
+#define UVYH_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000040000UL
+#define UVYH_EVENT_OCCURRED2_RTC_17_SHFT 19
+#define UVYH_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000080000UL
+#define UVYH_EVENT_OCCURRED2_RTC_18_SHFT 20
+#define UVYH_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000100000UL
+#define UVYH_EVENT_OCCURRED2_RTC_19_SHFT 21
+#define UVYH_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000200000UL
+#define UVYH_EVENT_OCCURRED2_RTC_20_SHFT 22
+#define UVYH_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000400000UL
+#define UVYH_EVENT_OCCURRED2_RTC_21_SHFT 23
+#define UVYH_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000800000UL
+#define UVYH_EVENT_OCCURRED2_RTC_22_SHFT 24
+#define UVYH_EVENT_OCCURRED2_RTC_22_MASK 0x0000000001000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_23_SHFT 25
+#define UVYH_EVENT_OCCURRED2_RTC_23_MASK 0x0000000002000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_24_SHFT 26
+#define UVYH_EVENT_OCCURRED2_RTC_24_MASK 0x0000000004000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_25_SHFT 27
+#define UVYH_EVENT_OCCURRED2_RTC_25_MASK 0x0000000008000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_26_SHFT 28
+#define UVYH_EVENT_OCCURRED2_RTC_26_MASK 0x0000000010000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_27_SHFT 29
+#define UVYH_EVENT_OCCURRED2_RTC_27_MASK 0x0000000020000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_28_SHFT 30
+#define UVYH_EVENT_OCCURRED2_RTC_28_MASK 0x0000000040000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_29_SHFT 31
+#define UVYH_EVENT_OCCURRED2_RTC_29_MASK 0x0000000080000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_30_SHFT 32
+#define UVYH_EVENT_OCCURRED2_RTC_30_MASK 0x0000000100000000UL
+#define UVYH_EVENT_OCCURRED2_RTC_31_SHFT 33
+#define UVYH_EVENT_OCCURRED2_RTC_31_MASK 0x0000000200000000UL
+
+/* UV4 unique defines */
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_SHFT 0
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000001UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_SHFT 1
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000002UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_SHFT 2
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000004UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_SHFT 3
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000008UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_SHFT 4
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000010UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_SHFT 5
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000020UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_SHFT 6
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000040UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_SHFT 7
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000080UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_SHFT 8
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000100UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_SHFT 9
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000200UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_SHFT 10
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000000400UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_SHFT 11
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000000800UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_SHFT 12
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000001000UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_SHFT 13
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000002000UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_SHFT 14
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000004000UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_SHFT 15
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000008000UL
+#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_SHFT 16
+#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_MASK 0x0000000000010000UL
+#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_SHFT 17
+#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_MASK 0x0000000000020000UL
+#define UV4H_EVENT_OCCURRED2_RTC_0_SHFT 18
+#define UV4H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000040000UL
+#define UV4H_EVENT_OCCURRED2_RTC_1_SHFT 19
+#define UV4H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000080000UL
+#define UV4H_EVENT_OCCURRED2_RTC_2_SHFT 20
+#define UV4H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000100000UL
+#define UV4H_EVENT_OCCURRED2_RTC_3_SHFT 21
+#define UV4H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000200000UL
+#define UV4H_EVENT_OCCURRED2_RTC_4_SHFT 22
+#define UV4H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000400000UL
+#define UV4H_EVENT_OCCURRED2_RTC_5_SHFT 23
+#define UV4H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000800000UL
+#define UV4H_EVENT_OCCURRED2_RTC_6_SHFT 24
+#define UV4H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000001000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_7_SHFT 25
+#define UV4H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000002000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_8_SHFT 26
+#define UV4H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000004000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_9_SHFT 27
+#define UV4H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000008000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_10_SHFT 28
+#define UV4H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000010000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_11_SHFT 29
+#define UV4H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000020000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_12_SHFT 30
+#define UV4H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000040000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_13_SHFT 31
+#define UV4H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000080000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_14_SHFT 32
+#define UV4H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000100000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_15_SHFT 33
+#define UV4H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000200000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_16_SHFT 34
+#define UV4H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000400000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_17_SHFT 35
+#define UV4H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000800000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_18_SHFT 36
+#define UV4H_EVENT_OCCURRED2_RTC_18_MASK 0x0000001000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_19_SHFT 37
+#define UV4H_EVENT_OCCURRED2_RTC_19_MASK 0x0000002000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_20_SHFT 38
+#define UV4H_EVENT_OCCURRED2_RTC_20_MASK 0x0000004000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_21_SHFT 39
+#define UV4H_EVENT_OCCURRED2_RTC_21_MASK 0x0000008000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_22_SHFT 40
+#define UV4H_EVENT_OCCURRED2_RTC_22_MASK 0x0000010000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_23_SHFT 41
+#define UV4H_EVENT_OCCURRED2_RTC_23_MASK 0x0000020000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_24_SHFT 42
+#define UV4H_EVENT_OCCURRED2_RTC_24_MASK 0x0000040000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_25_SHFT 43
+#define UV4H_EVENT_OCCURRED2_RTC_25_MASK 0x0000080000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_26_SHFT 44
+#define UV4H_EVENT_OCCURRED2_RTC_26_MASK 0x0000100000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_27_SHFT 45
+#define UV4H_EVENT_OCCURRED2_RTC_27_MASK 0x0000200000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_28_SHFT 46
+#define UV4H_EVENT_OCCURRED2_RTC_28_MASK 0x0000400000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_29_SHFT 47
+#define UV4H_EVENT_OCCURRED2_RTC_29_MASK 0x0000800000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_30_SHFT 48
+#define UV4H_EVENT_OCCURRED2_RTC_30_MASK 0x0001000000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_31_SHFT 49
+#define UV4H_EVENT_OCCURRED2_RTC_31_MASK 0x0002000000000000UL
+
+/* UV3 unique defines */
+#define UV3H_EVENT_OCCURRED2_RTC_0_SHFT 0
+#define UV3H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
+#define UV3H_EVENT_OCCURRED2_RTC_1_SHFT 1
+#define UV3H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
+#define UV3H_EVENT_OCCURRED2_RTC_2_SHFT 2
+#define UV3H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
+#define UV3H_EVENT_OCCURRED2_RTC_3_SHFT 3
+#define UV3H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
+#define UV3H_EVENT_OCCURRED2_RTC_4_SHFT 4
+#define UV3H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
+#define UV3H_EVENT_OCCURRED2_RTC_5_SHFT 5
+#define UV3H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
+#define UV3H_EVENT_OCCURRED2_RTC_6_SHFT 6
+#define UV3H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
+#define UV3H_EVENT_OCCURRED2_RTC_7_SHFT 7
+#define UV3H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
+#define UV3H_EVENT_OCCURRED2_RTC_8_SHFT 8
+#define UV3H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
+#define UV3H_EVENT_OCCURRED2_RTC_9_SHFT 9
+#define UV3H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
+#define UV3H_EVENT_OCCURRED2_RTC_10_SHFT 10
+#define UV3H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
+#define UV3H_EVENT_OCCURRED2_RTC_11_SHFT 11
+#define UV3H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
+#define UV3H_EVENT_OCCURRED2_RTC_12_SHFT 12
+#define UV3H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
+#define UV3H_EVENT_OCCURRED2_RTC_13_SHFT 13
+#define UV3H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
+#define UV3H_EVENT_OCCURRED2_RTC_14_SHFT 14
+#define UV3H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
+#define UV3H_EVENT_OCCURRED2_RTC_15_SHFT 15
+#define UV3H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
+#define UV3H_EVENT_OCCURRED2_RTC_16_SHFT 16
+#define UV3H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
+#define UV3H_EVENT_OCCURRED2_RTC_17_SHFT 17
+#define UV3H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
+#define UV3H_EVENT_OCCURRED2_RTC_18_SHFT 18
+#define UV3H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
+#define UV3H_EVENT_OCCURRED2_RTC_19_SHFT 19
+#define UV3H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
+#define UV3H_EVENT_OCCURRED2_RTC_20_SHFT 20
+#define UV3H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
+#define UV3H_EVENT_OCCURRED2_RTC_21_SHFT 21
+#define UV3H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
+#define UV3H_EVENT_OCCURRED2_RTC_22_SHFT 22
+#define UV3H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
+#define UV3H_EVENT_OCCURRED2_RTC_23_SHFT 23
+#define UV3H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
+#define UV3H_EVENT_OCCURRED2_RTC_24_SHFT 24
+#define UV3H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_25_SHFT 25
+#define UV3H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_26_SHFT 26
+#define UV3H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_27_SHFT 27
+#define UV3H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_28_SHFT 28
+#define UV3H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_29_SHFT 29
+#define UV3H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_30_SHFT 30
+#define UV3H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_31_SHFT 31
+#define UV3H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+
+/* UV2 unique defines */
+#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
+#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
+#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
+#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
+#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
+#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
+#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
+#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
+#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
+#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
+#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
+#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
+#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
+#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
+#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
+#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
+#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
+#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
+#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
+#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
+#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
+#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
+#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
+#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
+#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
+#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
+#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
+#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
+#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
+#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
+#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
+#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
+#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
+#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
+#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
+#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
+#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
+#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
+#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
+#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
+#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
+#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
+#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
+#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
+#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
+#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+
+#define UVH_EVENT_OCCURRED2_RTC_1_MASK ( \
+ is_uv(UV5) ? 0x0000000000000008UL : \
+ is_uv(UV4) ? 0x0000000000080000UL : \
+ is_uv(UV3) ? 0x0000000000000002UL : \
+ is_uv(UV2) ? 0x0000000000000002UL : \
+ 0)
+#define UVH_EVENT_OCCURRED2_RTC_1_SHFT ( \
+ is_uv(UV5) ? 3 : \
+ is_uv(UV4) ? 19 : \
+ is_uv(UV3) ? 1 : \
+ is_uv(UV2) ? 1 : \
+ -1)
+
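/*
 * Sketch (not from this patch): the same UVH_ name resolves to a
 * different bit position on each hub generation -- RTC_1 is bit 3 on
 * UV5, bit 19 on UV4, and bit 1 on UV2/UV3 -- so generation-agnostic
 * code can poll one name everywhere. uv_read_local_mmr() is assumed
 * from <asm/uv/uv_hub.h>.
 */
static inline bool uv_rtc_1_event_pending(void)
{
	return uv_read_local_mmr(UVH_EVENT_OCCURRED2) &
	       UVH_EVENT_OCCURRED2_RTC_1_MASK;
}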
+union uvyh_event_occurred2_u {
+ unsigned long v;
+
+ /* UVYH common struct */
+ struct uvyh_event_occurred2_s {
+ unsigned long rtc_interval_int:1; /* RW */
+ unsigned long bau_dashboard_int:1; /* RW */
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_34_63:30;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_event_occurred2_s {
+ unsigned long rtc_interval_int:1; /* RW */
+ unsigned long bau_dashboard_int:1; /* RW */
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_34_63:30;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_event_occurred2_s {
+ unsigned long message_accelerator_int0:1; /* RW */
+ unsigned long message_accelerator_int1:1; /* RW */
+ unsigned long message_accelerator_int2:1; /* RW */
+ unsigned long message_accelerator_int3:1; /* RW */
+ unsigned long message_accelerator_int4:1; /* RW */
+ unsigned long message_accelerator_int5:1; /* RW */
+ unsigned long message_accelerator_int6:1; /* RW */
+ unsigned long message_accelerator_int7:1; /* RW */
+ unsigned long message_accelerator_int8:1; /* RW */
+ unsigned long message_accelerator_int9:1; /* RW */
+ unsigned long message_accelerator_int10:1; /* RW */
+ unsigned long message_accelerator_int11:1; /* RW */
+ unsigned long message_accelerator_int12:1; /* RW */
+ unsigned long message_accelerator_int13:1; /* RW */
+ unsigned long message_accelerator_int14:1; /* RW */
+ unsigned long message_accelerator_int15:1; /* RW */
+ unsigned long rtc_interval_int:1; /* RW */
+ unsigned long bau_dashboard_int:1; /* RW */
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_50_63:14;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_event_occurred2_s {
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_32_63:32;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_event_occurred2_s {
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_32_63:32;
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED2_ALIAS */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED2_ALIAS 0x70108UL
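
The _ALIAS offset is the write-to-clear shadow of UVH_EVENT_OCCURRED2: storing a 1 in a bit position clears that event without a read-modify-write of the live register. A minimal sketch of clearing one RTC event, assuming the uv_write_local_mmr() accessor from <asm/uv/uv_hub.h>; the helper name and the choice of the UV4 (s4) view are illustrative:

	static inline void uv_clear_rtc0_event(void)
	{
		union uvh_event_occurred2_u ev = { .v = 0 };

		ev.s4.rtc_0 = 1;	/* bit 18 in the UV4 view; s3/s2 place rtc_0 at bit 0 */
		uv_write_local_mmr(UVH_EVENT_OCCURRED2_ALIAS, ev.v);
	}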
/* ========================================================================= */
@@ -585,51 +2135,148 @@ union uvh_event_occurred0_u {
/* ========================================================================= */
#define UVH_EXTIO_INT0_BROADCAST 0x61448UL
-#define UV2H_EXTIO_INT0_BROADCAST_32 0x3f0
-#define UV3H_EXTIO_INT0_BROADCAST_32 0x3f0
-#define UV4H_EXTIO_INT0_BROADCAST_32 0x310
-#define UVH_EXTIO_INT0_BROADCAST_32 ( \
- is_uv2_hub() ? UV2H_EXTIO_INT0_BROADCAST_32 : \
- is_uv3_hub() ? UV3H_EXTIO_INT0_BROADCAST_32 : \
- /*is_uv4_hub*/ UV4H_EXTIO_INT0_BROADCAST_32)
-
+/* UVH common defines */
#define UVH_EXTIO_INT0_BROADCAST_ENABLE_SHFT 0
#define UVH_EXTIO_INT0_BROADCAST_ENABLE_MASK 0x0000000000000001UL
union uvh_extio_int0_broadcast_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_extio_int0_broadcast_s {
unsigned long enable:1; /* RW */
unsigned long rsvd_1_63:63;
} s;
+
+ /* UV5 unique struct */
+ struct uv5h_extio_int0_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_extio_int0_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_extio_int0_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_extio_int0_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_GAM_GR_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_GAM_GR_CONFIG ( \
+ is_uv(UV5) ? 0x600028UL : \
+ is_uv(UV4) ? 0x600028UL : \
+ is_uv(UV3) ? 0xc00028UL : \
+ is_uv(UV2) ? 0xc00028UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10
+#define UVYH_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+
+/* UV4 unique defines */
+#define UV4H_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10
+#define UV4H_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+
+/* UV3 unique defines */
+#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_SHFT 0
+#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_MASK 0x000000000000003fUL
+#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10
+#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+
+/* UV2 unique defines */
+#define UV2H_GR0_GAM_GR_CONFIG_N_GR_SHFT 0
+#define UV2H_GR0_GAM_GR_CONFIG_N_GR_MASK 0x000000000000000fUL
+
+
+union uvyh_gr0_gam_gr_config_u {
+ unsigned long v;
+
+ /* UVYH common struct */
+ struct uvyh_gr0_gam_gr_config_s {
+ unsigned long rsvd_0_9:10;
+ unsigned long subspace:1; /* RW */
+ unsigned long rsvd_11_63:53;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_gr0_gam_gr_config_s {
+ unsigned long rsvd_0_9:10;
+ unsigned long subspace:1; /* RW */
+ unsigned long rsvd_11_63:53;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_gr0_gam_gr_config_s {
+ unsigned long rsvd_0_9:10;
+ unsigned long subspace:1; /* RW */
+ unsigned long rsvd_11_63:53;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_gr0_gam_gr_config_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long undef_6_9:4; /* Undefined */
+ unsigned long subspace:1; /* RW */
+ unsigned long reserved:53;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_gr0_gam_gr_config_s {
+ unsigned long n_gr:4; /* RW */
+ unsigned long reserved:60;
+ } s2;
};
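
Since the field layout diverges per generation (UV3 packs m_skt into bits 0-5, UV2 carries only n_gr), readers pick the view that matches the running hub. A sketch using the is_uv() and uv_read_local_mmr() helpers from the UV platform code; the function name is illustrative:

	static unsigned long uv_gr0_subspace(void)
	{
		union uvyh_gr0_gam_gr_config_u cfg;

		cfg.v = uv_read_local_mmr(UVH_GR0_GAM_GR_CONFIG);
		if (is_uv(UV2))
			return 0;		/* UV2 has no subspace field */
		if (is_uv(UV3))
			return cfg.s3.subspace;
		return cfg.sy.subspace;		/* s4 and s5 match the common sy layout */
	}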
/* ========================================================================= */
/* UVH_GR0_TLB_INT0_CONFIG */
/* ========================================================================= */
-#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
-
-#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
-#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
-#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
-#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
-#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
-#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
-#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+#define UVH_GR0_TLB_INT0_CONFIG ( \
+ is_uv(UV4) ? 0x61b00UL : \
+ is_uv(UV3) ? 0x61b00UL : \
+ is_uv(UV2) ? 0x61b00UL : \
+ uv_undefined("UVH_GR0_TLB_INT0_CONFIG"))
+
+
+/* UVXH common defines */
+#define UVXH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVXH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVXH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVXH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVXH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVXH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVXH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVXH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVXH_GR0_TLB_INT0_CONFIG_P_SHFT 13
+#define UVXH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVXH_GR0_TLB_INT0_CONFIG_T_SHFT 15
+#define UVXH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVXH_GR0_TLB_INT0_CONFIG_M_SHFT 16
+#define UVXH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVXH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVXH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr0_tlb_int0_config_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_gr0_tlb_int0_config_s {
unsigned long vector_:8; /* RW */
unsigned long dm:3; /* RW */
@@ -642,33 +2289,97 @@ union uvh_gr0_tlb_int0_config_u {
unsigned long rsvd_17_31:15;
unsigned long apic_id:32; /* RW */
} s;
+
+ /* UVXH common struct */
+ struct uvxh_gr0_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } sx;
+
+ /* UV4 unique struct */
+ struct uv4h_gr0_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_gr0_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_gr0_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s2;
};
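
All five views of this register are bit-for-bit identical; the per-generation structs exist only to keep the generated header uniform. The UVXH SHFT/MASK defines compose the same word without bitfields, as in this sketch (helper name illustrative); note that UVH_GR0_TLB_INT0_CONFIG expands to uv_undefined() on hubs newer than UV4, so callers must guard on hub type:

	static void gr0_tlb_int0_set(unsigned long vector, unsigned long apicid)
	{
		unsigned long val = (vector << UVXH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT) |
				    (apicid << UVXH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT);

		if (is_uv(UV4))		/* extend the test for UV3/UV2 as needed */
			uv_write_local_mmr(UVH_GR0_TLB_INT0_CONFIG, val);
	}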
/* ========================================================================= */
/* UVH_GR0_TLB_INT1_CONFIG */
/* ========================================================================= */
-#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
-
-#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
-#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
-#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
-#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
-#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
-#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
-#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+#define UVH_GR0_TLB_INT1_CONFIG ( \
+ is_uv(UV4) ? 0x61b40UL : \
+ is_uv(UV3) ? 0x61b40UL : \
+ is_uv(UV2) ? 0x61b40UL : \
+ uv_undefined("UVH_GR0_TLB_INT1_CONFIG"))
+
+
+/* UVXH common defines */
+#define UVXH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVXH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVXH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVXH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVXH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVXH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVXH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVXH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVXH_GR0_TLB_INT1_CONFIG_P_SHFT 13
+#define UVXH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVXH_GR0_TLB_INT1_CONFIG_T_SHFT 15
+#define UVXH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVXH_GR0_TLB_INT1_CONFIG_M_SHFT 16
+#define UVXH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVXH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVXH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr0_tlb_int1_config_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_gr0_tlb_int1_config_s {
unsigned long vector_:8; /* RW */
unsigned long dm:3; /* RW */
@@ -681,382 +2392,97 @@ union uvh_gr0_tlb_int1_config_u {
unsigned long rsvd_17_31:15;
unsigned long apic_id:32; /* RW */
} s;
-};
-/* ========================================================================= */
-/* UVH_GR0_TLB_MMR_CONTROL */
-/* ========================================================================= */
-#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
-#define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL
-#define UV4H_GR0_TLB_MMR_CONTROL 0x601080UL
-#define UVH_GR0_TLB_MMR_CONTROL ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL)
-
-#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-
-#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-
-#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
-#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
-#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
-#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
-#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
-
-#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
-#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
-#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-
-#define UV4H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 13
-#define UV4H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV4H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV4H_GR0_TLB_MMR_CONTROL_PAGE_SIZE_SHFT 59
-#define UV4H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000001fffUL
-#define UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000006000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-#define UV4H_GR0_TLB_MMR_CONTROL_PAGE_SIZE_MASK 0xf800000000000000UL
-
-#define UVH_GR0_TLB_MMR_CONTROL_INDEX_MASK ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_INDEX_MASK : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_INDEX_MASK)
-#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK)
-#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT)
-
-union uvh_gr0_tlb_mmr_control_u {
- unsigned long v;
- struct uvh_gr0_tlb_mmr_control_s {
- unsigned long rsvd_0_15:16;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long rsvd_32_48:17;
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_63:12;
- } s;
- struct uvxh_gr0_tlb_mmr_control_s {
- unsigned long rsvd_0_15:16;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long rsvd_48:1;
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_63:12;
+
+ /* UVXH common struct */
+ struct uvxh_gr0_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} sx;
- struct uv2h_gr0_tlb_mmr_control_s {
- unsigned long index:12; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_14_15:2;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long mmr_inj_con:1; /* RW */
- unsigned long rsvd_49_51:3;
- unsigned long mmr_inj_tlbram:1; /* RW */
- unsigned long rsvd_53_63:11;
- } s2;
- struct uv3h_gr0_tlb_mmr_control_s {
- unsigned long index:12; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_14_15:2;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long ecc_sel:1; /* RW */
- unsigned long rsvd_22_29:8;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long undef_48:1; /* Undefined */
- unsigned long rsvd_49_51:3;
- unsigned long undef_52:1; /* Undefined */
- unsigned long rsvd_53_63:11;
- } s3;
- struct uv4h_gr0_tlb_mmr_control_s {
- unsigned long index:13; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_15:1;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long ecc_sel:1; /* RW */
- unsigned long rsvd_22_29:8;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long undef_48:1; /* Undefined */
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_58:7;
- unsigned long page_size:5; /* RW */
+
+ /* UV4 unique struct */
+ struct uv4h_gr0_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s4;
-};
-/* ========================================================================= */
-/* UVH_GR0_TLB_MMR_READ_DATA_HI */
-/* ========================================================================= */
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI 0x6010a0UL
-#define UVH_GR0_TLB_MMR_READ_DATA_HI ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_READ_DATA_HI : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_READ_DATA_HI)
-
-#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-
-#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
-#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
-
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
-
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PNID_SHFT 34
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 49
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 51
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 52
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 53
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x00000003ffffffffUL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PNID_MASK 0x0001fffc00000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0006000000000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0008000000000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0010000000000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0020000000000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
-
-
-union uvh_gr0_tlb_mmr_read_data_hi_u {
- unsigned long v;
- struct uv2h_gr0_tlb_mmr_read_data_hi_s {
- unsigned long pfn:41; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long rsvd_45_63:19;
- } s2;
- struct uv3h_gr0_tlb_mmr_read_data_hi_s {
- unsigned long pfn:41; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long aa_ext:1; /* RO */
- unsigned long undef_46_54:9; /* Undefined */
- unsigned long way_ecc:9; /* RO */
+
+ /* UV3 unique struct */
+ struct uv3h_gr0_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s3;
- struct uv4h_gr0_tlb_mmr_read_data_hi_s {
- unsigned long pfn:34; /* RO */
- unsigned long pnid:15; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long aa_ext:1; /* RO */
- unsigned long undef_54:1; /* Undefined */
- unsigned long way_ecc:9; /* RO */
- } s4;
-};
-/* ========================================================================= */
-/* UVH_GR0_TLB_MMR_READ_DATA_LO */
-/* ========================================================================= */
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO 0x6010a8UL
-#define UVH_GR0_TLB_MMR_READ_DATA_LO ( \
- is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO : \
- is_uv3_hub() ? UV3H_GR0_TLB_MMR_READ_DATA_LO : \
- /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_READ_DATA_LO)
-
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-
-union uvh_gr0_tlb_mmr_read_data_lo_u {
- unsigned long v;
- struct uvh_gr0_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s;
- struct uvxh_gr0_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } sx;
- struct uv2h_gr0_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
+
+ /* UV2 unique struct */
+ struct uv2h_gr0_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s2;
- struct uv3h_gr0_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s3;
- struct uv4h_gr0_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s4;
};
/* ========================================================================= */
/* UVH_GR1_TLB_INT0_CONFIG */
/* ========================================================================= */
-#define UV2H_GR1_TLB_INT0_CONFIG 0x61f00UL
-#define UV3H_GR1_TLB_INT0_CONFIG 0x61f00UL
-#define UV4H_GR1_TLB_INT0_CONFIG 0x62100UL
#define UVH_GR1_TLB_INT0_CONFIG ( \
- is_uv2_hub() ? UV2H_GR1_TLB_INT0_CONFIG : \
- is_uv3_hub() ? UV3H_GR1_TLB_INT0_CONFIG : \
- /*is_uv4_hub*/ UV4H_GR1_TLB_INT0_CONFIG)
-
-#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
-#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
-#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
-#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
-#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
-#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
-#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+ is_uv(UV4) ? 0x62100UL : \
+ is_uv(UV3) ? 0x61f00UL : \
+ is_uv(UV2) ? 0x61f00UL : \
+ uv_undefined("UVH_GR1_TLB_INT0_CONFIG"))
+
+
+/* UVXH common defines */
+#define UVXH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVXH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVXH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVXH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVXH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVXH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVXH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVXH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVXH_GR1_TLB_INT0_CONFIG_P_SHFT 13
+#define UVXH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVXH_GR1_TLB_INT0_CONFIG_T_SHFT 15
+#define UVXH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVXH_GR1_TLB_INT0_CONFIG_M_SHFT 16
+#define UVXH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVXH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVXH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr1_tlb_int0_config_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_gr1_tlb_int0_config_s {
unsigned long vector_:8; /* RW */
unsigned long dm:3; /* RW */
@@ -1069,39 +2495,97 @@ union uvh_gr1_tlb_int0_config_u {
unsigned long rsvd_17_31:15;
unsigned long apic_id:32; /* RW */
} s;
+
+ /* UVXH common struct */
+ struct uvxh_gr1_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } sx;
+
+ /* UV4 unique struct */
+ struct uv4h_gr1_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_gr1_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_gr1_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s2;
};
/* ========================================================================= */
/* UVH_GR1_TLB_INT1_CONFIG */
/* ========================================================================= */
-#define UV2H_GR1_TLB_INT1_CONFIG 0x61f40UL
-#define UV3H_GR1_TLB_INT1_CONFIG 0x61f40UL
-#define UV4H_GR1_TLB_INT1_CONFIG 0x62140UL
#define UVH_GR1_TLB_INT1_CONFIG ( \
- is_uv2_hub() ? UV2H_GR1_TLB_INT1_CONFIG : \
- is_uv3_hub() ? UV3H_GR1_TLB_INT1_CONFIG : \
- /*is_uv4_hub*/ UV4H_GR1_TLB_INT1_CONFIG)
-
-#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
-#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
-#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
-#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
-#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
-#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
-#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
-#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
-#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+ is_uv(UV4) ? 0x62140UL : \
+ is_uv(UV3) ? 0x61f40UL : \
+ is_uv(UV2) ? 0x61f40UL : \
+ uv_undefined("UVH_GR1_TLB_INT1_CONFIG"))
+
+
+/* UVXH common defines */
+#define UVXH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVXH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVXH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVXH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVXH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVXH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVXH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVXH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVXH_GR1_TLB_INT1_CONFIG_P_SHFT 13
+#define UVXH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVXH_GR1_TLB_INT1_CONFIG_T_SHFT 15
+#define UVXH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVXH_GR1_TLB_INT1_CONFIG_M_SHFT 16
+#define UVXH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVXH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVXH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_gr1_tlb_int1_config_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_gr1_tlb_int1_config_s {
unsigned long vector_:8; /* RW */
unsigned long dm:3; /* RW */
@@ -1114,337 +2598,62 @@ union uvh_gr1_tlb_int1_config_u {
unsigned long rsvd_17_31:15;
unsigned long apic_id:32; /* RW */
} s;
-};
-/* ========================================================================= */
-/* UVH_GR1_TLB_MMR_CONTROL */
-/* ========================================================================= */
-#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
-#define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL
-#define UV4H_GR1_TLB_MMR_CONTROL 0x701080UL
-#define UVH_GR1_TLB_MMR_CONTROL ( \
- is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL : \
- is_uv3_hub() ? UV3H_GR1_TLB_MMR_CONTROL : \
- /*is_uv4_hub*/ UV4H_GR1_TLB_MMR_CONTROL)
-
-#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-
-#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-
-#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
-#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
-#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
-#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
-#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
-
-#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
-#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
-#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-
-#define UV4H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
-#define UV4H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 13
-#define UV4H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
-#define UV4H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
-#define UV4H_GR1_TLB_MMR_CONTROL_PAGE_SIZE_SHFT 59
-#define UV4H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000001fffUL
-#define UV4H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000006000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
-#define UV4H_GR1_TLB_MMR_CONTROL_PAGE_SIZE_MASK 0xf800000000000000UL
-
-
-union uvh_gr1_tlb_mmr_control_u {
- unsigned long v;
- struct uvh_gr1_tlb_mmr_control_s {
- unsigned long rsvd_0_15:16;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long rsvd_32_48:17;
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_63:12;
- } s;
- struct uvxh_gr1_tlb_mmr_control_s {
- unsigned long rsvd_0_15:16;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long rsvd_48:1;
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_63:12;
+
+ /* UVXH common struct */
+ struct uvxh_gr1_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} sx;
- struct uv2h_gr1_tlb_mmr_control_s {
- unsigned long index:12; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_14_15:2;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long rsvd_21_29:9;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long mmr_inj_con:1; /* RW */
- unsigned long rsvd_49_51:3;
- unsigned long mmr_inj_tlbram:1; /* RW */
- unsigned long rsvd_53_63:11;
- } s2;
- struct uv3h_gr1_tlb_mmr_control_s {
- unsigned long index:12; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_14_15:2;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long ecc_sel:1; /* RW */
- unsigned long rsvd_22_29:8;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long undef_48:1; /* Undefined */
- unsigned long rsvd_49_51:3;
- unsigned long undef_52:1; /* Undefined */
- unsigned long rsvd_53_63:11;
- } s3;
- struct uv4h_gr1_tlb_mmr_control_s {
- unsigned long index:13; /* RW */
- unsigned long mem_sel:2; /* RW */
- unsigned long rsvd_15:1;
- unsigned long auto_valid_en:1; /* RW */
- unsigned long rsvd_17_19:3;
- unsigned long mmr_hash_index_en:1; /* RW */
- unsigned long ecc_sel:1; /* RW */
- unsigned long rsvd_22_29:8;
- unsigned long mmr_write:1; /* WP */
- unsigned long mmr_read:1; /* WP */
- unsigned long mmr_op_done:1; /* RW */
- unsigned long rsvd_33_47:15;
- unsigned long undef_48:1; /* Undefined */
- unsigned long rsvd_49_51:3;
- unsigned long rsvd_52_58:7;
- unsigned long page_size:5; /* RW */
+
+ /* UV4 unique struct */
+ struct uv4h_gr1_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s4;
-};
-/* ========================================================================= */
-/* UVH_GR1_TLB_MMR_READ_DATA_HI */
-/* ========================================================================= */
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI 0x7010a0UL
-#define UVH_GR1_TLB_MMR_READ_DATA_HI ( \
- is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI : \
- is_uv3_hub() ? UV3H_GR1_TLB_MMR_READ_DATA_HI : \
- /*is_uv4_hub*/ UV4H_GR1_TLB_MMR_READ_DATA_HI)
-
-#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-
-#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
-#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
-
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
-
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PNID_SHFT 34
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 49
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 51
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 52
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 53
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x00000003ffffffffUL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PNID_MASK 0x0001fffc00000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0006000000000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0008000000000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0010000000000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0020000000000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
-
-
-union uvh_gr1_tlb_mmr_read_data_hi_u {
- unsigned long v;
- struct uv2h_gr1_tlb_mmr_read_data_hi_s {
- unsigned long pfn:41; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long rsvd_45_63:19;
- } s2;
- struct uv3h_gr1_tlb_mmr_read_data_hi_s {
- unsigned long pfn:41; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long aa_ext:1; /* RO */
- unsigned long undef_46_54:9; /* Undefined */
- unsigned long way_ecc:9; /* RO */
+
+ /* UV3 unique struct */
+ struct uv3h_gr1_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s3;
- struct uv4h_gr1_tlb_mmr_read_data_hi_s {
- unsigned long pfn:34; /* RO */
- unsigned long pnid:15; /* RO */
- unsigned long gaa:2; /* RO */
- unsigned long dirty:1; /* RO */
- unsigned long larger:1; /* RO */
- unsigned long aa_ext:1; /* RO */
- unsigned long undef_54:1; /* Undefined */
- unsigned long way_ecc:9; /* RO */
- } s4;
-};
-/* ========================================================================= */
-/* UVH_GR1_TLB_MMR_READ_DATA_LO */
-/* ========================================================================= */
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO 0x7010a8UL
-#define UVH_GR1_TLB_MMR_READ_DATA_LO ( \
- is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO : \
- is_uv3_hub() ? UV3H_GR1_TLB_MMR_READ_DATA_LO : \
- /*is_uv4_hub*/ UV4H_GR1_TLB_MMR_READ_DATA_LO)
-
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
-#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
-
-
-union uvh_gr1_tlb_mmr_read_data_lo_u {
- unsigned long v;
- struct uvh_gr1_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s;
- struct uvxh_gr1_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } sx;
- struct uv2h_gr1_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
+
+ /* UV2 unique struct */
+ struct uv2h_gr1_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s2;
- struct uv3h_gr1_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s3;
- struct uv4h_gr1_tlb_mmr_read_data_lo_s {
- unsigned long vpn:39; /* RO */
- unsigned long asid:24; /* RO */
- unsigned long valid:1; /* RO */
- } s4;
};
/* ========================================================================= */
@@ -1452,52 +2661,43 @@ union uvh_gr1_tlb_mmr_read_data_lo_u {
/* ========================================================================= */
#define UVH_INT_CMPB 0x22080UL
+/* UVH common defines */
#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
union uvh_int_cmpb_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_int_cmpb_s {
unsigned long real_time_cmpb:56; /* RW */
unsigned long rsvd_56_63:8;
} s;
-};
-
-/* ========================================================================= */
-/* UVH_INT_CMPC */
-/* ========================================================================= */
-#define UVH_INT_CMPC 0x22100UL
-
-
-#define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT 0
-#define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK 0x00ffffffffffffffUL
-
-union uvh_int_cmpc_u {
- unsigned long v;
- struct uvh_int_cmpc_s {
- unsigned long real_time_cmpc:56; /* RW */
+
+ /* UV5 unique struct */
+ struct uv5h_int_cmpb_s {
+ unsigned long real_time_cmpb:56; /* RW */
  unsigned long rsvd_56_63:8;
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_INT_CMPD */
-/* ========================================================================= */
-#define UVH_INT_CMPD 0x22180UL
-
-
-#define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT 0
-#define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK 0x00ffffffffffffffUL
-
-union uvh_int_cmpd_u {
- unsigned long v;
- struct uvh_int_cmpd_s {
- unsigned long real_time_cmpd:56; /* RW */
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_int_cmpb_s {
+ unsigned long real_time_cmpb:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_int_cmpb_s {
+ unsigned long real_time_cmpb:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_int_cmpb_s {
+ unsigned long real_time_cmpb:56; /* RW */
  unsigned long rsvd_56_63:8;
- } s;
+ } s2;
 };
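
UVH_INT_CMPB keeps the same 56-bit comparator layout on every generation, which is why the RTC clockevent code can program it through the common view. A sketch assuming uv_write_local_mmr(); the helper name is illustrative:

	static void uv_arm_rtc_cmpb(unsigned long expires)
	{
		union uvh_int_cmpb_u cmp = { .v = 0 };

		cmp.s.real_time_cmpb = expires;	/* common view is valid on UV2..UV5 */
		uv_write_local_mmr(UVH_INT_CMPB, cmp.v);
	}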
/* ========================================================================= */
@@ -1505,28 +2705,23 @@ union uvh_int_cmpd_u {
/* ========================================================================= */
#define UVH_IPI_INT 0x60500UL
-#define UV2H_IPI_INT_32 0x348
-#define UV3H_IPI_INT_32 0x348
-#define UV4H_IPI_INT_32 0x268
-#define UVH_IPI_INT_32 ( \
- is_uv2_hub() ? UV2H_IPI_INT_32 : \
- is_uv3_hub() ? UV3H_IPI_INT_32 : \
- /*is_uv4_hub*/ UV4H_IPI_INT_32)
-
+/* UVH common defines */
#define UVH_IPI_INT_VECTOR_SHFT 0
-#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
-#define UVH_IPI_INT_DESTMODE_SHFT 11
-#define UVH_IPI_INT_APIC_ID_SHFT 16
-#define UVH_IPI_INT_SEND_SHFT 63
#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
#define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL
+#define UVH_IPI_INT_DESTMODE_SHFT 11
#define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_IPI_INT_APIC_ID_SHFT 16
#define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL
+#define UVH_IPI_INT_SEND_SHFT 63
#define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL
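
These are the fields uv_send_IPI_one() composes when firing an IPI at a remote hub; roughly, with argument types simplified and uv_write_global_mmr64() from <asm/uv/uv_hub.h>:

	static void uv_ipi_sketch(int pnode, int apicid, int vector, unsigned long dmode)
	{
		unsigned long val;

		val = (1UL << UVH_IPI_INT_SEND_SHFT) |
		      ((unsigned long)apicid << UVH_IPI_INT_APIC_ID_SHFT) |
		      (dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
		      ((unsigned long)vector << UVH_IPI_INT_VECTOR_SHFT);
		uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
	}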
union uvh_ipi_int_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_ipi_int_s {
unsigned long vector_:8; /* RW */
unsigned long delivery_mode:3; /* RW */
@@ -1536,903 +2731,105 @@ union uvh_ipi_int_u {
unsigned long rsvd_48_62:15;
unsigned long send:1; /* WP */
} s;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
-/* ========================================================================= */
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
-#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST")
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST ( \
- is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST : \
- is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST : \
- /*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST)
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
-
-
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
-
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
-
-
-union uvh_lb_bau_intd_payload_queue_first_u {
- unsigned long v;
- struct uv2h_lb_bau_intd_payload_queue_first_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_48:6;
- unsigned long node_id:14; /* RW */
- unsigned long rsvd_63:1;
- } s2;
- struct uv3h_lb_bau_intd_payload_queue_first_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_48:6;
- unsigned long node_id:14; /* RW */
- unsigned long rsvd_63:1;
- } s3;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
-/* ========================================================================= */
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
-#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST")
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST ( \
- is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST : \
- is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST : \
- /*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST)
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
-
-
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
-
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
-
-
-union uvh_lb_bau_intd_payload_queue_last_u {
- unsigned long v;
- struct uv2h_lb_bau_intd_payload_queue_last_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_63:21;
- } s2;
- struct uv3h_lb_bau_intd_payload_queue_last_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_63:21;
- } s3;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
-/* ========================================================================= */
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
-#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL")
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL ( \
- is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL : \
- is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL : \
- /*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL)
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
-
-
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
-#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
-
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
-#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
+ /* UV5 unique struct */
+ struct uv5h_ipi_int_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long delivery_mode:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long rsvd_12_15:4;
+ unsigned long apic_id:32; /* RW */
+ unsigned long rsvd_48_62:15;
+ unsigned long send:1; /* WP */
+ } s5;
-union uvh_lb_bau_intd_payload_queue_tail_u {
- unsigned long v;
- struct uv2h_lb_bau_intd_payload_queue_tail_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_63:21;
- } s2;
- struct uv3h_lb_bau_intd_payload_queue_tail_s {
- unsigned long rsvd_0_3:4;
- unsigned long address:39; /* RW */
- unsigned long rsvd_43_63:21;
- } s3;
-};
+ /* UV4 unique struct */
+ struct uv4h_ipi_int_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long delivery_mode:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long rsvd_12_15:4;
+ unsigned long apic_id:32; /* RW */
+ unsigned long rsvd_48_62:15;
+ unsigned long send:1; /* WP */
+ } s4;
-/* ========================================================================= */
-/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
-/* ========================================================================= */
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
-#define UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE uv_undefined("UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE")
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE ( \
- is_uv2_hub() ? UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE : \
- is_uv3_hub() ? UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE : \
- /*is_uv4_hub*/ UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE)
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
-
-
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
-
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
-
-
-union uvh_lb_bau_intd_software_acknowledge_u {
- unsigned long v;
- struct uv2h_lb_bau_intd_software_acknowledge_s {
- unsigned long pending_0:1; /* RW */
- unsigned long pending_1:1; /* RW */
- unsigned long pending_2:1; /* RW */
- unsigned long pending_3:1; /* RW */
- unsigned long pending_4:1; /* RW */
- unsigned long pending_5:1; /* RW */
- unsigned long pending_6:1; /* RW */
- unsigned long pending_7:1; /* RW */
- unsigned long timeout_0:1; /* RW */
- unsigned long timeout_1:1; /* RW */
- unsigned long timeout_2:1; /* RW */
- unsigned long timeout_3:1; /* RW */
- unsigned long timeout_4:1; /* RW */
- unsigned long timeout_5:1; /* RW */
- unsigned long timeout_6:1; /* RW */
- unsigned long timeout_7:1; /* RW */
- unsigned long rsvd_16_63:48;
- } s2;
- struct uv3h_lb_bau_intd_software_acknowledge_s {
- unsigned long pending_0:1; /* RW */
- unsigned long pending_1:1; /* RW */
- unsigned long pending_2:1; /* RW */
- unsigned long pending_3:1; /* RW */
- unsigned long pending_4:1; /* RW */
- unsigned long pending_5:1; /* RW */
- unsigned long pending_6:1; /* RW */
- unsigned long pending_7:1; /* RW */
- unsigned long timeout_0:1; /* RW */
- unsigned long timeout_1:1; /* RW */
- unsigned long timeout_2:1; /* RW */
- unsigned long timeout_3:1; /* RW */
- unsigned long timeout_4:1; /* RW */
- unsigned long timeout_5:1; /* RW */
- unsigned long timeout_6:1; /* RW */
- unsigned long timeout_7:1; /* RW */
- unsigned long rsvd_16_63:48;
+ /* UV3 unique struct */
+ struct uv3h_ipi_int_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long delivery_mode:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long rsvd_12_15:4;
+ unsigned long apic_id:32; /* RW */
+ unsigned long rsvd_48_62:15;
+ unsigned long send:1; /* WP */
} s3;
-};
-/* ========================================================================= */
-/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
-/* ========================================================================= */
-#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
-#define UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS uv_undefined("UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS")
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS ( \
- is_uv2_hub() ? UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS : \
- is_uv3_hub() ? UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS : \
- /*is_uv4_hub*/ UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS)
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
-
-
-/* ========================================================================= */
-/* UVH_LB_BAU_MISC_CONTROL */
-/* ========================================================================= */
-#define UV2H_LB_BAU_MISC_CONTROL 0x320170UL
-#define UV3H_LB_BAU_MISC_CONTROL 0x320170UL
-#define UV4H_LB_BAU_MISC_CONTROL 0xc8170UL
-#define UVH_LB_BAU_MISC_CONTROL ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL)
-
-#define UV2H_LB_BAU_MISC_CONTROL_32 0xa10
-#define UV3H_LB_BAU_MISC_CONTROL_32 0xa10
-#define UV4H_LB_BAU_MISC_CONTROL_32 0xa18
-#define UVH_LB_BAU_MISC_CONTROL_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_32)
-
-#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
-#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
-#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
-#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
-#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
-#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
-#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
-#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
-#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
-#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
-#define UVXH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
-#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
-#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
-#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
-#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
-#define UVXH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
-#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
-#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
-#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
-#define UV2H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
-#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
-#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
-#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
-#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
-#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
-#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
-#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
-#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
-#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
-#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT 37
-#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38
-#define UV3H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
-#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
-#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
-#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
-#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
-#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_MASK 0x0000002000000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
-#define UV3H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
-#define UV4H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
-#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
-#define UV4H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
-#define UV4H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
-#define UV4H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UV4H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_15_19_SHFT 15
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
-#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
-#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
-#define UV4H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
-#define UV4H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
-#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
-#define UV4H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36
-#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_37_SHFT 37
-#define UV4H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38
-#define UV4H_LB_BAU_MISC_CONTROL_ADDRESS_INTERLEAVE_SELECT_SHFT 46
-#define UV4H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UV4H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
-#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
-#define UV4H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
-#define UV4H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UV4H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
-#define UV4H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_15_19_MASK 0x00000000000f8000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
-#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
-#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
-#define UV4H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_37_MASK 0x0000002000000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_ADDRESS_INTERLEAVE_SELECT_MASK 0x0000400000000000UL
-#define UV4H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
-
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK \
- uv_undefined("UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK")
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK)
-#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT \
- uv_undefined("UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT")
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT)
-#define UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK \
- uv_undefined("UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK")
-#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK)
-#define UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT \
- uv_undefined("UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT")
-#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT ( \
- is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \
- is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \
- /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT)
-
-union uvh_lb_bau_misc_control_u {
- unsigned long v;
- struct uvh_lb_bau_misc_control_s {
- unsigned long rejection_delay:8; /* RW */
- unsigned long apic_mode:1; /* RW */
- unsigned long force_broadcast:1; /* RW */
- unsigned long force_lock_nop:1; /* RW */
- unsigned long qpi_agent_presence_vector:3; /* RW */
- unsigned long descriptor_fetch_mode:1; /* RW */
- unsigned long rsvd_15_19:5;
- unsigned long enable_dual_mapping_mode:1; /* RW */
- unsigned long vga_io_port_decode_enable:1; /* RW */
- unsigned long vga_io_port_16_bit_decode:1; /* RW */
- unsigned long suppress_dest_registration:1; /* RW */
- unsigned long programmed_initial_priority:3; /* RW */
- unsigned long use_incoming_priority:1; /* RW */
- unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long rsvd_29_47:19;
- unsigned long fun:16; /* RW */
- } s;
- struct uvxh_lb_bau_misc_control_s {
- unsigned long rejection_delay:8; /* RW */
- unsigned long apic_mode:1; /* RW */
- unsigned long force_broadcast:1; /* RW */
- unsigned long force_lock_nop:1; /* RW */
- unsigned long qpi_agent_presence_vector:3; /* RW */
- unsigned long descriptor_fetch_mode:1; /* RW */
- unsigned long rsvd_15_19:5;
- unsigned long enable_dual_mapping_mode:1; /* RW */
- unsigned long vga_io_port_decode_enable:1; /* RW */
- unsigned long vga_io_port_16_bit_decode:1; /* RW */
- unsigned long suppress_dest_registration:1; /* RW */
- unsigned long programmed_initial_priority:3; /* RW */
- unsigned long use_incoming_priority:1; /* RW */
- unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long enable_automatic_apic_mode_selection:1;/* RW */
- unsigned long apic_mode_status:1; /* RO */
- unsigned long suppress_interrupts_to_self:1; /* RW */
- unsigned long enable_lock_based_system_flush:1;/* RW */
- unsigned long enable_extended_sb_status:1; /* RW */
- unsigned long suppress_int_prio_udt_to_self:1;/* RW */
- unsigned long use_legacy_descriptor_formats:1;/* RW */
- unsigned long rsvd_36_47:12;
- unsigned long fun:16; /* RW */
- } sx;
- struct uv2h_lb_bau_misc_control_s {
- unsigned long rejection_delay:8; /* RW */
- unsigned long apic_mode:1; /* RW */
- unsigned long force_broadcast:1; /* RW */
- unsigned long force_lock_nop:1; /* RW */
- unsigned long qpi_agent_presence_vector:3; /* RW */
- unsigned long descriptor_fetch_mode:1; /* RW */
- unsigned long enable_intd_soft_ack_mode:1; /* RW */
- unsigned long intd_soft_ack_timeout_period:4; /* RW */
- unsigned long enable_dual_mapping_mode:1; /* RW */
- unsigned long vga_io_port_decode_enable:1; /* RW */
- unsigned long vga_io_port_16_bit_decode:1; /* RW */
- unsigned long suppress_dest_registration:1; /* RW */
- unsigned long programmed_initial_priority:3; /* RW */
- unsigned long use_incoming_priority:1; /* RW */
- unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long enable_automatic_apic_mode_selection:1;/* RW */
- unsigned long apic_mode_status:1; /* RO */
- unsigned long suppress_interrupts_to_self:1; /* RW */
- unsigned long enable_lock_based_system_flush:1;/* RW */
- unsigned long enable_extended_sb_status:1; /* RW */
- unsigned long suppress_int_prio_udt_to_self:1;/* RW */
- unsigned long use_legacy_descriptor_formats:1;/* RW */
- unsigned long rsvd_36_47:12;
- unsigned long fun:16; /* RW */
+ /* UV2 unique struct */
+ struct uv2h_ipi_int_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long delivery_mode:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long rsvd_12_15:4;
+ unsigned long apic_id:32; /* RW */
+ unsigned long rsvd_48_62:15;
+ unsigned long send:1; /* WP */
} s2;
- struct uv3h_lb_bau_misc_control_s {
- unsigned long rejection_delay:8; /* RW */
- unsigned long apic_mode:1; /* RW */
- unsigned long force_broadcast:1; /* RW */
- unsigned long force_lock_nop:1; /* RW */
- unsigned long qpi_agent_presence_vector:3; /* RW */
- unsigned long descriptor_fetch_mode:1; /* RW */
- unsigned long enable_intd_soft_ack_mode:1; /* RW */
- unsigned long intd_soft_ack_timeout_period:4; /* RW */
- unsigned long enable_dual_mapping_mode:1; /* RW */
- unsigned long vga_io_port_decode_enable:1; /* RW */
- unsigned long vga_io_port_16_bit_decode:1; /* RW */
- unsigned long suppress_dest_registration:1; /* RW */
- unsigned long programmed_initial_priority:3; /* RW */
- unsigned long use_incoming_priority:1; /* RW */
- unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long enable_automatic_apic_mode_selection:1;/* RW */
- unsigned long apic_mode_status:1; /* RO */
- unsigned long suppress_interrupts_to_self:1; /* RW */
- unsigned long enable_lock_based_system_flush:1;/* RW */
- unsigned long enable_extended_sb_status:1; /* RW */
- unsigned long suppress_int_prio_udt_to_self:1;/* RW */
- unsigned long use_legacy_descriptor_formats:1;/* RW */
- unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */
- unsigned long enable_intd_prefetch_hint:1; /* RW */
- unsigned long thread_kill_timebase:8; /* RW */
- unsigned long rsvd_46_47:2;
- unsigned long fun:16; /* RW */
- } s3;
- struct uv4h_lb_bau_misc_control_s {
- unsigned long rejection_delay:8; /* RW */
- unsigned long apic_mode:1; /* RW */
- unsigned long force_broadcast:1; /* RW */
- unsigned long force_lock_nop:1; /* RW */
- unsigned long qpi_agent_presence_vector:3; /* RW */
- unsigned long descriptor_fetch_mode:1; /* RW */
- unsigned long rsvd_15_19:5;
- unsigned long enable_dual_mapping_mode:1; /* RW */
- unsigned long vga_io_port_decode_enable:1; /* RW */
- unsigned long vga_io_port_16_bit_decode:1; /* RW */
- unsigned long suppress_dest_registration:1; /* RW */
- unsigned long programmed_initial_priority:3; /* RW */
- unsigned long use_incoming_priority:1; /* RW */
- unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long enable_automatic_apic_mode_selection:1;/* RW */
- unsigned long apic_mode_status:1; /* RO */
- unsigned long suppress_interrupts_to_self:1; /* RW */
- unsigned long enable_lock_based_system_flush:1;/* RW */
- unsigned long enable_extended_sb_status:1; /* RW */
- unsigned long suppress_int_prio_udt_to_self:1;/* RW */
- unsigned long use_legacy_descriptor_formats:1;/* RW */
- unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */
- unsigned long rsvd_37:1;
- unsigned long thread_kill_timebase:8; /* RW */
- unsigned long address_interleave_select:1; /* RW */
- unsigned long rsvd_47:1;
- unsigned long fun:16; /* RW */
- } s4;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
-/* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UV3H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UV4H_LB_BAU_SB_ACTIVATION_CONTROL 0xc8020UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_CONTROL : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_CONTROL : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_CONTROL)
-
-#define UV2H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
-#define UV3H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
-#define UV4H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9c8
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_CONTROL_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_CONTROL_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_CONTROL_32)
-
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL
-
-
-union uvh_lb_bau_sb_activation_control_u {
- unsigned long v;
- struct uvh_lb_bau_sb_activation_control_s {
- unsigned long index:6; /* RW */
- unsigned long rsvd_6_61:56;
- unsigned long push:1; /* WP */
- unsigned long init:1; /* WP */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
-/* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_0 0xc8030UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_0 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_0 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_0)
-
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9d0
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_0_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_0_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_0_32)
-
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
-
-
-union uvh_lb_bau_sb_activation_status_0_u {
- unsigned long v;
- struct uvh_lb_bau_sb_activation_status_0_s {
- unsigned long status:64; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
-/* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_1 0xc8040UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_1 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_1 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_1)
-
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9d8
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_1_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_1_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_1_32)
-
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
-
-
-union uvh_lb_bau_sb_activation_status_1_u {
- unsigned long v;
- struct uvh_lb_bau_sb_activation_status_1_s {
- unsigned long status:64; /* RW */
- } s;
};
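
With the SHFT/MASK pairs now listed per field, composing an IPI request is a plain OR of shifted fields. A sketch in the spirit of the uv_hub_ipi_value() helper in <asm/uv/uv_hub.h> (illustration only, not part of this patch); the result is written to the target hub's UVH_IPI_INT MMR, and the send bit (bit 63, write-pulse) fires the interrupt:

	static unsigned long uv_ipi_value(int apicid, int vector, int mode)
	{
		return (1UL << UVH_IPI_INT_SEND_SHFT) |
		       ((unsigned long)apicid << UVH_IPI_INT_APIC_ID_SHFT) |
		       ((unsigned long)mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
		       ((unsigned long)vector << UVH_IPI_INT_VECTOR_SHFT);
	}
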
/* ========================================================================= */
-/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
-/* ========================================================================= */
-#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE 0xc8010UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE)
-
-#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
-#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
-#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9c0
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32)
-
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
-
-#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
-#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
-#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
-
-#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
-#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
-#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
-
-#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
-#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x00003ffffffff000UL
-#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
-
-#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 53
-#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000ffffffffff000UL
-#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0xffe0000000000000UL
-
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
- is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT)
-
-#define UVH_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \
- is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK)
-
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
- is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK)
-
-/* ========================================================================= */
/* UVH_NODE_ID */
/* ========================================================================= */
#define UVH_NODE_ID 0x0UL
-#define UV2H_NODE_ID 0x0UL
-#define UV3H_NODE_ID 0x0UL
-#define UV4H_NODE_ID 0x0UL
+/* UVH common defines */
#define UVH_NODE_ID_FORCE1_SHFT 0
-#define UVH_NODE_ID_MANUFACTURER_SHFT 1
-#define UVH_NODE_ID_PART_NUMBER_SHFT 12
-#define UVH_NODE_ID_REVISION_SHFT 28
-#define UVH_NODE_ID_NODE_ID_SHFT 32
#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UVH_NODE_ID_MANUFACTURER_SHFT 1
#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UVH_NODE_ID_PART_NUMBER_SHFT 12
#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UVH_NODE_ID_REVISION_SHFT 28
#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UVH_NODE_ID_NODE_ID_SHFT 32
+#define UVH_NODE_ID_NI_PORT_SHFT 57
-#define UVXH_NODE_ID_FORCE1_SHFT 0
-#define UVXH_NODE_ID_MANUFACTURER_SHFT 1
-#define UVXH_NODE_ID_PART_NUMBER_SHFT 12
-#define UVXH_NODE_ID_REVISION_SHFT 28
-#define UVXH_NODE_ID_NODE_ID_SHFT 32
-#define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50
-#define UVXH_NODE_ID_NI_PORT_SHFT 57
-#define UVXH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UVXH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UVXH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UVXH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+/* UVXH common defines */
#define UVXH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50
#define UVXH_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
#define UVXH_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
-#define UV2H_NODE_ID_FORCE1_SHFT 0
-#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
-#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
-#define UV2H_NODE_ID_REVISION_SHFT 28
-#define UV2H_NODE_ID_NODE_ID_SHFT 32
-#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50
-#define UV2H_NODE_ID_NI_PORT_SHFT 57
-#define UV2H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UV2H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UV2H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UV2H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UV2H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
-#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
-
-#define UV3H_NODE_ID_FORCE1_SHFT 0
-#define UV3H_NODE_ID_MANUFACTURER_SHFT 1
-#define UV3H_NODE_ID_PART_NUMBER_SHFT 12
-#define UV3H_NODE_ID_REVISION_SHFT 28
-#define UV3H_NODE_ID_NODE_ID_SHFT 32
-#define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48
-#define UV3H_NODE_ID_RESERVED_2_SHFT 49
-#define UV3H_NODE_ID_NODES_PER_BIT_SHFT 50
-#define UV3H_NODE_ID_NI_PORT_SHFT 57
-#define UV3H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UV3H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UV3H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UV3H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UV3H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-#define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL
-#define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL
-#define UV3H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
-#define UV3H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
-
-#define UV4H_NODE_ID_FORCE1_SHFT 0
-#define UV4H_NODE_ID_MANUFACTURER_SHFT 1
-#define UV4H_NODE_ID_PART_NUMBER_SHFT 12
-#define UV4H_NODE_ID_REVISION_SHFT 28
-#define UV4H_NODE_ID_NODE_ID_SHFT 32
+/* UVYH common defines */
+#define UVYH_NODE_ID_NODE_ID_MASK 0x0000007f00000000UL
+#define UVYH_NODE_ID_NI_PORT_MASK 0x7e00000000000000UL
+
+/* UV4 unique defines */
#define UV4H_NODE_ID_ROUTER_SELECT_SHFT 48
-#define UV4H_NODE_ID_RESERVED_2_SHFT 49
-#define UV4H_NODE_ID_NODES_PER_BIT_SHFT 50
-#define UV4H_NODE_ID_NI_PORT_SHFT 57
-#define UV4H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
-#define UV4H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
-#define UV4H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
-#define UV4H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
-#define UV4H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
#define UV4H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL
+#define UV4H_NODE_ID_RESERVED_2_SHFT 49
#define UV4H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL
-#define UV4H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
-#define UV4H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+
+/* UV3 unique defines */
+#define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48
+#define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL
+#define UV3H_NODE_ID_RESERVED_2_SHFT 49
+#define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL
union uvh_node_id_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
unsigned long part_number:16; /* RO */
unsigned long revision:4; /* RO */
- unsigned long node_id:15; /* RW */
- unsigned long rsvd_47_63:17;
+ unsigned long rsvd_32_63:32;
} s;
+
+ /* UVXH common struct */
struct uvxh_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
@@ -2444,17 +2841,47 @@ union uvh_node_id_u {
unsigned long ni_port:5; /* RO */
unsigned long rsvd_62_63:2;
} sx;
- struct uv2h_node_id_s {
+
+ /* UVYH common struct */
+ struct uvyh_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:7; /* RW */
+ unsigned long rsvd_39_56:18;
+ unsigned long ni_port:6; /* RO */
+ unsigned long rsvd_63:1;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:7; /* RW */
+ unsigned long rsvd_39_56:18;
+ unsigned long ni_port:6; /* RO */
+ unsigned long rsvd_63:1;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
unsigned long part_number:16; /* RO */
unsigned long revision:4; /* RO */
unsigned long node_id:15; /* RW */
- unsigned long rsvd_47_49:3;
+ unsigned long rsvd_47:1;
+ unsigned long router_select:1; /* RO */
+ unsigned long rsvd_49:1;
unsigned long nodes_per_bit:7; /* RO */
unsigned long ni_port:5; /* RO */
unsigned long rsvd_62_63:2;
- } s2;
+ } s4;
+
+ /* UV3 unique struct */
struct uv3h_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
@@ -2468,186 +2895,569 @@ union uvh_node_id_u {
unsigned long ni_port:5; /* RO */
unsigned long rsvd_62_63:2;
} s3;
- struct uv4h_node_id_s {
+
+ /* UV2 unique struct */
+ struct uv2h_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
unsigned long part_number:16; /* RO */
unsigned long revision:4; /* RO */
unsigned long node_id:15; /* RW */
- unsigned long rsvd_47:1;
- unsigned long router_select:1; /* RO */
- unsigned long rsvd_49:1;
+ unsigned long rsvd_47_49:3;
unsigned long nodes_per_bit:7; /* RO */
unsigned long ni_port:5; /* RO */
unsigned long rsvd_62_63:2;
- } s4;
+ } s2;
+};
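
Note the narrowing on UVY parts: node_id drops from 15 bits (sx view) to 7 bits (sy view) and ni_port widens to 6 bits, so callers must select the view by hub generation. A sketch using the is_uv() test this header now relies on:

	union uvh_node_id_u node_id;
	int nid;

	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	nid = is_uv(UV5) ? node_id.sy.node_id : node_id.sx.node_id;
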
+
+/* ========================================================================= */
+/* UVH_NODE_PRESENT_0 */
+/* ========================================================================= */
+#define UVH_NODE_PRESENT_0 ( \
+ is_uv(UV5) ? 0x1400UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_NODE_PRESENT_0_NODES_SHFT 0
+#define UVYH_NODE_PRESENT_0_NODES_MASK 0xffffffffffffffffUL
+
+
+union uvh_node_present_0_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_node_present_0_s {
+ unsigned long nodes:64; /* RW */
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_node_present_0_s {
+ unsigned long nodes:64; /* RW */
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_node_present_0_s {
+ unsigned long nodes:64; /* RW */
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_NODE_PRESENT_1 */
+/* ========================================================================= */
+#define UVH_NODE_PRESENT_1 ( \
+ is_uv(UV5) ? 0x1408UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_NODE_PRESENT_1_NODES_SHFT 0
+#define UVYH_NODE_PRESENT_1_NODES_MASK 0xffffffffffffffffUL
+
+
+union uvh_node_present_1_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_node_present_1_s {
+ unsigned long nodes:64; /* RW */
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_node_present_1_s {
+ unsigned long nodes:64; /* RW */
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_node_present_1_s {
+ unsigned long nodes:64; /* RW */
+ } s5;
};
/* ========================================================================= */
/* UVH_NODE_PRESENT_TABLE */
/* ========================================================================= */
-#define UVH_NODE_PRESENT_TABLE 0x1400UL
+#define UVH_NODE_PRESENT_TABLE ( \
+ is_uv(UV4) ? 0x1400UL : \
+ is_uv(UV3) ? 0x1400UL : \
+ is_uv(UV2) ? 0x1400UL : \
+ 0)
-#define UV2H_NODE_PRESENT_TABLE_DEPTH 16
-#define UV3H_NODE_PRESENT_TABLE_DEPTH 16
-#define UV4H_NODE_PRESENT_TABLE_DEPTH 4
#define UVH_NODE_PRESENT_TABLE_DEPTH ( \
- is_uv2_hub() ? UV2H_NODE_PRESENT_TABLE_DEPTH : \
- is_uv3_hub() ? UV3H_NODE_PRESENT_TABLE_DEPTH : \
- /*is_uv4_hub*/ UV4H_NODE_PRESENT_TABLE_DEPTH)
+ is_uv(UV4) ? 4 : \
+ is_uv(UV3) ? 16 : \
+ is_uv(UV2) ? 16 : \
+ 0)
-#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
-#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
+
+/* UVXH common defines */
+#define UVXH_NODE_PRESENT_TABLE_NODES_SHFT 0
+#define UVXH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
union uvh_node_present_table_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_node_present_table_s {
unsigned long nodes:64; /* RW */
} s;
+
+ /* UVXH common struct */
+ struct uvxh_node_present_table_s {
+ unsigned long nodes:64; /* RW */
+ } sx;
+
+ /* UV4 unique struct */
+ struct uv4h_node_present_table_s {
+ unsigned long nodes:64; /* RW */
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_node_present_table_s {
+ unsigned long nodes:64; /* RW */
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_node_present_table_s {
+ unsigned long nodes:64; /* RW */
+ } s2;
+};
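
Pre-UV5 hubs still scan the table word by word; since UVH_NODE_PRESENT_TABLE_DEPTH evaluates to 0 on UV5, the same loop degenerates harmlessly there and the NODE_PRESENT_0/1 pair above takes over. A sketch, assuming the usual 8-byte stride between table words:

	int i, blades = 0;

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		blades += hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
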
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_ADDR_MAP_CONFIG */
+/* ========================================================================= */
+#define UVH_RH10_GAM_ADDR_MAP_CONFIG ( \
+ is_uv(UV5) ? 0x470000UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_N_SKT_SHFT 6
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_N_SKT_MASK 0x00000000000001c0UL
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_LS_ENABLE_SHFT 12
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_LS_ENABLE_MASK 0x0000000000001000UL
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_MK_TME_KEYID_BITS_SHFT 16
+#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_MK_TME_KEYID_BITS_MASK 0x00000000000f0000UL
+
+
+union uvh_rh10_gam_addr_map_config_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_addr_map_config_s {
+ unsigned long undef_0_5:6; /* Undefined */
+ unsigned long n_skt:3; /* RW */
+ unsigned long undef_9_11:3; /* Undefined */
+ unsigned long ls_enable:1; /* RW */
+ unsigned long undef_13_15:3; /* Undefined */
+ unsigned long mk_tme_keyid_bits:4; /* RW */
+ unsigned long rsvd_20_63:44;
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_addr_map_config_s {
+ unsigned long undef_0_5:6; /* Undefined */
+ unsigned long n_skt:3; /* RW */
+ unsigned long undef_9_11:3; /* Undefined */
+ unsigned long ls_enable:1; /* RW */
+ unsigned long undef_13_15:3; /* Undefined */
+ unsigned long mk_tme_keyid_bits:4; /* RW */
+ unsigned long rsvd_20_63:44;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_addr_map_config_s {
+ unsigned long undef_0_5:6; /* Undefined */
+ unsigned long n_skt:3; /* RW */
+ unsigned long undef_9_11:3; /* Undefined */
+ unsigned long ls_enable:1; /* RW */
+ unsigned long undef_13_15:3; /* Undefined */
+ unsigned long mk_tme_keyid_bits:4; /* RW */
+ unsigned long rsvd_20_63:44;
+ } s5;
};
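
A field can be decoded either through the bitfield view or with the raw SHFT/MASK pair; both lines below recover the socket-count exponent n_skt (bits 6..8) and are equivalent by construction:

	union uvh_rh10_gam_addr_map_config_u cfg;
	unsigned long n_skt;

	cfg.v = uv_read_local_mmr(UVH_RH10_GAM_ADDR_MAP_CONFIG);
	n_skt = cfg.s5.n_skt;					/* bitfield view */
	n_skt = (cfg.v & UVYH_RH10_GAM_ADDR_MAP_CONFIG_N_SKT_MASK) >>
		UVYH_RH10_GAM_ADDR_MAP_CONFIG_N_SKT_SHFT;	/* mask/shift */
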
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
+/* UVH_RH10_GAM_GRU_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x4800c8UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR)
-
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-
-union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
+#define UVH_RH10_GAM_GRU_OVERLAY_CONFIG ( \
+ is_uv(UV5) ? 0x4700b0UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 25
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x000ffffffe000000UL
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_N_GRU_SHFT 52
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_N_GRU_MASK 0x0070000000000000UL
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_MASK ( \
+ is_uv(UV5) ? 0x000ffffffe000000UL : \
+ 0)
+#define UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT ( \
+ is_uv(UV5) ? 25 : \
+ -1)
+
+union uvh_rh10_gam_gru_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_gru_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long n_gru:3; /* RW */
+ unsigned long undef_55_62:8; /* Undefined */
unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_gru_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long n_gru:3; /* RW */
+ unsigned long undef_55_62:8; /* Undefined */
unsigned long enable:1; /* RW */
- } sx;
- struct uv2h_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_gru_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long n_gru:3; /* RW */
+ unsigned long undef_55_62:8; /* Undefined */
unsigned long enable:1; /* RW */
- } s2;
- struct uv3h_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0 */
+/* ========================================================================= */
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0 ( \
+ is_uv(UV5) ? 0x473000UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x000ffffffc000000UL
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 52
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x03f0000000000000UL
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK ( \
+ is_uv(UV5) ? 0x000ffffffc000000UL : \
+ 0)
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT ( \
+ is_uv(UV5) ? 26 : \
+ -1)
+
+union uvh_rh10_gam_mmioh_overlay_config0_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_mmioh_overlay_config0_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
unsigned long enable:1; /* RW */
- } s3;
- struct uv4h_rh_gam_alias210_overlay_config_0_mmr_s {
- unsigned long rsvd_0_23:24;
- unsigned long base:8; /* RW */
- unsigned long rsvd_32_47:16;
- unsigned long m_alias:5; /* RW */
- unsigned long rsvd_53_62:10;
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_mmioh_overlay_config0_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
unsigned long enable:1; /* RW */
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_mmioh_overlay_config0_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1 */
+/* ========================================================================= */
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1 ( \
+ is_uv(UV5) ? 0x474000UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x000ffffffc000000UL
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 52
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x03f0000000000000UL
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63
+#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK ( \
+ is_uv(UV5) ? 0x000ffffffc000000UL : \
+ 0)
+#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT ( \
+ is_uv(UV5) ? 26 : \
+ -1)
+
+union uvh_rh10_gam_mmioh_overlay_config1_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_mmioh_overlay_config1_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_mmioh_overlay_config1_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_mmioh_overlay_config1_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0 */
+/* ========================================================================= */
+#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0 ( \
+ is_uv(UV5) ? 0x473800UL : \
+ 0)
+
+#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH ( \
+ is_uv(UV5) ? 128 : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0
+#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x000000000000007fUL
+
+
+union uvh_rh10_gam_mmioh_redirect_config0_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_mmioh_redirect_config0_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_mmioh_redirect_config0_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_mmioh_redirect_config0_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } s5;
+};
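/*
 * Editorial sketch, modelled on the existing UV3/UV4 MMIOH mapping code:
 * each of the _DEPTH redirect entries names the NASID backing one slice
 * of the MMIOH0 overlay window. The helper name is illustrative and the
 * slice arithmetic is an assumption, not generated fact.
 */
static void uv5_walk_mmioh0_sketch(void)
{
	union uvh_rh10_gam_mmioh_overlay_config0_u overlay;
	union uvh_rh10_gam_mmioh_redirect_config0_u redirect;
	int i;

	if (!is_uv(UV5))
		return;
	overlay.v = uv_read_local_mmr(UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0);
	if (!overlay.s5.enable)
		return;
	for (i = 0; i < UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH; i++) {
		redirect.v = uv_read_local_mmr(
			UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0 + i * 8);
		/* redirect.s5.nasid owns slice i of the overlay window */
	}
}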
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1 */
+/* ========================================================================= */
+#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1 ( \
+ is_uv(UV5) ? 0x474800UL : \
+ 0)
+
+#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH ( \
+ is_uv(UV5) ? 128 : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_NASID_SHFT 0
+#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK 0x000000000000007fUL
+
+
+union uvh_rh10_gam_mmioh_redirect_config1_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_mmioh_redirect_config1_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_mmioh_redirect_config1_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_mmioh_redirect_config1_s {
+ unsigned long nasid:7; /* RW */
+ unsigned long rsvd_7_63:57;
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_RH10_GAM_MMR_OVERLAY_CONFIG */
+/* ========================================================================= */
+#define UVH_RH10_GAM_MMR_OVERLAY_CONFIG ( \
+ is_uv(UV5) ? 0x470090UL : \
+ 0)
+
+
+/* UVYH common defines */
+#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT 25
+#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_MASK 0x000ffffffe000000UL
+#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_MASK ( \
+ is_uv(UV5) ? 0x000ffffffe000000UL : \
+ 0)
+#define UVH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT ( \
+ is_uv(UV5) ? 25 : \
+ -1)
+
+union uvh_rh10_gam_mmr_overlay_config_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh10_gam_mmr_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long undef_52_62:11; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s;
+
+ /* UVYH common struct */
+ struct uvyh_rh10_gam_mmr_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long undef_52_62:11; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } sy;
+
+ /* UV5 unique struct */
+ struct uv5h_rh10_gam_mmr_overlay_config_s {
+ unsigned long undef_0_24:25; /* Undefined */
+ unsigned long base:27; /* RW */
+ unsigned long undef_52_62:11; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s5;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ADDR_MAP_CONFIG */
+/* ========================================================================= */
+#define UVH_RH_GAM_ADDR_MAP_CONFIG ( \
+ is_uv(UV4) ? 0x480000UL : \
+ is_uv(UV3) ? 0x1600000UL : \
+ is_uv(UV2) ? 0x1600000UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_ADDR_MAP_CONFIG_N_SKT_SHFT 6
+#define UVXH_RH_GAM_ADDR_MAP_CONFIG_N_SKT_MASK 0x00000000000003c0UL
+
+/* UV3 unique defines */
+#define UV3H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_SHFT 0
+#define UV3H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
+
+/* UV2 unique defines */
+#define UV2H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_SHFT 0
+#define UV2H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
+
+
+union uvh_rh_gam_addr_map_config_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh_gam_addr_map_config_s {
+ unsigned long rsvd_0_5:6;
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_addr_map_config_s {
+ unsigned long rsvd_0_5:6;
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } sx;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_addr_map_config_s {
+ unsigned long rsvd_0_5:6;
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
} s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_addr_map_config_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_addr_map_config_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s2;
};
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
+/* UVH_RH_GAM_ALIAS_0_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x4800d8UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR)
-
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-
-union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
+#define UVH_RH_GAM_ALIAS_0_OVERLAY_CONFIG ( \
+ is_uv(UV4) ? 0x4800c8UL : \
+ is_uv(UV3) ? 0x16000c8UL : \
+ is_uv(UV2) ? 0x16000c8UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+
+union uvh_rh_gam_alias_0_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_0_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2655,7 +3465,9 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_alias210_overlay_config_1_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_0_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2663,15 +3475,19 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} sx;
- struct uv2h_rh_gam_alias210_overlay_config_1_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_0_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
unsigned long m_alias:5; /* RW */
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
- } s2;
- struct uv3h_rh_gam_alias210_overlay_config_1_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_0_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2679,66 +3495,96 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s3;
- struct uv4h_rh_gam_alias210_overlay_config_1_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_0_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
unsigned long m_alias:5; /* RW */
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS_0_REDIRECT_CONFIG */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS_0_REDIRECT_CONFIG ( \
+ is_uv(UV4) ? 0x4800d0UL : \
+ is_uv(UV3) ? 0x16000d0UL : \
+ is_uv(UV2) ? 0x16000d0UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_0_REDIRECT_CONFIG_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_0_REDIRECT_CONFIG_DEST_BASE_MASK 0x00003fffff000000UL
+
+
+union uvh_rh_gam_alias_0_redirect_config_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_0_redirect_config_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_0_redirect_config_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } sx;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_0_redirect_config_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
} s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_0_redirect_config_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_0_redirect_config_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s2;
};
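/*
 * Editorial sketch: each ALIAS_n OVERLAY/REDIRECT pair remaps one
 * low-memory alias; this mirrors the kernel's lowmem-redirect lookup.
 * The helper name is illustrative; uv_read_local_mmr() is from
 * <asm/uv/uv_hub.h>.
 */
static void uv_alias0_sketch(unsigned long *base, unsigned long *size)
{
	union uvh_rh_gam_alias_0_overlay_config_u overlay;
	union uvh_rh_gam_alias_0_redirect_config_u redirect;

	*base = *size = 0;
	overlay.v = uv_read_local_mmr(UVH_RH_GAM_ALIAS_0_OVERLAY_CONFIG);
	if (!overlay.s.enable)
		return;
	*size = 1UL << overlay.s.m_alias;	/* bytes covered by the alias */
	redirect.v = uv_read_local_mmr(UVH_RH_GAM_ALIAS_0_REDIRECT_CONFIG);
	*base = (unsigned long)redirect.s.dest_base <<
		UVXH_RH_GAM_ALIAS_0_REDIRECT_CONFIG_DEST_BASE_SHFT;
}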
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
+/* UVH_RH_GAM_ALIAS_1_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x4800e8UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR)
-
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
-#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
-
-
-union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
+#define UVH_RH_GAM_ALIAS_1_OVERLAY_CONFIG ( \
+ is_uv(UV4) ? 0x4800d8UL : \
+ is_uv(UV3) ? 0x16000d8UL : \
+ is_uv(UV2) ? 0x16000d8UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+
+union uvh_rh_gam_alias_1_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_1_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2746,7 +3592,9 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_alias210_overlay_config_2_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_1_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2754,15 +3602,19 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} sx;
- struct uv2h_rh_gam_alias210_overlay_config_2_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_1_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
unsigned long m_alias:5; /* RW */
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
- } s2;
- struct uv3h_rh_gam_alias210_overlay_config_2_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_1_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
@@ -2770,321 +3622,289 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s3;
- struct uv4h_rh_gam_alias210_overlay_config_2_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_1_overlay_config_s {
unsigned long rsvd_0_23:24;
unsigned long base:8; /* RW */
unsigned long rsvd_32_47:16;
unsigned long m_alias:5; /* RW */
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
- } s4;
+ } s2;
};
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
+/* UVH_RH_GAM_ALIAS_1_REDIRECT_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x4800d0UL
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR)
-
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UVH_RH_GAM_ALIAS_1_REDIRECT_CONFIG ( \
+ is_uv(UV4) ? 0x4800e0UL : \
+ is_uv(UV3) ? 0x16000e0UL : \
+ is_uv(UV2) ? 0x16000e0UL : \
+ 0)
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_1_REDIRECT_CONFIG_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_1_REDIRECT_CONFIG_DEST_BASE_MASK 0x00003fffff000000UL
-union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
+union uvh_rh_gam_alias_1_redirect_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_1_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
- struct uvxh_rh_gam_alias210_redirect_config_0_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_1_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} sx;
- struct uv2h_rh_gam_alias210_redirect_config_0_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_1_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
- } s2;
- struct uv3h_rh_gam_alias210_redirect_config_0_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_1_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s3;
- struct uv4h_rh_gam_alias210_redirect_config_0_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_1_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
- } s4;
+ } s2;
};
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
+/* UVH_RH_GAM_ALIAS_2_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x4800e0UL
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR)
-
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UVH_RH_GAM_ALIAS_2_OVERLAY_CONFIG ( \
+ is_uv(UV4) ? 0x4800e8UL : \
+ is_uv(UV3) ? 0x16000e8UL : \
+ is_uv(UV2) ? 0x16000e8UL : \
+ 0)
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
+union uvh_rh_gam_alias_2_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_2_overlay_config_s {
unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_alias210_redirect_config_1_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_2_overlay_config_s {
unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
} sx;
- struct uv2h_rh_gam_alias210_redirect_config_1_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_2_overlay_config_s {
unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
- } s2;
- struct uv3h_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_2_overlay_config_s {
unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
} s3;
- struct uv4h_rh_gam_alias210_redirect_config_1_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_2_overlay_config_s {
unsigned long rsvd_0_23:24;
- unsigned long dest_base:22; /* RW */
- unsigned long rsvd_46_63:18;
- } s4;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s2;
};
/* ========================================================================= */
-/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
+/* UVH_RH_GAM_ALIAS_2_REDIRECT_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x4800f0UL
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR)
-
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
-#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
-#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UVH_RH_GAM_ALIAS_2_REDIRECT_CONFIG ( \
+ is_uv(UV4) ? 0x4800f0UL : \
+ is_uv(UV3) ? 0x16000f0UL : \
+ is_uv(UV2) ? 0x16000f0UL : \
+ 0)
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
-#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
-#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+/* UVXH common defines */
+#define UVXH_RH_GAM_ALIAS_2_REDIRECT_CONFIG_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS_2_REDIRECT_CONFIG_DEST_BASE_MASK 0x00003fffff000000UL
-union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
+union uvh_rh_gam_alias_2_redirect_config_u {
unsigned long v;
- struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_alias_2_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
- struct uvxh_rh_gam_alias210_redirect_config_2_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_alias_2_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} sx;
- struct uv2h_rh_gam_alias210_redirect_config_2_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_alias_2_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
- } s2;
- struct uv3h_rh_gam_alias210_redirect_config_2_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_alias_2_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s3;
- struct uv4h_rh_gam_alias210_redirect_config_2_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_alias_2_redirect_config_s {
unsigned long rsvd_0_23:24;
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
- } s4;
-};
-
-/* ========================================================================= */
-/* UVH_RH_GAM_CONFIG_MMR */
-/* ========================================================================= */
-#define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL
-#define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL
-#define UV4H_RH_GAM_CONFIG_MMR 0x480000UL
-#define UVH_RH_GAM_CONFIG_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_CONFIG_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_CONFIG_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_CONFIG_MMR)
-
-#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-
-#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-
-#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
-#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
-#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-
-#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
-#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
-#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-
-#define UV4H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
-#define UV4H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
-
-
-union uvh_rh_gam_config_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_config_mmr_s {
- unsigned long rsvd_0_5:6;
- unsigned long n_skt:4; /* RW */
- unsigned long rsvd_10_63:54;
- } s;
- struct uvxh_rh_gam_config_mmr_s {
- unsigned long rsvd_0_5:6;
- unsigned long n_skt:4; /* RW */
- unsigned long rsvd_10_63:54;
- } sx;
- struct uv2h_rh_gam_config_mmr_s {
- unsigned long m_skt:6; /* RW */
- unsigned long n_skt:4; /* RW */
- unsigned long rsvd_10_63:54;
} s2;
- struct uv3h_rh_gam_config_mmr_s {
- unsigned long m_skt:6; /* RW */
- unsigned long n_skt:4; /* RW */
- unsigned long rsvd_10_63:54;
- } s3;
- struct uv4h_rh_gam_config_mmr_s {
- unsigned long rsvd_0_5:6;
- unsigned long n_skt:4; /* RW */
- unsigned long rsvd_10_63:54;
- } s4;
};
/* ========================================================================= */
-/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
+/* UVH_RH_GAM_GRU_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x480010UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR)
-
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_SHFT 62
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_MASK 0x4000000000000000UL
-#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK ( \
- is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK : \
- is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK)
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT ( \
- is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT : \
- is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT : \
- /*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT)
-
-union uvh_rh_gam_gru_overlay_config_mmr_u {
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG ( \
+ is_uv(UV4) ? 0x480010UL : \
+ is_uv(UV3) ? 0x1600010UL : \
+ is_uv(UV2) ? 0x1600010UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_N_GRU_SHFT 52
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_N_GRU_MASK 0x00f0000000000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 26
+#define UV4AH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x000ffffffc000000UL
+
+/* UV4 unique defines */
+#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 26
+#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x00003ffffc000000UL
+
+/* UV3 unique defines */
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 28
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x00003ffff0000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MODE_SHFT 62
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MODE_MASK 0x4000000000000000UL
+
+/* UV2 unique defines */
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 28
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x00003ffff0000000UL
+
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK ( \
+ is_uv(UV4A) ? 0x000ffffffc000000UL : \
+ is_uv(UV4) ? 0x00003ffffc000000UL : \
+ is_uv(UV3) ? 0x00003ffff0000000UL : \
+ is_uv(UV2) ? 0x00003ffff0000000UL : \
+ 0)
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT ( \
+ is_uv(UV4) ? 26 : \
+ is_uv(UV3) ? 28 : \
+ is_uv(UV2) ? 28 : \
+ -1)
+
+union uvh_rh_gam_gru_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_gru_overlay_config_mmr_s {
- unsigned long rsvd_0_51:52;
+
+ /* UVH common struct */
+ struct uvh_rh_gam_gru_overlay_config_s {
+ unsigned long rsvd_0_45:46;
+ unsigned long rsvd_46_51:6;
unsigned long n_gru:4; /* RW */
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_gru_overlay_config_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_gru_overlay_config_s {
unsigned long rsvd_0_45:46;
unsigned long rsvd_46_51:6;
unsigned long n_gru:4; /* RW */
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
} sx;
- struct uv2h_rh_gam_gru_overlay_config_mmr_s {
- unsigned long rsvd_0_27:28;
- unsigned long base:18; /* RW */
+
+ /* UV4A unique struct */
+ struct uv4ah_rh_gam_gru_overlay_config_s {
+ unsigned long rsvd_0_24:25;
+ unsigned long undef_25:1; /* Undefined */
+ unsigned long base:26; /* RW */
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s4a;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_gru_overlay_config_s {
+ unsigned long rsvd_0_24:25;
+ unsigned long undef_25:1; /* Undefined */
+ unsigned long base:20; /* RW */
unsigned long rsvd_46_51:6;
unsigned long n_gru:4; /* RW */
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s2;
- struct uv3h_rh_gam_gru_overlay_config_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_gru_overlay_config_s {
unsigned long rsvd_0_27:28;
unsigned long base:18; /* RW */
unsigned long rsvd_46_51:6;
@@ -3093,86 +3913,141 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
unsigned long mode:1; /* RW */
unsigned long enable:1; /* RW */
} s3;
- struct uv4h_rh_gam_gru_overlay_config_mmr_s {
- unsigned long rsvd_0_24:25;
- unsigned long undef_25:1; /* Undefined */
- unsigned long base:20; /* RW */
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_gru_overlay_config_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
unsigned long rsvd_46_51:6;
unsigned long n_gru:4; /* RW */
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s4;
+ } s2;
};
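/*
 * Editorial sketch: the UVH_*_BASE_MASK/_BASE_SHFT wrappers above select
 * the per-hub field encoding at run time, so one code path serves
 * UV2-UV4A. The SHFT ladder yields -1 (and the MASK 0) on unlisted hubs,
 * hence the enable check first. Helper name illustrative only.
 */
static unsigned long uv_gru_base_sketch(void)
{
	union uvh_rh_gam_gru_overlay_config_u gru;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG);
	if (!gru.s.enable)
		return 0;
	/* masked bits are already position-aligned: this is the phys base;
	 * (gru.v & MASK) >> SHFT would give the raw field value instead */
	return gru.v & UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK;
}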
/* ========================================================================= */
-/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
+/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR")
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x483000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR)
-
-
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 52
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x000ffffffc000000UL
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x03f0000000000000UL
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT)
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK)
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK)
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK)
-
-union uvh_rh_gam_mmioh_overlay_config0_mmr_u {
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG ( \
+ is_uv(UV2) ? 0x1600030UL : \
+ 0)
+
+
+/* UV2 unique defines */
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT 27
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_MASK 0x00003ffff8000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_M_IO_SHFT 46
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_M_IO_MASK 0x000fc00000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_N_IO_SHFT 52
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_N_IO_MASK 0x00f0000000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT ( \
+ is_uv(UV2) ? 27 : \
+ uv_undefined("UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT"))
+
+union uvh_rh_gam_mmioh_overlay_config_u {
unsigned long v;
- struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmioh_overlay_config_s {
+ unsigned long rsvd_0_26:27;
+ unsigned long base:19; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmioh_overlay_config_s {
+ unsigned long rsvd_0_26:27;
+ unsigned long base:19; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } sx;
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_mmioh_overlay_config_s {
+ unsigned long rsvd_0_26:27;
+ unsigned long base:19; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0 */
+/* ========================================================================= */
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0 ( \
+ is_uv(UV4) ? 0x483000UL : \
+ is_uv(UV3) ? 0x1603000UL : \
+ 0)
+
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x000ffffffc000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 52
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x03f0000000000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL
+
+/* UV4 unique defines */
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x00003ffffc000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 46
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x000fc00000000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL
+
+/* UV3 unique defines */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK ( \
+ is_uv(UV4A) ? 0x000ffffffc000000UL : \
+ is_uv(UV4) ? 0x00003ffffc000000UL : \
+ is_uv(UV3) ? 0x00003ffffc000000UL : \
+ 0)
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT ( \
+ is_uv(UV4) ? 26 : \
+ is_uv(UV3) ? 26 : \
+ -1)
+
+union uvh_rh_gam_mmioh_overlay_config0_u {
+ unsigned long v;
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmioh_overlay_config0_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long m_io:6; /* RW */
unsigned long n_io:4;
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s3;
- struct uv4h_rh_gam_mmioh_overlay_config0_mmr_s {
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmioh_overlay_config0_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long m_io:6; /* RW */
unsigned long n_io:4;
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s4;
+ } sx;
+
+ /* UV4A unique struct */
struct uv4ah_rh_gam_mmioh_overlay_config0_mmr_s {
unsigned long rsvd_0_25:26;
unsigned long base:26; /* RW */
@@ -3181,71 +4056,94 @@ union uvh_rh_gam_mmioh_overlay_config0_mmr_u {
unsigned long undef_62:1; /* Undefined */
unsigned long enable:1; /* RW */
} s4a;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_mmioh_overlay_config0_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_mmioh_overlay_config0_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
};
/* ========================================================================= */
-/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
+/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1 */
/* ========================================================================= */
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR")
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1603000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x484000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR)
-
-
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 52
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x000ffffffc000000UL
-#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x03f0000000000000UL
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT)
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK)
-
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK)
-
-union uvh_rh_gam_mmioh_overlay_config1_mmr_u {
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1 ( \
+ is_uv(UV4) ? 0x484000UL : \
+ is_uv(UV3) ? 0x1604000UL : \
+ 0)
+
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x000ffffffc000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 52
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x03f0000000000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL
+
+/* UV4 unique defines */
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x00003ffffc000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 46
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x000fc00000000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL
+
+/* UV3 unique defines */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK ( \
+ is_uv(UV4A) ? 0x000ffffffc000000UL : \
+ is_uv(UV4) ? 0x00003ffffc000000UL : \
+ is_uv(UV3) ? 0x00003ffffc000000UL : \
+ 0)
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT ( \
+ is_uv(UV4) ? 26 : \
+ is_uv(UV3) ? 26 : \
+ -1)
+
+union uvh_rh_gam_mmioh_overlay_config1_u {
unsigned long v;
- struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmioh_overlay_config1_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long m_io:6; /* RW */
unsigned long n_io:4;
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s3;
- struct uv4h_rh_gam_mmioh_overlay_config1_mmr_s {
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmioh_overlay_config1_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long m_io:6; /* RW */
unsigned long n_io:4;
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s4;
+ } sx;
+
+ /* UV4A unique struct */
struct uv4ah_rh_gam_mmioh_overlay_config1_s {
unsigned long rsvd_0_25:26;
unsigned long base:26; /* RW */
@@ -3254,232 +4152,275 @@ union uvh_rh_gam_mmioh_overlay_config1_mmr_u {
unsigned long undef_62:1; /* Undefined */
unsigned long enable:1; /* RW */
} s4a;
-};
-/* ========================================================================= */
-/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
-/* ========================================================================= */
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR uv_undefined("UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR")
-#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR uv_undefined("UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR")
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR)
-
-
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff8000000UL
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
-#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-
-union uvh_rh_gam_mmioh_overlay_config_mmr_u {
- unsigned long v;
- struct uv2h_rh_gam_mmioh_overlay_config_mmr_s {
- unsigned long rsvd_0_26:27;
- unsigned long base:19; /* RW */
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_mmioh_overlay_config1_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
unsigned long m_io:6; /* RW */
- unsigned long n_io:4; /* RW */
+ unsigned long n_io:4;
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
- } s2;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_mmioh_overlay_config1_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
};
/* ========================================================================= */
-/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
+/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0 */
/* ========================================================================= */
-#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR")
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x483800UL
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR)
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0 ( \
+ is_uv(UV4) ? 0x483800UL : \
+ is_uv(UV3) ? 0x1603800UL : \
+ 0)
-#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH")
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH)
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH ( \
+ is_uv(UV4) ? 128 : \
+ is_uv(UV3) ? 128 : \
+ 0)
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x0000000000000fffUL
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+/* UV4 unique defines */
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x0000000000007fffUL
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+/* UV3 unique defines */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x0000000000007fffUL
-#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000000fffUL
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK)
-
-union uvh_rh_gam_mmioh_redirect_config0_mmr_u {
+union uvh_rh_gam_mmioh_redirect_config0_u {
unsigned long v;
- struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmioh_redirect_config0_s {
unsigned long nasid:15; /* RW */
unsigned long rsvd_15_63:49;
- } s3;
- struct uv4h_rh_gam_mmioh_redirect_config0_mmr_s {
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmioh_redirect_config0_s {
unsigned long nasid:15; /* RW */
unsigned long rsvd_15_63:49;
- } s4;
- struct uv4ah_rh_gam_mmioh_redirect_config0_mmr_s {
+ } sx;
+
+ struct uv4ah_rh_gam_mmioh_redirect_config0_s {
unsigned long nasid:12; /* RW */
unsigned long rsvd_12_63:52;
} s4a;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_mmioh_redirect_config0_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_mmioh_redirect_config0_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
};
/* ========================================================================= */
-/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
+/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1 */
/* ========================================================================= */
-#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR")
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x484800UL
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR)
-
-#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH")
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH)
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1 ( \
+ is_uv(UV4) ? 0x484800UL : \
+ is_uv(UV3) ? 0x1604800UL : \
+ 0)
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH ( \
+ is_uv(UV4) ? 128 : \
+ is_uv(UV3) ? 128 : \
+ 0)
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_SHFT 0
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK 0x0000000000000fffUL
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
-#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+/* UV4 unique defines */
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_SHFT 0
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK 0x0000000000007fffUL
-#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000000fffUL
+/* UV3 unique defines */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK 0x0000000000007fffUL
-#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK ( \
- is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK : \
- is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK)
-union uvh_rh_gam_mmioh_redirect_config1_mmr_u {
+union uvh_rh_gam_mmioh_redirect_config1_u {
unsigned long v;
- struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmioh_redirect_config1_s {
unsigned long nasid:15; /* RW */
unsigned long rsvd_15_63:49;
- } s3;
- struct uv4h_rh_gam_mmioh_redirect_config1_mmr_s {
+ } s;
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmioh_redirect_config1_s {
unsigned long nasid:15; /* RW */
unsigned long rsvd_15_63:49;
- } s4;
- struct uv4ah_rh_gam_mmioh_redirect_config1_mmr_s {
+ } sx;
+
+ struct uv4ah_rh_gam_mmioh_redirect_config1_s {
unsigned long nasid:12; /* RW */
unsigned long rsvd_12_63:52;
} s4a;
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_mmioh_redirect_config1_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_mmioh_redirect_config1_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
};
/* ========================================================================= */
-/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
+/* UVH_RH_GAM_MMR_OVERLAY_CONFIG */
/* ========================================================================= */
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
-#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
-#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x480028UL
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR ( \
- is_uv2_hub() ? UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR : \
- is_uv3_hub() ? UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR : \
- /*is_uv4_hub*/ UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR)
-
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-
-union uvh_rh_gam_mmr_overlay_config_mmr_u {
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG ( \
+ is_uv(UV4) ? 0x480028UL : \
+ is_uv(UV3) ? 0x1600028UL : \
+ is_uv(UV2) ? 0x1600028UL : \
+ 0)
+
+
+/* UVXH common defines */
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT 26
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_MASK ( \
+ is_uv(UV4A) ? 0x000ffffffc000000UL : \
+ is_uv(UV4) ? 0x00003ffffc000000UL : \
+ is_uv(UV3) ? 0x00003ffffc000000UL : \
+ is_uv(UV2) ? 0x00003ffffc000000UL : \
+ 0)
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+/* UV4A unique defines */
+#define UV4AH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT 26
+#define UV4AH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_MASK 0x000ffffffc000000UL
+
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_MASK ( \
+ is_uv(UV4A) ? 0x000ffffffc000000UL : \
+ is_uv(UV4) ? 0x00003ffffc000000UL : \
+ is_uv(UV3) ? 0x00003ffffc000000UL : \
+ is_uv(UV2) ? 0x00003ffffc000000UL : \
+ 0)
+
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT ( \
+ is_uv(UV4) ? 26 : \
+ is_uv(UV3) ? 26 : \
+ is_uv(UV2) ? 26 : \
+ -1)
+
+union uvh_rh_gam_mmr_overlay_config_u {
unsigned long v;
- struct uvh_rh_gam_mmr_overlay_config_mmr_s {
+
+ /* UVH common struct */
+ struct uvh_rh_gam_mmr_overlay_config_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
} s;
- struct uvxh_rh_gam_mmr_overlay_config_mmr_s {
+
+ /* UVXH common struct */
+ struct uvxh_rh_gam_mmr_overlay_config_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
} sx;
- struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
+
+ /* UV4 unique struct */
+ struct uv4h_rh_gam_mmr_overlay_config_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
- } s2;
- struct uv3h_rh_gam_mmr_overlay_config_mmr_s {
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rh_gam_mmr_overlay_config_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
} s3;
- struct uv4h_rh_gam_mmr_overlay_config_mmr_s {
+
+ /* UV2 unique struct */
+ struct uv2h_rh_gam_mmr_overlay_config_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
- } s4;
+ } s2;
};
/* ========================================================================= */
/* UVH_RTC */
/* ========================================================================= */
-#define UV2H_RTC 0x340000UL
-#define UV3H_RTC 0x340000UL
-#define UV4H_RTC 0xe0000UL
#define UVH_RTC ( \
- is_uv2_hub() ? UV2H_RTC : \
- is_uv3_hub() ? UV3H_RTC : \
- /*is_uv4_hub*/ UV4H_RTC)
+ is_uv(UV5) ? 0xe0000UL : \
+ is_uv(UV4) ? 0xe0000UL : \
+ is_uv(UV3) ? 0x340000UL : \
+ is_uv(UV2) ? 0x340000UL : \
+ 0)
+/* UVH common defines */
#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
union uvh_rtc_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_rtc_s {
unsigned long real_time_clock:56; /* RW */
unsigned long rsvd_56_63:8;
} s;
+
+ /* UV5 unique struct */
+ struct uv5h_rtc_s {
+ unsigned long real_time_clock:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s5;
+
+ /* UV4 unique struct */
+ struct uv4h_rtc_s {
+ unsigned long real_time_clock:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s4;
+
+ /* UV3 unique struct */
+ struct uv3h_rtc_s {
+ unsigned long real_time_clock:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s3;
+
+ /* UV2 unique struct */
+ struct uv2h_rtc_s {
+ unsigned long real_time_clock:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s2;
};
/* ========================================================================= */
@@ -3487,26 +4428,29 @@ union uvh_rtc_u {
/* ========================================================================= */
#define UVH_RTC1_INT_CONFIG 0x615c0UL
+/* UVH common defines */
#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
-#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
-#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
-#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
-#define UVH_RTC1_INT_CONFIG_P_SHFT 13
-#define UVH_RTC1_INT_CONFIG_T_SHFT 15
-#define UVH_RTC1_INT_CONFIG_M_SHFT 16
-#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC1_INT_CONFIG_P_SHFT 13
#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC1_INT_CONFIG_T_SHFT 15
#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC1_INT_CONFIG_M_SHFT 16
#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
union uvh_rtc1_int_config_u {
unsigned long v;
+
+ /* UVH common struct */
struct uvh_rtc1_int_config_s {
unsigned long vector_:8; /* RW */
unsigned long dm:3; /* RW */
@@ -3519,591 +4463,175 @@ union uvh_rtc1_int_config_u {
unsigned long rsvd_17_31:15;
unsigned long apic_id:32; /* RW */
} s;
-};
-
-/* ========================================================================= */
-/* UVH_SCRATCH5 */
-/* ========================================================================= */
-#define UV2H_SCRATCH5 0x2d0200UL
-#define UV3H_SCRATCH5 0x2d0200UL
-#define UV4H_SCRATCH5 0xb0200UL
-#define UVH_SCRATCH5 ( \
- is_uv2_hub() ? UV2H_SCRATCH5 : \
- is_uv3_hub() ? UV3H_SCRATCH5 : \
- /*is_uv4_hub*/ UV4H_SCRATCH5)
-
-#define UV2H_SCRATCH5_32 0x778
-#define UV3H_SCRATCH5_32 0x778
-#define UV4H_SCRATCH5_32 0x798
-#define UVH_SCRATCH5_32 ( \
- is_uv2_hub() ? UV2H_SCRATCH5_32 : \
- is_uv3_hub() ? UV3H_SCRATCH5_32 : \
- /*is_uv4_hub*/ UV4H_SCRATCH5_32)
-
-#define UVH_SCRATCH5_SCRATCH5_SHFT 0
-#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-
-
-union uvh_scratch5_u {
- unsigned long v;
- struct uvh_scratch5_s {
- unsigned long scratch5:64; /* RW, W1CS */
- } s;
-};
-/* ========================================================================= */
-/* UVH_SCRATCH5_ALIAS */
-/* ========================================================================= */
-#define UV2H_SCRATCH5_ALIAS 0x2d0208UL
-#define UV3H_SCRATCH5_ALIAS 0x2d0208UL
-#define UV4H_SCRATCH5_ALIAS 0xb0208UL
-#define UVH_SCRATCH5_ALIAS ( \
- is_uv2_hub() ? UV2H_SCRATCH5_ALIAS : \
- is_uv3_hub() ? UV3H_SCRATCH5_ALIAS : \
- /*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS)
-
-#define UV2H_SCRATCH5_ALIAS_32 0x780
-#define UV3H_SCRATCH5_ALIAS_32 0x780
-#define UV4H_SCRATCH5_ALIAS_32 0x7a0
-#define UVH_SCRATCH5_ALIAS_32 ( \
- is_uv2_hub() ? UV2H_SCRATCH5_ALIAS_32 : \
- is_uv3_hub() ? UV3H_SCRATCH5_ALIAS_32 : \
- /*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS_32)
-
-
-/* ========================================================================= */
-/* UVH_SCRATCH5_ALIAS_2 */
-/* ========================================================================= */
-#define UV2H_SCRATCH5_ALIAS_2 0x2d0210UL
-#define UV3H_SCRATCH5_ALIAS_2 0x2d0210UL
-#define UV4H_SCRATCH5_ALIAS_2 0xb0210UL
-#define UVH_SCRATCH5_ALIAS_2 ( \
- is_uv2_hub() ? UV2H_SCRATCH5_ALIAS_2 : \
- is_uv3_hub() ? UV3H_SCRATCH5_ALIAS_2 : \
- /*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS_2)
-#define UVH_SCRATCH5_ALIAS_2_32 0x788
-
-
-/* ========================================================================= */
-/* UVXH_EVENT_OCCURRED2 */
-/* ========================================================================= */
-#define UVXH_EVENT_OCCURRED2 0x70100UL
-
-#define UV2H_EVENT_OCCURRED2_32 0xb68
-#define UV3H_EVENT_OCCURRED2_32 0xb68
-#define UV4H_EVENT_OCCURRED2_32 0x608
-#define UVH_EVENT_OCCURRED2_32 ( \
- is_uv2_hub() ? UV2H_EVENT_OCCURRED2_32 : \
- is_uv3_hub() ? UV3H_EVENT_OCCURRED2_32 : \
- /*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_32)
-
-
-#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
-#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
-#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
-#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
-#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
-#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
-#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
-#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
-#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
-#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
-#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
-#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
-#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
-#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
-#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
-#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
-#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
-#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
-#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
-#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
-#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
-#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
-#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
-#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
-#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
-#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
-#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
-#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
-#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
-#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
-#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
-#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
-#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
-#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
-#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
-#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
-#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
-#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
-#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
-#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
-#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
-#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
-#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
-#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
-#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
-#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
-#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
-
-#define UV3H_EVENT_OCCURRED2_RTC_0_SHFT 0
-#define UV3H_EVENT_OCCURRED2_RTC_1_SHFT 1
-#define UV3H_EVENT_OCCURRED2_RTC_2_SHFT 2
-#define UV3H_EVENT_OCCURRED2_RTC_3_SHFT 3
-#define UV3H_EVENT_OCCURRED2_RTC_4_SHFT 4
-#define UV3H_EVENT_OCCURRED2_RTC_5_SHFT 5
-#define UV3H_EVENT_OCCURRED2_RTC_6_SHFT 6
-#define UV3H_EVENT_OCCURRED2_RTC_7_SHFT 7
-#define UV3H_EVENT_OCCURRED2_RTC_8_SHFT 8
-#define UV3H_EVENT_OCCURRED2_RTC_9_SHFT 9
-#define UV3H_EVENT_OCCURRED2_RTC_10_SHFT 10
-#define UV3H_EVENT_OCCURRED2_RTC_11_SHFT 11
-#define UV3H_EVENT_OCCURRED2_RTC_12_SHFT 12
-#define UV3H_EVENT_OCCURRED2_RTC_13_SHFT 13
-#define UV3H_EVENT_OCCURRED2_RTC_14_SHFT 14
-#define UV3H_EVENT_OCCURRED2_RTC_15_SHFT 15
-#define UV3H_EVENT_OCCURRED2_RTC_16_SHFT 16
-#define UV3H_EVENT_OCCURRED2_RTC_17_SHFT 17
-#define UV3H_EVENT_OCCURRED2_RTC_18_SHFT 18
-#define UV3H_EVENT_OCCURRED2_RTC_19_SHFT 19
-#define UV3H_EVENT_OCCURRED2_RTC_20_SHFT 20
-#define UV3H_EVENT_OCCURRED2_RTC_21_SHFT 21
-#define UV3H_EVENT_OCCURRED2_RTC_22_SHFT 22
-#define UV3H_EVENT_OCCURRED2_RTC_23_SHFT 23
-#define UV3H_EVENT_OCCURRED2_RTC_24_SHFT 24
-#define UV3H_EVENT_OCCURRED2_RTC_25_SHFT 25
-#define UV3H_EVENT_OCCURRED2_RTC_26_SHFT 26
-#define UV3H_EVENT_OCCURRED2_RTC_27_SHFT 27
-#define UV3H_EVENT_OCCURRED2_RTC_28_SHFT 28
-#define UV3H_EVENT_OCCURRED2_RTC_29_SHFT 29
-#define UV3H_EVENT_OCCURRED2_RTC_30_SHFT 30
-#define UV3H_EVENT_OCCURRED2_RTC_31_SHFT 31
-#define UV3H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
-#define UV3H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
-#define UV3H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
-#define UV3H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
-#define UV3H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
-#define UV3H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
-#define UV3H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
-#define UV3H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
-#define UV3H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
-#define UV3H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
-#define UV3H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
-#define UV3H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
-#define UV3H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
-#define UV3H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
-#define UV3H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
-#define UV3H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
-#define UV3H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
-#define UV3H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
-#define UV3H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
-#define UV3H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
-#define UV3H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
-#define UV3H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
-#define UV3H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
-#define UV3H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
-#define UV3H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
-#define UV3H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+ /* UV5 unique struct */
+ struct uv5h_rtc1_int_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s5;
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_SHFT 0
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_SHFT 1
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_SHFT 2
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_SHFT 3
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_SHFT 4
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_SHFT 5
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_SHFT 6
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_SHFT 7
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_SHFT 8
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_SHFT 9
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_SHFT 10
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_SHFT 11
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_SHFT 12
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_SHFT 13
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_SHFT 14
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_SHFT 15
-#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_SHFT 16
-#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_SHFT 17
-#define UV4H_EVENT_OCCURRED2_RTC_0_SHFT 18
-#define UV4H_EVENT_OCCURRED2_RTC_1_SHFT 19
-#define UV4H_EVENT_OCCURRED2_RTC_2_SHFT 20
-#define UV4H_EVENT_OCCURRED2_RTC_3_SHFT 21
-#define UV4H_EVENT_OCCURRED2_RTC_4_SHFT 22
-#define UV4H_EVENT_OCCURRED2_RTC_5_SHFT 23
-#define UV4H_EVENT_OCCURRED2_RTC_6_SHFT 24
-#define UV4H_EVENT_OCCURRED2_RTC_7_SHFT 25
-#define UV4H_EVENT_OCCURRED2_RTC_8_SHFT 26
-#define UV4H_EVENT_OCCURRED2_RTC_9_SHFT 27
-#define UV4H_EVENT_OCCURRED2_RTC_10_SHFT 28
-#define UV4H_EVENT_OCCURRED2_RTC_11_SHFT 29
-#define UV4H_EVENT_OCCURRED2_RTC_12_SHFT 30
-#define UV4H_EVENT_OCCURRED2_RTC_13_SHFT 31
-#define UV4H_EVENT_OCCURRED2_RTC_14_SHFT 32
-#define UV4H_EVENT_OCCURRED2_RTC_15_SHFT 33
-#define UV4H_EVENT_OCCURRED2_RTC_16_SHFT 34
-#define UV4H_EVENT_OCCURRED2_RTC_17_SHFT 35
-#define UV4H_EVENT_OCCURRED2_RTC_18_SHFT 36
-#define UV4H_EVENT_OCCURRED2_RTC_19_SHFT 37
-#define UV4H_EVENT_OCCURRED2_RTC_20_SHFT 38
-#define UV4H_EVENT_OCCURRED2_RTC_21_SHFT 39
-#define UV4H_EVENT_OCCURRED2_RTC_22_SHFT 40
-#define UV4H_EVENT_OCCURRED2_RTC_23_SHFT 41
-#define UV4H_EVENT_OCCURRED2_RTC_24_SHFT 42
-#define UV4H_EVENT_OCCURRED2_RTC_25_SHFT 43
-#define UV4H_EVENT_OCCURRED2_RTC_26_SHFT 44
-#define UV4H_EVENT_OCCURRED2_RTC_27_SHFT 45
-#define UV4H_EVENT_OCCURRED2_RTC_28_SHFT 46
-#define UV4H_EVENT_OCCURRED2_RTC_29_SHFT 47
-#define UV4H_EVENT_OCCURRED2_RTC_30_SHFT 48
-#define UV4H_EVENT_OCCURRED2_RTC_31_SHFT 49
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000001UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000002UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000004UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000008UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000010UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000020UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000040UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000080UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000100UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000200UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000000400UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000000800UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000001000UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000002000UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000004000UL
-#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000008000UL
-#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_MASK 0x0000000000010000UL
-#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_MASK 0x0000000000020000UL
-#define UV4H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000040000UL
-#define UV4H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000080000UL
-#define UV4H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000100000UL
-#define UV4H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000200000UL
-#define UV4H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000400000UL
-#define UV4H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000800000UL
-#define UV4H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000001000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000002000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000004000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000008000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000010000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000020000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000040000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000080000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000100000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000200000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000400000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000800000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_18_MASK 0x0000001000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_19_MASK 0x0000002000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_20_MASK 0x0000004000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_21_MASK 0x0000008000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_22_MASK 0x0000010000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_23_MASK 0x0000020000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_24_MASK 0x0000040000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_25_MASK 0x0000080000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_26_MASK 0x0000100000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_27_MASK 0x0000200000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_28_MASK 0x0000400000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_29_MASK 0x0000800000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_30_MASK 0x0001000000000000UL
-#define UV4H_EVENT_OCCURRED2_RTC_31_MASK 0x0002000000000000UL
+ /* UV4 unique struct */
+ struct uv4h_rtc1_int_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s4;
-#define UVXH_EVENT_OCCURRED2_RTC_1_MASK ( \
- is_uv2_hub() ? UV2H_EVENT_OCCURRED2_RTC_1_MASK : \
- is_uv3_hub() ? UV3H_EVENT_OCCURRED2_RTC_1_MASK : \
- /*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_RTC_1_MASK)
+ /* UV3 unique struct */
+ struct uv3h_rtc1_int_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s3;
-union uvh_event_occurred2_u {
- unsigned long v;
- struct uv2h_event_occurred2_s {
- unsigned long rtc_0:1; /* RW */
- unsigned long rtc_1:1; /* RW */
- unsigned long rtc_2:1; /* RW */
- unsigned long rtc_3:1; /* RW */
- unsigned long rtc_4:1; /* RW */
- unsigned long rtc_5:1; /* RW */
- unsigned long rtc_6:1; /* RW */
- unsigned long rtc_7:1; /* RW */
- unsigned long rtc_8:1; /* RW */
- unsigned long rtc_9:1; /* RW */
- unsigned long rtc_10:1; /* RW */
- unsigned long rtc_11:1; /* RW */
- unsigned long rtc_12:1; /* RW */
- unsigned long rtc_13:1; /* RW */
- unsigned long rtc_14:1; /* RW */
- unsigned long rtc_15:1; /* RW */
- unsigned long rtc_16:1; /* RW */
- unsigned long rtc_17:1; /* RW */
- unsigned long rtc_18:1; /* RW */
- unsigned long rtc_19:1; /* RW */
- unsigned long rtc_20:1; /* RW */
- unsigned long rtc_21:1; /* RW */
- unsigned long rtc_22:1; /* RW */
- unsigned long rtc_23:1; /* RW */
- unsigned long rtc_24:1; /* RW */
- unsigned long rtc_25:1; /* RW */
- unsigned long rtc_26:1; /* RW */
- unsigned long rtc_27:1; /* RW */
- unsigned long rtc_28:1; /* RW */
- unsigned long rtc_29:1; /* RW */
- unsigned long rtc_30:1; /* RW */
- unsigned long rtc_31:1; /* RW */
- unsigned long rsvd_32_63:32;
+ /* UV2 unique struct */
+ struct uv2h_rtc1_int_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
} s2;
- struct uv3h_event_occurred2_s {
- unsigned long rtc_0:1; /* RW */
- unsigned long rtc_1:1; /* RW */
- unsigned long rtc_2:1; /* RW */
- unsigned long rtc_3:1; /* RW */
- unsigned long rtc_4:1; /* RW */
- unsigned long rtc_5:1; /* RW */
- unsigned long rtc_6:1; /* RW */
- unsigned long rtc_7:1; /* RW */
- unsigned long rtc_8:1; /* RW */
- unsigned long rtc_9:1; /* RW */
- unsigned long rtc_10:1; /* RW */
- unsigned long rtc_11:1; /* RW */
- unsigned long rtc_12:1; /* RW */
- unsigned long rtc_13:1; /* RW */
- unsigned long rtc_14:1; /* RW */
- unsigned long rtc_15:1; /* RW */
- unsigned long rtc_16:1; /* RW */
- unsigned long rtc_17:1; /* RW */
- unsigned long rtc_18:1; /* RW */
- unsigned long rtc_19:1; /* RW */
- unsigned long rtc_20:1; /* RW */
- unsigned long rtc_21:1; /* RW */
- unsigned long rtc_22:1; /* RW */
- unsigned long rtc_23:1; /* RW */
- unsigned long rtc_24:1; /* RW */
- unsigned long rtc_25:1; /* RW */
- unsigned long rtc_26:1; /* RW */
- unsigned long rtc_27:1; /* RW */
- unsigned long rtc_28:1; /* RW */
- unsigned long rtc_29:1; /* RW */
- unsigned long rtc_30:1; /* RW */
- unsigned long rtc_31:1; /* RW */
- unsigned long rsvd_32_63:32;
- } s3;
- struct uv4h_event_occurred2_s {
- unsigned long message_accelerator_int0:1; /* RW */
- unsigned long message_accelerator_int1:1; /* RW */
- unsigned long message_accelerator_int2:1; /* RW */
- unsigned long message_accelerator_int3:1; /* RW */
- unsigned long message_accelerator_int4:1; /* RW */
- unsigned long message_accelerator_int5:1; /* RW */
- unsigned long message_accelerator_int6:1; /* RW */
- unsigned long message_accelerator_int7:1; /* RW */
- unsigned long message_accelerator_int8:1; /* RW */
- unsigned long message_accelerator_int9:1; /* RW */
- unsigned long message_accelerator_int10:1; /* RW */
- unsigned long message_accelerator_int11:1; /* RW */
- unsigned long message_accelerator_int12:1; /* RW */
- unsigned long message_accelerator_int13:1; /* RW */
- unsigned long message_accelerator_int14:1; /* RW */
- unsigned long message_accelerator_int15:1; /* RW */
- unsigned long rtc_interval_int:1; /* RW */
- unsigned long bau_dashboard_int:1; /* RW */
- unsigned long rtc_0:1; /* RW */
- unsigned long rtc_1:1; /* RW */
- unsigned long rtc_2:1; /* RW */
- unsigned long rtc_3:1; /* RW */
- unsigned long rtc_4:1; /* RW */
- unsigned long rtc_5:1; /* RW */
- unsigned long rtc_6:1; /* RW */
- unsigned long rtc_7:1; /* RW */
- unsigned long rtc_8:1; /* RW */
- unsigned long rtc_9:1; /* RW */
- unsigned long rtc_10:1; /* RW */
- unsigned long rtc_11:1; /* RW */
- unsigned long rtc_12:1; /* RW */
- unsigned long rtc_13:1; /* RW */
- unsigned long rtc_14:1; /* RW */
- unsigned long rtc_15:1; /* RW */
- unsigned long rtc_16:1; /* RW */
- unsigned long rtc_17:1; /* RW */
- unsigned long rtc_18:1; /* RW */
- unsigned long rtc_19:1; /* RW */
- unsigned long rtc_20:1; /* RW */
- unsigned long rtc_21:1; /* RW */
- unsigned long rtc_22:1; /* RW */
- unsigned long rtc_23:1; /* RW */
- unsigned long rtc_24:1; /* RW */
- unsigned long rtc_25:1; /* RW */
- unsigned long rtc_26:1; /* RW */
- unsigned long rtc_27:1; /* RW */
- unsigned long rtc_28:1; /* RW */
- unsigned long rtc_29:1; /* RW */
- unsigned long rtc_30:1; /* RW */
- unsigned long rtc_31:1; /* RW */
- unsigned long rsvd_50_63:14;
- } s4;
};
/* ========================================================================= */
-/* UVXH_EVENT_OCCURRED2_ALIAS */
+/* UVH_SCRATCH5 */
/* ========================================================================= */
-#define UVXH_EVENT_OCCURRED2_ALIAS 0x70108UL
-
-#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70
-#define UV3H_EVENT_OCCURRED2_ALIAS_32 0xb70
-#define UV4H_EVENT_OCCURRED2_ALIAS_32 0x610
-#define UVH_EVENT_OCCURRED2_ALIAS_32 ( \
- is_uv2_hub() ? UV2H_EVENT_OCCURRED2_ALIAS_32 : \
- is_uv3_hub() ? UV3H_EVENT_OCCURRED2_ALIAS_32 : \
- /*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_ALIAS_32)
+#define UVH_SCRATCH5 ( \
+ is_uv(UV5) ? 0xb0200UL : \
+ is_uv(UV4) ? 0xb0200UL : \
+ is_uv(UV3) ? 0x2d0200UL : \
+ is_uv(UV2) ? 0x2d0200UL : \
+ 0)
+#define UV5H_SCRATCH5 0xb0200UL
+#define UV4H_SCRATCH5 0xb0200UL
+#define UV3H_SCRATCH5 0x2d0200UL
+#define UV2H_SCRATCH5 0x2d0200UL
+/* UVH common defines */
+#define UVH_SCRATCH5_SCRATCH5_SHFT 0
+#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-/* ========================================================================= */
-/* UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 */
-/* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2 0xc8130UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_2 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_2)
+/* UVXH common defines */
+#define UVXH_SCRATCH5_SCRATCH5_SHFT 0
+#define UVXH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0xa10
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_2_32 ( \
- is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 : \
- is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 : \
- /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_32)
+/* UVYH common defines */
+#define UVYH_SCRATCH5_SCRATCH5_SHFT 0
+#define UVYH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
-#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+/* UV5 unique defines */
+#define UV5H_SCRATCH5_SCRATCH5_SHFT 0
+#define UV5H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+/* UV4 unique defines */
+#define UV4H_SCRATCH5_SCRATCH5_SHFT 0
+#define UV4H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+/* UV3 unique defines */
+#define UV3H_SCRATCH5_SCRATCH5_SHFT 0
+#define UV3H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
-#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+/* UV2 unique defines */
+#define UV2H_SCRATCH5_SCRATCH5_SHFT 0
+#define UV2H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
-union uvxh_lb_bau_sb_activation_status_2_u {
+union uvh_scratch5_u {
unsigned long v;
- struct uvxh_lb_bau_sb_activation_status_2_s {
- unsigned long aux_error:64; /* RW */
- } sx;
- struct uv2h_lb_bau_sb_activation_status_2_s {
- unsigned long aux_error:64; /* RW */
- } s2;
- struct uv3h_lb_bau_sb_activation_status_2_s {
- unsigned long aux_error:64; /* RW */
- } s3;
- struct uv4h_lb_bau_sb_activation_status_2_s {
- unsigned long aux_error:64; /* RW */
- } s4;
-};
-/* ========================================================================= */
-/* UV3H_GR0_GAM_GR_CONFIG */
-/* ========================================================================= */
-#define UV3H_GR0_GAM_GR_CONFIG 0xc00028UL
-
-#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_SHFT 0
-#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10
-#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_MASK 0x000000000000003fUL
-#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+ /* UVH common struct */
+ struct uvh_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } s;
-union uv3h_gr0_gam_gr_config_u {
- unsigned long v;
- struct uv3h_gr0_gam_gr_config_s {
- unsigned long m_skt:6; /* RW */
- unsigned long undef_6_9:4; /* Undefined */
- unsigned long subspace:1; /* RW */
- unsigned long reserved:53;
- } s3;
-};
+ /* UVXH common struct */
+ struct uvxh_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } sx;
-/* ========================================================================= */
-/* UV4H_LB_PROC_INTD_QUEUE_FIRST */
-/* ========================================================================= */
-#define UV4H_LB_PROC_INTD_QUEUE_FIRST 0xa4100UL
+ /* UVYH common struct */
+ struct uvyh_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } sy;
-#define UV4H_LB_PROC_INTD_QUEUE_FIRST_FIRST_PAYLOAD_ADDRESS_SHFT 6
-#define UV4H_LB_PROC_INTD_QUEUE_FIRST_FIRST_PAYLOAD_ADDRESS_MASK 0x00003fffffffffc0UL
+ /* UV5 unique struct */
+ struct uv5h_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } s5;
-union uv4h_lb_proc_intd_queue_first_u {
- unsigned long v;
- struct uv4h_lb_proc_intd_queue_first_s {
- unsigned long undef_0_5:6; /* Undefined */
- unsigned long first_payload_address:40; /* RW */
+ /* UV4 unique struct */
+ struct uv4h_scratch5_s {
+ unsigned long scratch5:64; /* RW */
} s4;
-};
-/* ========================================================================= */
-/* UV4H_LB_PROC_INTD_QUEUE_LAST */
-/* ========================================================================= */
-#define UV4H_LB_PROC_INTD_QUEUE_LAST 0xa4108UL
-
-#define UV4H_LB_PROC_INTD_QUEUE_LAST_LAST_PAYLOAD_ADDRESS_SHFT 5
-#define UV4H_LB_PROC_INTD_QUEUE_LAST_LAST_PAYLOAD_ADDRESS_MASK 0x00003fffffffffe0UL
+ /* UV3 unique struct */
+ struct uv3h_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } s3;
-union uv4h_lb_proc_intd_queue_last_u {
- unsigned long v;
- struct uv4h_lb_proc_intd_queue_last_s {
- unsigned long undef_0_4:5; /* Undefined */
- unsigned long last_payload_address:41; /* RW */
- } s4;
+ /* UV2 unique struct */
+ struct uv2h_scratch5_s {
+ unsigned long scratch5:64; /* RW */
+ } s2;
};
/* ========================================================================= */
-/* UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR */
+/* UVH_SCRATCH5_ALIAS */
/* ========================================================================= */
-#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR 0xa4118UL
+#define UVH_SCRATCH5_ALIAS ( \
+ is_uv(UV5) ? 0xb0208UL : \
+ is_uv(UV4) ? 0xb0208UL : \
+ is_uv(UV3) ? 0x2d0208UL : \
+ is_uv(UV2) ? 0x2d0208UL : \
+ 0)
+#define UV5H_SCRATCH5_ALIAS 0xb0208UL
+#define UV4H_SCRATCH5_ALIAS 0xb0208UL
+#define UV3H_SCRATCH5_ALIAS 0x2d0208UL
+#define UV2H_SCRATCH5_ALIAS 0x2d0208UL
-#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR_SOFT_ACK_PENDING_FLAGS_SHFT 0
-#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR_SOFT_ACK_PENDING_FLAGS_MASK 0x00000000000000ffUL
-
-union uv4h_lb_proc_intd_soft_ack_clear_u {
- unsigned long v;
- struct uv4h_lb_proc_intd_soft_ack_clear_s {
- unsigned long soft_ack_pending_flags:8; /* WP */
- } s4;
-};
/* ========================================================================= */
-/* UV4H_LB_PROC_INTD_SOFT_ACK_PENDING */
+/* UVH_SCRATCH5_ALIAS_2 */
/* ========================================================================= */
-#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING 0xa4110UL
-
-#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING_SOFT_ACK_FLAGS_SHFT 0
-#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING_SOFT_ACK_FLAGS_MASK 0x00000000000000ffUL
+#define UVH_SCRATCH5_ALIAS_2 ( \
+ is_uv(UV5) ? 0xb0210UL : \
+ is_uv(UV4) ? 0xb0210UL : \
+ is_uv(UV3) ? 0x2d0210UL : \
+ is_uv(UV2) ? 0x2d0210UL : \
+ 0)
+#define UV5H_SCRATCH5_ALIAS_2 0xb0210UL
+#define UV4H_SCRATCH5_ALIAS_2 0xb0210UL
+#define UV3H_SCRATCH5_ALIAS_2 0x2d0210UL
+#define UV2H_SCRATCH5_ALIAS_2 0x2d0210UL
-union uv4h_lb_proc_intd_soft_ack_pending_u {
- unsigned long v;
- struct uv4h_lb_proc_intd_soft_ack_pending_s {
- unsigned long soft_ack_flags:8; /* RW */
- } s4;
-};
#endif /* _ASM_X86_UV_UV_MMRS_H */
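The whole header rewrite above follows one pattern: each old per-hub
UV?H_* macro family collapses into a single UVH_* name that selects the
right value at runtime via is_uv(), and each union gains per-hub struct
views (s, sx, s2..s5, s4a). A minimal sketch of how a caller is meant to
consume these selectors, assuming the usual kernel headers
(uv_read_local_mmr() is the existing accessor from <asm/uv/uv_hub.h>;
the wrapper function itself is hypothetical):

	static unsigned long read_redirect0_nasid(void)
	{
		union uvh_rh_gam_mmioh_redirect_config0_u cfg;

		/* Selector expands to the UV3 or UV4 offset, 0 elsewhere */
		cfg.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0);

		/* Field widths differ per hub (12 vs 15 bits): pick a view */
		if (is_uv(UV4A))
			return cfg.s4a.nasid;	/* 12-bit nasid on UV4A */
		return cfg.s.nasid;		/* 15-bit nasid on UV3/UV4 */
	}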
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c3daf0aaa0ee..cdaab30880b9 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -239,7 +239,7 @@ void __init arch_init_ideal_nops(void)
return;
}
- /* fall through */
+ fallthrough;
default:
#ifdef CONFIG_X86_64
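This hunk, like the io_apic.c and probe_32.c hunks below, converts the
/* fall through */ comment convention to the fallthrough pseudo-keyword
from <linux/compiler_attributes.h>, which expands to
__attribute__((__fallthrough__)) on compilers that support it, so that
-Wimplicit-fallthrough can verify the fall-through is intentional.
Illustrative shape only (the case labels and helpers here are made up):

	switch (state) {
	case STATE_PREP:
		prepare();
		fallthrough;	/* deliberate: PREP also runs the COMMIT leg */
	case STATE_COMMIT:
		commit();
		break;
	}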
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 21325a4a78b9..21f9c7f11779 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -800,7 +800,7 @@ static int irq_polarity(int idx)
return IOAPIC_POL_HIGH;
case MP_IRQPOL_RESERVED:
pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
- /* fall through */
+ fallthrough;
case MP_IRQPOL_ACTIVE_LOW:
default: /* Pointless default required due to gcc stupidity */
return IOAPIC_POL_LOW;
@@ -848,7 +848,7 @@ static int irq_trigger(int idx)
return IOAPIC_EDGE;
case MP_IRQTRIG_RESERVED:
pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
- /* fall through */
+ fallthrough;
case MP_IRQTRIG_LEVEL:
default: /* Pointless default required due to gcc stupidity */
return IOAPIC_LEVEL;
@@ -2243,6 +2243,7 @@ static inline void __init check_timer(void)
legacy_pic->init(0);
legacy_pic->make_irq(0);
apic_write(APIC_LVT0, APIC_DM_EXTINT);
+ legacy_pic->unmask(0);
unlock_ExtINT_logic();
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 7bda71def557..99ee61c9ba54 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -149,7 +149,7 @@ void __init default_setup_apic_routing(void)
break;
}
/* P4 and above */
- /* fall through */
+ fallthrough;
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
def_to_bigsmp = 1;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index dae32d948bf2..f8a56b5dc29f 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -161,6 +161,7 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
apicd->move_in_progress = true;
apicd->prev_vector = apicd->vector;
apicd->prev_cpu = apicd->cpu;
+ WARN_ON_ONCE(apicd->cpu == newcpu);
} else {
irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
managed);
@@ -910,7 +911,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
__send_cleanup_vector(apicd);
}
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+void irq_complete_move(struct irq_cfg *cfg)
{
struct apic_chip_data *apicd;
@@ -918,15 +919,16 @@ static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
if (likely(!apicd->move_in_progress))
return;
- if (vector == apicd->vector && apicd->cpu == smp_processor_id())
+ /*
+ * If the interrupt arrived on the new target CPU, cleanup the
+ * vector on the old target CPU. A vector check is not required
+ * because an interrupt can never move from one vector to another
+ * on the same CPU.
+ */
+ if (apicd->cpu == smp_processor_id())
__send_cleanup_vector(apicd);
}
-void irq_complete_move(struct irq_cfg *cfg)
-{
- __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
/*
* Called from fixup_irqs() with @desc->lock held and interrupts disabled.
*/
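With the vector argument gone, irq_complete_move() needs only the
irq_cfg, and the cleanup condition reduces to "did this interrupt
arrive on the new target CPU?". A hedged sketch of the calling shape,
assuming the usual kernel headers (the flow handler below is
illustrative, not one of the patch's actual callers):

	static void demo_flow_handler(struct irq_desc *desc)
	{
		struct irq_cfg *cfg = irqd_cfg(irq_desc_get_irq_data(desc));

		/*
		 * Returns immediately unless a vector move is in flight;
		 * frees the old CPU's vector only when running on the new
		 * target CPU, so it is safe to call unconditionally.
		 */
		irq_complete_move(cfg);
		handle_edge_irq(desc);
	}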
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 0b6eea3f54e6..714233cee0b5 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -5,6 +5,7 @@
*
* SGI UV APIC functions (note: not an Intel compatible APIC)
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/crash_dump.h>
@@ -29,19 +30,24 @@ static int uv_hubbed_system;
static int uv_hubless_system;
static u64 gru_start_paddr, gru_end_paddr;
static union uvh_apicid uvh_apicid;
+static int uv_node_id;
-/* Unpack OEM/TABLE ID's to be NULL terminated strings */
+/* Unpack AT/OEM/TABLE IDs to be NULL-terminated strings */
+static u8 uv_archtype[UV_AT_SIZE];
static u8 oem_id[ACPI_OEM_ID_SIZE + 1];
static u8 oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
-/* Information derived from CPUID: */
+/* Information derived from CPUID and some UV MMRs */
static struct {
unsigned int apicid_shift;
unsigned int apicid_mask;
unsigned int socketid_shift; /* aka pnode_shift for UV2/3 */
unsigned int pnode_mask;
+ unsigned int nasid_shift;
unsigned int gpa_shift;
unsigned int gnode_shift;
+ unsigned int m_skt;
+ unsigned int n_skt;
} uv_cpuid;
static int uv_min_hub_revision_id;
@@ -77,6 +83,9 @@ static unsigned long __init uv_early_read_mmr(unsigned long addr)
static inline bool is_GRU_range(u64 start, u64 end)
{
+ if (!gru_start_paddr)
+ return false;
+
return start >= gru_start_paddr && end <= gru_end_paddr;
}
@@ -85,43 +94,102 @@ static bool uv_is_untracked_pat_range(u64 start, u64 end)
return is_ISA_range(start, end) || is_GRU_range(start, end);
}
-static int __init early_get_pnodeid(void)
+static void __init early_get_pnodeid(void)
{
- union uvh_node_id_u node_id;
- union uvh_rh_gam_config_mmr_u m_n_config;
int pnode;
- /* Currently, all blades have same revision number */
+ uv_cpuid.m_skt = 0;
+ if (UVH_RH10_GAM_ADDR_MAP_CONFIG) {
+ union uvh_rh10_gam_addr_map_config_u m_n_config;
+
+ m_n_config.v = uv_early_read_mmr(UVH_RH10_GAM_ADDR_MAP_CONFIG);
+ uv_cpuid.n_skt = m_n_config.s.n_skt;
+ uv_cpuid.nasid_shift = 0;
+ } else if (UVH_RH_GAM_ADDR_MAP_CONFIG) {
+ union uvh_rh_gam_addr_map_config_u m_n_config;
+
+ m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_ADDR_MAP_CONFIG);
+ uv_cpuid.n_skt = m_n_config.s.n_skt;
+ if (is_uv(UV3))
+ uv_cpuid.m_skt = m_n_config.s3.m_skt;
+ if (is_uv(UV2))
+ uv_cpuid.m_skt = m_n_config.s2.m_skt;
+ uv_cpuid.nasid_shift = 1;
+ } else {
+ unsigned long GAM_ADDR_MAP_CONFIG = 0;
+
+ WARN(GAM_ADDR_MAP_CONFIG == 0,
+ "UV: WARN: GAM_ADDR_MAP_CONFIG is not available\n");
+ uv_cpuid.n_skt = 0;
+ uv_cpuid.nasid_shift = 0;
+ }
+
+ if (is_uv(UV4|UVY))
+ uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */
+
+ uv_cpuid.pnode_mask = (1 << uv_cpuid.n_skt) - 1;
+ pnode = (uv_node_id >> uv_cpuid.nasid_shift) & uv_cpuid.pnode_mask;
+ uv_cpuid.gpa_shift = 46; /* Default unless changed */
+
+ pr_info("UV: n_skt:%d pnmsk:%x pn:%x\n",
+ uv_cpuid.n_skt, uv_cpuid.pnode_mask, pnode);
+}
+
+/* Running on a UV Hubbed system, determine which UV Hub Type it is */
+static int __init early_set_hub_type(void)
+{
+ union uvh_node_id_u node_id;
+
+ /*
+ * The NODE_ID MMR is always at offset 0.
+ * Contains the chip part # + revision.
+ * Node_id field started with 15 bits,
+	 * The node_id field started as 15 bits; it is now 7,
+	 * with the upper 8 masked to 0.
+ */
node_id.v = uv_early_read_mmr(UVH_NODE_ID);
- m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
- uv_min_hub_revision_id = node_id.s.revision;
+ uv_node_id = node_id.sx.node_id;
switch (node_id.s.part_number) {
- case UV2_HUB_PART_NUMBER:
- case UV2_HUB_PART_NUMBER_X:
- uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+
+ case UV5_HUB_PART_NUMBER:
+ uv_min_hub_revision_id = node_id.s.revision
+ + UV5_HUB_REVISION_BASE;
+ uv_hub_type_set(UV5);
break;
+
+ /* UV4/4A only have a revision difference */
+ case UV4_HUB_PART_NUMBER:
+ uv_min_hub_revision_id = node_id.s.revision
+ + UV4_HUB_REVISION_BASE;
+ uv_hub_type_set(UV4);
+ if (uv_min_hub_revision_id == UV4A_HUB_REVISION_BASE)
+ uv_hub_type_set(UV4|UV4A);
+ break;
+
case UV3_HUB_PART_NUMBER:
case UV3_HUB_PART_NUMBER_X:
- uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
+ uv_min_hub_revision_id = node_id.s.revision
+ + UV3_HUB_REVISION_BASE;
+ uv_hub_type_set(UV3);
break;
- /* Update: UV4A has only a modified revision to indicate HUB fixes */
- case UV4_HUB_PART_NUMBER:
- uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1;
- uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */
+ case UV2_HUB_PART_NUMBER:
+ case UV2_HUB_PART_NUMBER_X:
+ uv_min_hub_revision_id = node_id.s.revision
+ + UV2_HUB_REVISION_BASE - 1;
+ uv_hub_type_set(UV2);
break;
+
+ default:
+ return 0;
}
- uv_hub_info->hub_revision = uv_min_hub_revision_id;
- uv_cpuid.pnode_mask = (1 << m_n_config.s.n_skt) - 1;
- pnode = (node_id.s.node_id >> 1) & uv_cpuid.pnode_mask;
- uv_cpuid.gpa_shift = 46; /* Default unless changed */
+ pr_info("UV: part#:%x rev:%d rev_id:%d UVtype:0x%x\n",
+ node_id.s.part_number, node_id.s.revision,
+ uv_min_hub_revision_id, is_uv(~0));
- pr_info("UV: rev:%d part#:%x nodeid:%04x n_skt:%d pnmsk:%x pn:%x\n",
- node_id.s.revision, node_id.s.part_number, node_id.s.node_id,
- m_n_config.s.n_skt, uv_cpuid.pnode_mask, pnode);
- return pnode;
+ return 1;
}
static void __init uv_tsc_check_sync(void)
@@ -130,38 +198,41 @@ static void __init uv_tsc_check_sync(void)
int sync_state;
int mmr_shift;
char *state;
- bool valid;
- /* Accommodate different UV arch BIOSes */
+ /* Different returns from different UV BIOS versions */
mmr = uv_early_read_mmr(UVH_TSC_SYNC_MMR);
mmr_shift =
is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT;
sync_state = (mmr >> mmr_shift) & UVH_TSC_SYNC_MASK;
+ /* Check if TSC is valid for all sockets */
switch (sync_state) {
case UVH_TSC_SYNC_VALID:
state = "in sync";
- valid = true;
+ mark_tsc_async_resets("UV BIOS");
break;
- case UVH_TSC_SYNC_INVALID:
- state = "unstable";
- valid = false;
+ /* If BIOS state unknown, don't do anything */
+ case UVH_TSC_SYNC_UNKNOWN:
+ state = "unknown";
break;
+
+ /* Otherwise, BIOS indicates problem with TSC */
default:
- state = "unknown: assuming valid";
- valid = true;
+ state = "unstable";
+ mark_tsc_unstable("UV BIOS");
break;
}
pr_info("UV: TSC sync state from BIOS:0%d(%s)\n", sync_state, state);
-
- /* Mark flag that says TSC != 0 is valid for socket 0 */
- if (valid)
- mark_tsc_async_resets("UV BIOS");
- else
- mark_tsc_unstable("UV BIOS");
}
+/* Selector for (3|4|4A) structs */
+#define uvxy_field(sname, field, undef) ( \
+ is_uv(UV4A) ? sname.s4a.field : \
+ is_uv(UV4) ? sname.s4.field : \
+ is_uv(UV3) ? sname.s3.field : \
+ undef)
+
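A minimal sketch of how this selector is used later in the patch (mmioh0 is the
caller's overlay-config union; the expansion shown is simply what the preprocessor
produces, written out for clarity):

	unsigned long base = uvxy_field(mmioh0, base, 0);
	/* expands to:
	 *   is_uv(UV4A) ? mmioh0.s4a.base :
	 *   is_uv(UV4)  ? mmioh0.s4.base  :
	 *   is_uv(UV3)  ? mmioh0.s3.base  : 0
	 */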
/* [Copied from arch/x86/kernel/cpu/topology.c:detect_extended_topology()] */
#define SMT_LEVEL 0 /* Leaf 0xb SMT level */
@@ -221,29 +292,110 @@ static void __init uv_stringify(int len, char *to, char *from)
strncpy(to, from, len-1);
}
-static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
+/* Find UV arch type entry in UVsystab */
+static unsigned long __init early_find_archtype(struct uv_systab *st)
+{
+ int i;
+
+ for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) {
+ unsigned long ptr = st->entry[i].offset;
+
+ if (!ptr)
+ continue;
+ ptr += (unsigned long)st;
+ if (st->entry[i].type == UV_SYSTAB_TYPE_ARCH_TYPE)
+ return ptr;
+ }
+ return 0;
+}
+
+/* Validate UV arch type field in UVsystab */
+static int __init decode_arch_type(unsigned long ptr)
+{
+ struct uv_arch_type_entry *uv_ate = (struct uv_arch_type_entry *)ptr;
+ int n = strlen(uv_ate->archtype);
+
+ if (n > 0 && n < sizeof(uv_ate->archtype)) {
+ pr_info("UV: UVarchtype received from BIOS\n");
+ uv_stringify(UV_AT_SIZE, uv_archtype, uv_ate->archtype);
+ return 1;
+ }
+ return 0;
+}
+
+/* Determine if UV arch type entry might exist in UVsystab */
+static int __init early_get_arch_type(void)
{
- int pnodeid;
- int uv_apic;
+ unsigned long uvst_physaddr, uvst_size, ptr;
+ struct uv_systab *st;
+ u32 rev;
+ int ret;
+
+ uvst_physaddr = get_uv_systab_phys(0);
+ if (!uvst_physaddr)
+ return 0;
+
+ st = early_memremap_ro(uvst_physaddr, sizeof(struct uv_systab));
+ if (!st) {
+ pr_err("UV: Cannot access UVsystab, remap failed\n");
+ return 0;
+ }
+ rev = st->revision;
+ if (rev < UV_SYSTAB_VERSION_UV5) {
+ early_memunmap(st, sizeof(struct uv_systab));
+ return 0;
+ }
+
+ uvst_size = st->size;
+ early_memunmap(st, sizeof(struct uv_systab));
+ st = early_memremap_ro(uvst_physaddr, uvst_size);
+ if (!st) {
+ pr_err("UV: Cannot access UVarchtype, remap failed\n");
+ return 0;
+ }
+
+ ptr = early_find_archtype(st);
+ if (!ptr) {
+ early_memunmap(st, uvst_size);
+ return 0;
+ }
+
+ ret = decode_arch_type(ptr);
+ early_memunmap(st, uvst_size);
+ return ret;
+}
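The double remap above follows a common early-boot pattern: map only the fixed-size
header first to learn the table's full size, unmap, then map the whole table. A
condensed sketch of the shape (error handling elided):

	st = early_memremap_ro(phys, sizeof(*st));	/* header only */
	size = st->size;
	early_memunmap(st, sizeof(*st));
	st = early_memremap_ro(phys, size);		/* full table */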
+
+static int __init uv_set_system_type(char *_oem_id)
+{
+ /* Save OEM_ID passed from ACPI MADT */
uv_stringify(sizeof(oem_id), oem_id, _oem_id);
- uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);
- if (strncmp(oem_id, "SGI", 3) != 0) {
- if (strncmp(oem_id, "NSGI", 4) != 0)
+ /* Check if BIOS sent us a UVarchtype */
+ if (!early_get_arch_type())
+
+	/* If not, use the OEM ID for UVarchtype */
+ uv_stringify(UV_AT_SIZE, uv_archtype, _oem_id);
+
+ /* Check if not hubbed */
+ if (strncmp(uv_archtype, "SGI", 3) != 0) {
+
+ /* (Not hubbed), check if not hubless */
+ if (strncmp(uv_archtype, "NSGI", 4) != 0)
+
+ /* (Not hubless), not a UV */
return 0;
- /* UV4 Hubless, CH, (0x11:UV4+Any) */
- if (strncmp(oem_id, "NSGI4", 5) == 0)
+ /* UV4 Hubless: CH */
+ if (strncmp(uv_archtype, "NSGI4", 5) == 0)
uv_hubless_system = 0x11;
- /* UV3 Hubless, UV300/MC990X w/o hub (0x9:UV3+Any) */
+ /* UV3 Hubless: UV300/MC990X w/o hub */
else
uv_hubless_system = 0x9;
- pr_info("UV: OEM IDs %s/%s, HUBLESS(0x%x)\n",
- oem_id, oem_table_id, uv_hubless_system);
-
+ pr_info("UV: OEM IDs %s/%s, SystemType %d, HUBLESS ID %x\n",
+ oem_id, oem_table_id, uv_system_type, uv_hubless_system);
return 0;
}
@@ -252,60 +404,83 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
return 0;
}
- /* Set up early hub type field in uv_hub_info for Node 0 */
- uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
+ /* Set hubbed type if true */
+ uv_hub_info->hub_revision =
+ !strncmp(uv_archtype, "SGI5", 4) ? UV5_HUB_REVISION_BASE :
+ !strncmp(uv_archtype, "SGI4", 4) ? UV4_HUB_REVISION_BASE :
+ !strncmp(uv_archtype, "SGI3", 4) ? UV3_HUB_REVISION_BASE :
+ !strcmp(uv_archtype, "SGI2") ? UV2_HUB_REVISION_BASE : 0;
+
+ switch (uv_hub_info->hub_revision) {
+ case UV5_HUB_REVISION_BASE:
+ uv_hubbed_system = 0x21;
+ uv_hub_type_set(UV5);
+ break;
- /*
- * Determine UV arch type.
- * SGI2: UV2000/3000
- * SGI3: UV300 (truncated to 4 chars because of different varieties)
- * SGI4: UV400 (truncated to 4 chars because of different varieties)
- */
- if (!strncmp(oem_id, "SGI4", 4)) {
- uv_hub_info->hub_revision = UV4_HUB_REVISION_BASE;
+ case UV4_HUB_REVISION_BASE:
uv_hubbed_system = 0x11;
+ uv_hub_type_set(UV4);
+ break;
- } else if (!strncmp(oem_id, "SGI3", 4)) {
- uv_hub_info->hub_revision = UV3_HUB_REVISION_BASE;
+ case UV3_HUB_REVISION_BASE:
uv_hubbed_system = 0x9;
+ uv_hub_type_set(UV3);
+ break;
- } else if (!strcmp(oem_id, "SGI2")) {
- uv_hub_info->hub_revision = UV2_HUB_REVISION_BASE;
+ case UV2_HUB_REVISION_BASE:
uv_hubbed_system = 0x5;
+ uv_hub_type_set(UV2);
+ break;
- } else {
- uv_hub_info->hub_revision = 0;
- goto badbios;
+ default:
+ return 0;
}
- pnodeid = early_get_pnodeid();
- early_get_apic_socketid_shift();
+ /* Get UV hub chip part number & revision */
+ early_set_hub_type();
+ /* Other UV setup functions */
+ early_get_pnodeid();
+ early_get_apic_socketid_shift();
x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
x86_platform.nmi_init = uv_nmi_init;
+ uv_tsc_check_sync();
+
+ return 1;
+}
+
+/* Called early to probe for the correct APIC driver */
+static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
+{
+ /* Set up early hub info fields for Node 0 */
+ uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
+
+ /* If not UV, return. */
+ if (likely(uv_set_system_type(_oem_id) == 0))
+ return 0;
+
+ /* Save and Decode OEM Table ID */
+ uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);
- if (!strcmp(oem_table_id, "UVX")) {
- /* This is the most common hardware variant: */
+ /* This is the most common hardware variant, x2apic mode */
+ if (!strcmp(oem_table_id, "UVX"))
uv_system_type = UV_X2APIC;
- uv_apic = 0;
- } else if (!strcmp(oem_table_id, "UVL")) {
- /* Only used for very small systems: */
+ /* Only used for very small systems, usually 1 chassis, legacy mode */
+ else if (!strcmp(oem_table_id, "UVL"))
uv_system_type = UV_LEGACY_APIC;
- uv_apic = 0;
- } else {
+ else
goto badbios;
- }
- pr_info("UV: OEM IDs %s/%s, System/HUB Types %d/%d, uv_apic %d\n", oem_id, oem_table_id, uv_system_type, uv_min_hub_revision_id, uv_apic);
- uv_tsc_check_sync();
+ pr_info("UV: OEM IDs %s/%s, System/UVType %d/0x%x, HUB RevID %d\n",
+ oem_id, oem_table_id, uv_system_type, is_uv(UV_ANY),
+ uv_min_hub_revision_id);
- return uv_apic;
+ return 0;
badbios:
- pr_err("UV: OEM_ID:%s OEM_TABLE_ID:%s\n", oem_id, oem_table_id);
- pr_err("Current UV Type or BIOS not supported\n");
+ pr_err("UV: UVarchtype:%s not supported\n", uv_archtype);
BUG();
}
@@ -673,12 +848,12 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
};
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH 3
-#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
+#define DEST_SHIFT UVXH_RH_GAM_ALIAS_0_REDIRECT_CONFIG_DEST_BASE_SHFT
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
- union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
- union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
+ union uvh_rh_gam_alias_2_overlay_config_u alias;
+ union uvh_rh_gam_alias_2_redirect_config_u redirect;
unsigned long m_redirect;
unsigned long m_overlay;
int i;
@@ -686,16 +861,16 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
for (i = 0; i < UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH; i++) {
switch (i) {
case 0:
- m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR;
- m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR;
+ m_redirect = UVH_RH_GAM_ALIAS_0_REDIRECT_CONFIG;
+ m_overlay = UVH_RH_GAM_ALIAS_0_OVERLAY_CONFIG;
break;
case 1:
- m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR;
- m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR;
+ m_redirect = UVH_RH_GAM_ALIAS_1_REDIRECT_CONFIG;
+ m_overlay = UVH_RH_GAM_ALIAS_1_OVERLAY_CONFIG;
break;
case 2:
- m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR;
- m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR;
+ m_redirect = UVH_RH_GAM_ALIAS_2_REDIRECT_CONFIG;
+ m_overlay = UVH_RH_GAM_ALIAS_2_OVERLAY_CONFIG;
break;
}
alias.v = uv_read_local_mmr(m_overlay);
@@ -710,6 +885,7 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
}
enum map_type {map_wb, map_uc};
+static const char * const mt[] = { "WB", "UC" };
static __init void map_high(char *id, unsigned long base, int pshift, int bshift, int max_pnode, enum map_type map_type)
{
@@ -721,23 +897,36 @@ static __init void map_high(char *id, unsigned long base, int pshift, int bshift
pr_info("UV: Map %s_HI base address NULL\n", id);
return;
}
- pr_debug("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
if (map_type == map_uc)
init_extra_mapping_uc(paddr, bytes);
else
init_extra_mapping_wb(paddr, bytes);
+
+ pr_info("UV: Map %s_HI 0x%lx - 0x%lx %s (%d segments)\n",
+ id, paddr, paddr + bytes, mt[map_type], max_pnode + 1);
}
static __init void map_gru_high(int max_pnode)
{
- union uvh_rh_gam_gru_overlay_config_mmr_u gru;
- int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
- unsigned long mask = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK;
- unsigned long base;
+ union uvh_rh_gam_gru_overlay_config_u gru;
+ unsigned long mask, base;
+ int shift;
+
+ if (UVH_RH_GAM_GRU_OVERLAY_CONFIG) {
+ gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG);
+ shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT;
+ mask = UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK;
+ } else if (UVH_RH10_GAM_GRU_OVERLAY_CONFIG) {
+ gru.v = uv_read_local_mmr(UVH_RH10_GAM_GRU_OVERLAY_CONFIG);
+ shift = UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT;
+ mask = UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_MASK;
+ } else {
+ pr_err("UV: GRU unavailable (no MMR)\n");
+ return;
+ }
- gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
if (!gru.s.enable) {
- pr_info("UV: GRU disabled\n");
+ pr_info("UV: GRU disabled (by BIOS)\n");
return;
}
@@ -749,62 +938,104 @@ static __init void map_gru_high(int max_pnode)
static __init void map_mmr_high(int max_pnode)
{
- union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
- int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
+ unsigned long base;
+ int shift;
+ bool enable;
+
+ if (UVH_RH10_GAM_MMR_OVERLAY_CONFIG) {
+ union uvh_rh10_gam_mmr_overlay_config_u mmr;
+
+ mmr.v = uv_read_local_mmr(UVH_RH10_GAM_MMR_OVERLAY_CONFIG);
+ enable = mmr.s.enable;
+ base = mmr.s.base;
+ shift = UVH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT;
+ } else if (UVH_RH_GAM_MMR_OVERLAY_CONFIG) {
+ union uvh_rh_gam_mmr_overlay_config_u mmr;
+
+ mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG);
+ enable = mmr.s.enable;
+ base = mmr.s.base;
+ shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT;
+ } else {
+ pr_err("UV:%s:RH_GAM_MMR_OVERLAY_CONFIG MMR undefined?\n",
+ __func__);
+ return;
+ }
- mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
- if (mmr.s.enable)
- map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
+ if (enable)
+ map_high("MMR", base, shift, shift, max_pnode, map_uc);
else
pr_info("UV: MMR disabled\n");
}
-/* UV3/4 have identical MMIOH overlay configs, UV4A is slightly different */
-static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
-{
- unsigned long overlay;
- unsigned long mmr;
- unsigned long base;
- unsigned long nasid_mask;
- unsigned long m_overlay;
- int i, n, shift, m_io, max_io;
- int nasid, lnasid, fi, li;
- char *id;
-
- if (index == 0) {
- id = "MMIOH0";
- m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR;
- overlay = uv_read_local_mmr(m_overlay);
- base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK;
- mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR;
- m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK)
- >> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
- shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
- n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
- nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK;
- } else {
- id = "MMIOH1";
- m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR;
- overlay = uv_read_local_mmr(m_overlay);
- base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK;
- mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR;
- m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK)
- >> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
- shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
- n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH;
- nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK;
+/* Arch specific ENUM cases */
+enum mmioh_arch {
+ UV2_MMIOH = -1,
+ UVY_MMIOH0, UVY_MMIOH1,
+ UVX_MMIOH0, UVX_MMIOH1,
+};
+
+/* Calculate and Map MMIOH Regions */
+static void __init calc_mmioh_map(enum mmioh_arch index,
+ int min_pnode, int max_pnode,
+ int shift, unsigned long base, int m_io, int n_io)
+{
+ unsigned long mmr, nasid_mask;
+ int nasid, min_nasid, max_nasid, lnasid, mapped;
+ int i, fi, li, n, max_io;
+ char id[8];
+
+ /* One (UV2) mapping */
+ if (index == UV2_MMIOH) {
+ strncpy(id, "MMIOH", sizeof(id));
+ max_io = max_pnode;
+ mapped = 0;
+ goto map_exit;
}
- pr_info("UV: %s overlay 0x%lx base:0x%lx m_io:%d\n", id, overlay, base, m_io);
- if (!(overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK)) {
- pr_info("UV: %s disabled\n", id);
+
+ /* small and large MMIOH mappings */
+ switch (index) {
+ case UVY_MMIOH0:
+ mmr = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0;
+ nasid_mask = UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK;
+ n = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH;
+ min_nasid = min_pnode;
+ max_nasid = max_pnode;
+ mapped = 1;
+ break;
+ case UVY_MMIOH1:
+ mmr = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1;
+ nasid_mask = UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK;
+ n = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH;
+ min_nasid = min_pnode;
+ max_nasid = max_pnode;
+ mapped = 1;
+ break;
+ case UVX_MMIOH0:
+ mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0;
+ nasid_mask = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK;
+ n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH;
+ min_nasid = min_pnode * 2;
+ max_nasid = max_pnode * 2;
+ mapped = 1;
+ break;
+ case UVX_MMIOH1:
+ mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1;
+ nasid_mask = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK;
+ n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH;
+ min_nasid = min_pnode * 2;
+ max_nasid = max_pnode * 2;
+ mapped = 1;
+ break;
+ default:
+ pr_err("UV:%s:Invalid mapping type:%d\n", __func__, index);
return;
}
- /* Convert to NASID: */
- min_pnode *= 2;
- max_pnode *= 2;
- max_io = lnasid = fi = li = -1;
+ /* enum values chosen so (index mod 2) is MMIOH 0/1 (low/high) */
+ snprintf(id, sizeof(id), "MMIOH%d", index%2);
+ max_io = lnasid = fi = li = -1;
for (i = 0; i < n; i++) {
unsigned long m_redirect = mmr + i * 8;
unsigned long redirect = uv_read_local_mmr(m_redirect);
@@ -814,9 +1045,12 @@ static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
pr_info("UV: %s redirect base 0x%lx(@0x%lx) 0x%04x\n",
id, redirect, m_redirect, nasid);
- /* Invalid NASID: */
- if (nasid < min_pnode || max_pnode < nasid)
+ /* Invalid NASID check */
+ if (nasid < min_nasid || max_nasid < nasid) {
+ pr_err("UV:%s:Invalid NASID:%x (range:%x..%x)\n",
+				__func__, nasid, min_nasid, max_nasid);
nasid = -1;
+ }
if (nasid == lnasid) {
li = i;
@@ -839,7 +1073,8 @@ static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
}
addr1 = (base << shift) + f * (1ULL << m_io);
addr2 = (base << shift) + (l + 1) * (1ULL << m_io);
- pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n", id, fi, li, lnasid, addr1, addr2);
+ pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
+ id, fi, li, lnasid, addr1, addr2);
if (max_io < l)
max_io = l;
}
@@ -847,49 +1082,93 @@ static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
lnasid = nasid;
}
- pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n", id, base, shift, m_io, max_io);
+map_exit:
+ pr_info("UV: %s base:0x%lx shift:%d m_io:%d max_io:%d max_pnode:0x%x\n",
+ id, base, shift, m_io, max_io, max_pnode);
- if (max_io >= 0)
+ if (max_io >= 0 && !mapped)
map_high(id, base, shift, m_io, max_io, map_uc);
}
static __init void map_mmioh_high(int min_pnode, int max_pnode)
{
- union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
- unsigned long mmr, base;
- int shift, enable, m_io, n_io;
+ /* UVY flavor */
+ if (UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0) {
+ union uvh_rh10_gam_mmioh_overlay_config0_u mmioh0;
+ union uvh_rh10_gam_mmioh_overlay_config1_u mmioh1;
+
+ mmioh0.v = uv_read_local_mmr(UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0);
+ if (unlikely(mmioh0.s.enable == 0))
+ pr_info("UV: MMIOH0 disabled\n");
+ else
+ calc_mmioh_map(UVY_MMIOH0, min_pnode, max_pnode,
+ UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT,
+ mmioh0.s.base, mmioh0.s.m_io, mmioh0.s.n_io);
- if (is_uv3_hub() || is_uv4_hub()) {
- /* Map both MMIOH regions: */
- map_mmioh_high_uv34(0, min_pnode, max_pnode);
- map_mmioh_high_uv34(1, min_pnode, max_pnode);
+ mmioh1.v = uv_read_local_mmr(UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1);
+ if (unlikely(mmioh1.s.enable == 0))
+ pr_info("UV: MMIOH1 disabled\n");
+ else
+ calc_mmioh_map(UVY_MMIOH1, min_pnode, max_pnode,
+ UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT,
+ mmioh1.s.base, mmioh1.s.m_io, mmioh1.s.n_io);
return;
}
+ /* UVX flavor */
+ if (UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0) {
+ union uvh_rh_gam_mmioh_overlay_config0_u mmioh0;
+ union uvh_rh_gam_mmioh_overlay_config1_u mmioh1;
+
+ mmioh0.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0);
+ if (unlikely(mmioh0.s.enable == 0))
+ pr_info("UV: MMIOH0 disabled\n");
+ else {
+ unsigned long base = uvxy_field(mmioh0, base, 0);
+ int m_io = uvxy_field(mmioh0, m_io, 0);
+ int n_io = uvxy_field(mmioh0, n_io, 0);
+
+ calc_mmioh_map(UVX_MMIOH0, min_pnode, max_pnode,
+ UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT,
+ base, m_io, n_io);
+ }
- if (is_uv2_hub()) {
- mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
- shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
- mmioh.v = uv_read_local_mmr(mmr);
- enable = !!mmioh.s2.enable;
- base = mmioh.s2.base;
- m_io = mmioh.s2.m_io;
- n_io = mmioh.s2.n_io;
-
- if (enable) {
- max_pnode &= (1 << n_io) - 1;
- pr_info("UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
- base, shift, m_io, n_io, max_pnode);
- map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
- } else {
- pr_info("UV: MMIOH disabled\n");
+ mmioh1.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1);
+ if (unlikely(mmioh1.s.enable == 0))
+ pr_info("UV: MMIOH1 disabled\n");
+ else {
+ unsigned long base = uvxy_field(mmioh1, base, 0);
+ int m_io = uvxy_field(mmioh1, m_io, 0);
+ int n_io = uvxy_field(mmioh1, n_io, 0);
+
+ calc_mmioh_map(UVX_MMIOH1, min_pnode, max_pnode,
+ UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT,
+ base, m_io, n_io);
}
+ return;
+ }
+
+ /* UV2 flavor */
+ if (UVH_RH_GAM_MMIOH_OVERLAY_CONFIG) {
+ union uvh_rh_gam_mmioh_overlay_config_u mmioh;
+
+ mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG);
+ if (unlikely(mmioh.s2.enable == 0))
+ pr_info("UV: MMIOH disabled\n");
+ else
+ calc_mmioh_map(UV2_MMIOH, min_pnode, max_pnode,
+ UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT,
+ mmioh.s2.base, mmioh.s2.m_io, mmioh.s2.n_io);
+ return;
}
}
static __init void map_low_mmrs(void)
{
- init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
- init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
+ if (UV_GLOBAL_MMR32_BASE)
+ init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
+
+ if (UV_LOCAL_MMR_BASE)
+ init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}
static __init void uv_rtc_init(void)
@@ -909,85 +1188,6 @@ static __init void uv_rtc_init(void)
}
}
-/*
- * percpu heartbeat timer
- */
-static void uv_heartbeat(struct timer_list *timer)
-{
- unsigned char bits = uv_scir_info->state;
-
- /* Flip heartbeat bit: */
- bits ^= SCIR_CPU_HEARTBEAT;
-
- /* Is this CPU idle? */
- if (idle_cpu(raw_smp_processor_id()))
- bits &= ~SCIR_CPU_ACTIVITY;
- else
- bits |= SCIR_CPU_ACTIVITY;
-
- /* Update system controller interface reg: */
- uv_set_scir_bits(bits);
-
- /* Enable next timer period: */
- mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
-}
-
-static int uv_heartbeat_enable(unsigned int cpu)
-{
- while (!uv_cpu_scir_info(cpu)->enabled) {
- struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
-
- uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
- timer_setup(timer, uv_heartbeat, TIMER_PINNED);
- timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
- add_timer_on(timer, cpu);
- uv_cpu_scir_info(cpu)->enabled = 1;
-
- /* Also ensure that boot CPU is enabled: */
- cpu = 0;
- }
- return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static int uv_heartbeat_disable(unsigned int cpu)
-{
- if (uv_cpu_scir_info(cpu)->enabled) {
- uv_cpu_scir_info(cpu)->enabled = 0;
- del_timer(&uv_cpu_scir_info(cpu)->timer);
- }
- uv_set_cpu_scir_bits(cpu, 0xff);
- return 0;
-}
-
-static __init void uv_scir_register_cpu_notifier(void)
-{
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/x2apic-uvx:online",
- uv_heartbeat_enable, uv_heartbeat_disable);
-}
-
-#else /* !CONFIG_HOTPLUG_CPU */
-
-static __init void uv_scir_register_cpu_notifier(void)
-{
-}
-
-static __init int uv_init_heartbeat(void)
-{
- int cpu;
-
- if (is_uv_system()) {
- for_each_online_cpu(cpu)
- uv_heartbeat_enable(cpu);
- }
-
- return 0;
-}
-
-late_initcall(uv_init_heartbeat);
-
-#endif /* !CONFIG_HOTPLUG_CPU */
-
/* Direct Legacy VGA I/O traffic to designated IOH */
static int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags)
{
@@ -1027,26 +1227,22 @@ struct mn {
unsigned char n_lshift;
};
+/* Initialize caller's MN struct and fill in values */
static void get_mn(struct mn *mnp)
{
- union uvh_rh_gam_config_mmr_u m_n_config;
- union uv3h_gr0_gam_gr_config_u m_gr_config;
-
- /* Make sure the whole structure is well initialized: */
memset(mnp, 0, sizeof(*mnp));
-
- m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
- mnp->n_val = m_n_config.s.n_skt;
-
- if (is_uv4_hub()) {
+ mnp->n_val = uv_cpuid.n_skt;
+ if (is_uv(UV4|UVY)) {
mnp->m_val = 0;
mnp->n_lshift = 0;
} else if (is_uv3_hub()) {
- mnp->m_val = m_n_config.s3.m_skt;
- m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
+ union uvyh_gr0_gam_gr_config_u m_gr_config;
+
+ mnp->m_val = uv_cpuid.m_skt;
+ m_gr_config.v = uv_read_local_mmr(UVH_GR0_GAM_GR_CONFIG);
mnp->n_lshift = m_gr_config.s3.m_skt;
} else if (is_uv2_hub()) {
- mnp->m_val = m_n_config.s2.m_skt;
+ mnp->m_val = uv_cpuid.m_skt;
mnp->n_lshift = mnp->m_val == 40 ? 40 : 39;
}
mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
@@ -1054,7 +1250,6 @@ static void get_mn(struct mn *mnp)
static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
{
- union uvh_node_id_u node_id;
struct mn mn;
get_mn(&mn);
@@ -1067,7 +1262,9 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
hi->m_shift = mn.m_shift;
hi->n_lshift = mn.n_lshift ? mn.n_lshift : 0;
hi->hub_revision = uv_hub_info->hub_revision;
+ hi->hub_type = uv_hub_info->hub_type;
hi->pnode_mask = uv_cpuid.pnode_mask;
+ hi->nasid_shift = uv_cpuid.nasid_shift;
hi->min_pnode = _min_pnode;
hi->min_socket = _min_socket;
hi->pnode_to_socket = _pnode_to_socket;
@@ -1076,9 +1273,8 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
hi->gr_table_len = _gr_table_len;
hi->gr_table = _gr_table;
- node_id.v = uv_read_local_mmr(UVH_NODE_ID);
uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
- hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
+ hi->gnode_extra = (uv_node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
if (mn.m_val)
hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val;
@@ -1090,7 +1286,9 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
hi->gpa_shift = uv_gp_table->gpa_shift;
hi->gpa_mask = (1UL << hi->gpa_shift) - 1;
} else {
- hi->global_mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & ~UV_MMR_ENABLE;
+ hi->global_mmr_base =
+ uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG) &
+ ~UV_MMR_ENABLE;
hi->global_mmr_shift = _UV_GLOBAL_MMR64_PNODE_SHIFT;
}
@@ -1101,7 +1299,11 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
/* Show system specific info: */
pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n", hi->n_val, hi->m_val, hi->m_shift, hi->n_lshift);
pr_info("UV: gpa_mask/shift:0x%lx/%d pnode_mask:0x%x apic_pns:%d\n", hi->gpa_mask, hi->gpa_shift, hi->pnode_mask, hi->apic_pnode_shift);
- pr_info("UV: mmr_base/shift:0x%lx/%ld gru_base/shift:0x%lx/%ld\n", hi->global_mmr_base, hi->global_mmr_shift, hi->global_gru_base, hi->global_gru_shift);
+ pr_info("UV: mmr_base/shift:0x%lx/%ld\n", hi->global_mmr_base, hi->global_mmr_shift);
+ if (hi->global_gru_base)
+ pr_info("UV: gru_base/shift:0x%lx/%ld\n",
+ hi->global_gru_base, hi->global_gru_shift);
+
pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n", hi->gnode_upper, hi->gnode_extra);
}
@@ -1173,21 +1375,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
pr_info("UV: GRT: %d entries, sockets(min:%x,max:%x) pnodes(min:%x,max:%x)\n", index, _min_socket, _max_socket, _min_pnode, _max_pnode);
}
+/* Walk through UVsystab decoding the fields */
static int __init decode_uv_systab(void)
{
struct uv_systab *st;
int i;
- /* If system is uv3 or lower, there is no extended UVsystab */
- if (is_uv_hubbed(0xfffffe) < uv(4) && is_uv_hubless(0xfffffe) < uv(4))
- return 0; /* No extended UVsystab required */
-
+ /* Get mapped UVsystab pointer */
st = uv_systab;
+
+ /* If UVsystab is version 1, there is no extended UVsystab */
+ if (st && st->revision == UV_SYSTAB_VERSION_1)
+ return 0;
+
if ((!st) || (st->revision < UV_SYSTAB_VERSION_UV4_LATEST)) {
int rev = st ? st->revision : 0;
- pr_err("UV: BIOS UVsystab version(%x) mismatch, expecting(%x)\n", rev, UV_SYSTAB_VERSION_UV4_LATEST);
- pr_err("UV: Cannot support UV operations, switching to generic PC\n");
+ pr_err("UV: BIOS UVsystab mismatch, (%x < %x)\n",
+ rev, UV_SYSTAB_VERSION_UV4_LATEST);
+ pr_err("UV: Does not support UV, switch to non-UV x86_64\n");
uv_system_type = UV_NONE;
return -EINVAL;
@@ -1199,7 +1405,8 @@ static int __init decode_uv_systab(void)
if (!ptr)
continue;
- ptr = ptr + (unsigned long)st;
+ /* point to payload */
+ ptr += (unsigned long)st;
switch (st->entry[i].type) {
case UV_SYSTAB_TYPE_GAM_PARAMS:
@@ -1209,32 +1416,49 @@ static int __init decode_uv_systab(void)
case UV_SYSTAB_TYPE_GAM_RNG_TBL:
decode_gam_rng_tbl(ptr);
break;
+
+ case UV_SYSTAB_TYPE_ARCH_TYPE:
+ /* already processed in early startup */
+ break;
+
+ default:
+ pr_err("UV:%s:Unrecognized UV_SYSTAB_TYPE:%d, skipped\n",
+ __func__, st->entry[i].type);
+ break;
}
}
return 0;
}
-/*
- * Set up physical blade translations from UVH_NODE_PRESENT_TABLE
- * .. NB: UVH_NODE_PRESENT_TABLE is going away,
- * .. being replaced by GAM Range Table
- */
+/* Set up physical blade translations from UVH_NODE_PRESENT_TABLE */
static __init void boot_init_possible_blades(struct uv_hub_info_s *hub_info)
{
+ unsigned long np;
int i, uv_pb = 0;
- pr_info("UV: NODE_PRESENT_DEPTH = %d\n", UVH_NODE_PRESENT_TABLE_DEPTH);
- for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
- unsigned long np;
-
- np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
- if (np)
+ if (UVH_NODE_PRESENT_TABLE) {
+ pr_info("UV: NODE_PRESENT_DEPTH = %d\n",
+ UVH_NODE_PRESENT_TABLE_DEPTH);
+ for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
+ np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
pr_info("UV: NODE_PRESENT(%d) = 0x%016lx\n", i, np);
-
+ uv_pb += hweight64(np);
+ }
+ }
+ if (UVH_NODE_PRESENT_0) {
+ np = uv_read_local_mmr(UVH_NODE_PRESENT_0);
+ pr_info("UV: NODE_PRESENT_0 = 0x%016lx\n", np);
+ uv_pb += hweight64(np);
+ }
+ if (UVH_NODE_PRESENT_1) {
+ np = uv_read_local_mmr(UVH_NODE_PRESENT_1);
+ pr_info("UV: NODE_PRESENT_1 = 0x%016lx\n", np);
uv_pb += hweight64(np);
}
if (uv_possible_blades != uv_pb)
uv_possible_blades = uv_pb;
+
+	pr_info("UV: number of nodes/possible blades: %d\n", uv_pb);
}
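For intuition on the count above (bitmap value hypothetical): a NODE_PRESENT word of
0x0000000000000005 has two bits set, so hweight64() contributes two blades to uv_pb.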
static void __init build_socket_tables(void)
@@ -1253,7 +1477,7 @@ static void __init build_socket_tables(void)
pr_info("UV: No UVsystab socket table, ignoring\n");
return;
}
- pr_crit("UV: Error: UVsystab address translations not available!\n");
+ pr_err("UV: Error: UVsystab address translations not available!\n");
BUG();
}
@@ -1379,9 +1603,9 @@ static int __maybe_unused proc_hubless_show(struct seq_file *file, void *data)
return 0;
}
-static int __maybe_unused proc_oemid_show(struct seq_file *file, void *data)
+static int __maybe_unused proc_archtype_show(struct seq_file *file, void *data)
{
- seq_printf(file, "%s/%s\n", oem_id, oem_table_id);
+ seq_printf(file, "%s/%s\n", uv_archtype, oem_table_id);
return 0;
}
@@ -1390,7 +1614,7 @@ static __init void uv_setup_proc_files(int hubless)
struct proc_dir_entry *pde;
pde = proc_mkdir(UV_PROC_NODE, NULL);
- proc_create_single("oemid", 0, pde, proc_oemid_show);
+ proc_create_single("archtype", 0, pde, proc_archtype_show);
if (hubless)
proc_create_single("hubless", 0, pde, proc_hubless_show);
else
@@ -1429,7 +1653,8 @@ static void __init uv_system_init_hub(void)
struct uv_hub_info_s hub_info = {0};
int bytes, cpu, nodeid;
unsigned short min_pnode = 9999, max_pnode = 0;
- char *hub = is_uv4_hub() ? "UV400" :
+ char *hub = is_uv5_hub() ? "UV500" :
+ is_uv4_hub() ? "UV400" :
is_uv3_hub() ? "UV300" :
is_uv2_hub() ? "UV2000/3000" : NULL;
@@ -1441,12 +1666,14 @@ static void __init uv_system_init_hub(void)
map_low_mmrs();
- /* Get uv_systab for decoding: */
+ /* Get uv_systab for decoding, setup UV BIOS calls */
uv_bios_init();
/* If there's an UVsystab problem then abort UV init: */
- if (decode_uv_systab() < 0)
+ if (decode_uv_systab() < 0) {
+ pr_err("UV: Mangled UVsystab format\n");
return;
+ }
build_socket_tables();
build_uv_gr_table();
@@ -1517,8 +1744,6 @@ static void __init uv_system_init_hub(void)
uv_hub_info_list(numa_node_id)->pnode = pnode;
else if (uv_cpu_hub_info(cpu)->pnode == 0xffff)
uv_cpu_hub_info(cpu)->pnode = pnode;
-
- uv_cpu_scir_info(cpu)->offset = uv_scir_offset(apicid);
}
for_each_node(nodeid) {
@@ -1547,7 +1772,6 @@ static void __init uv_system_init_hub(void)
uv_nmi_setup();
uv_cpu_init();
- uv_scir_register_cpu_notifier();
uv_setup_proc_files(0);
/* Register Legacy VGA I/O redirection handler: */
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index c7503be92f35..57074cf3ad7c 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -248,7 +248,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
switch (leaf) {
case 1:
l1 = &l1i;
- /* fall through */
+ fallthrough;
case 0:
if (!l1->val)
return;
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index c5cf336e5077..345f7d905db6 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -65,6 +65,9 @@ static void init_c3(struct cpuinfo_x86 *c)
c->x86_cache_alignment = c->x86_clflush_size * 2;
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
+
+ if (c->x86 >= 7)
+ set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
enum {
@@ -90,18 +93,15 @@ enum {
static void early_init_centaur(struct cpuinfo_x86 *c)
{
- switch (c->x86) {
#ifdef CONFIG_X86_32
- case 5:
- /* Emulate MTRRs using Centaur's MCR. */
+ /* Emulate MTRRs using Centaur's MCR. */
+ if (c->x86 == 5)
set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
- break;
#endif
- case 6:
- if (c->x86_model >= 0xf)
- set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- break;
- }
+ if ((c->x86 == 6 && c->x86_model >= 0xf) ||
+ (c->x86 >= 7))
+ set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif
@@ -145,9 +145,8 @@ static void init_centaur(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
}
- switch (c->x86) {
#ifdef CONFIG_X86_32
- case 5:
+ if (c->x86 == 5) {
switch (c->x86_model) {
case 4:
name = "C6";
@@ -207,12 +206,10 @@ static void init_centaur(struct cpuinfo_x86 *c)
c->x86_cache_size = (cc>>24)+(dd>>24);
}
sprintf(c->x86_model_id, "WinChip %s", name);
- break;
+ }
#endif
- case 6:
+ if (c->x86 == 6 || c->x86 >= 7)
init_c3(c);
- break;
- }
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c5d6f17d9b9d..3c7519398ad5 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -23,6 +23,7 @@
#include <linux/syscore_ops.h>
#include <linux/pgtable.h>
+#include <asm/cmdline.h>
#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
@@ -1221,6 +1222,59 @@ static void detect_nopl(void)
}
/*
+ * We parse cpu parameters early because fpu__init_system() is executed
+ * before parse_early_param().
+ */
+static void __init cpu_parse_early_param(void)
+{
+ char arg[128];
+ char *argptr = arg;
+ int arglen, res, bit;
+
+#ifdef CONFIG_X86_32
+ if (cmdline_find_option_bool(boot_command_line, "no387"))
+#ifdef CONFIG_MATH_EMULATION
+ setup_clear_cpu_cap(X86_FEATURE_FPU);
+#else
+		pr_err("Option 'no387' requires CONFIG_MATH_EMULATION to be enabled.\n");
+#endif
+
+ if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
+ setup_clear_cpu_cap(X86_FEATURE_FXSR);
+#endif
+
+ if (cmdline_find_option_bool(boot_command_line, "noxsave"))
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+
+ if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
+ setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+
+ if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
+ setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+
+ arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
+ if (arglen <= 0)
+ return;
+
+ pr_info("Clearing CPUID bits:");
+ do {
+ res = get_option(&argptr, &bit);
+ if (res == 0 || res == 3)
+ break;
+
+ /* If the argument was too long, the last bit may be cut off */
+ if (res == 1 && arglen >= sizeof(arg))
+ break;
+
+ if (bit >= 0 && bit < NCAPINTS * 32) {
+ pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
+ setup_clear_cpu_cap(bit);
+ }
+ } while (res == 2);
+ pr_cont("\n");
+}
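A hedged usage sketch for the parser above (feature bit numbers hypothetical; real
ones come from the X86_FEATURE_* layout): booting with

	clearcpuid=147,148 noxsave

would clear the two listed capability bits via setup_clear_cpu_cap() and disable
XSAVE; get_option() returns 2 while a comma follows and 1 on the last number, which
is what drives the do/while loop.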
+
+/*
* Do minimum CPU detection early.
* Fields really needed: vendor, cpuid_level, family, model, mask,
* cache alignment.
@@ -1255,6 +1309,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
get_cpu_cap(c);
get_cpu_address_sizes(c);
setup_force_cpu_cap(X86_FEATURE_CPUID);
+ cpu_parse_early_param();
if (this_cpu->c_early_init)
this_cpu->c_early_init(c);
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index 3e30b26c50ef..d502241995a3 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -69,6 +69,7 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC },
{ X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC },
{ X86_FEATURE_AVX512_BF16, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_ENQCMD, X86_FEATURE_XSAVES },
{ X86_FEATURE_PER_THREAD_MBA, X86_FEATURE_MBA },
{}
};
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 99be063fcb1b..0c6b02dd744c 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -132,49 +132,49 @@ static enum smca_bank_types smca_get_bank_type(unsigned int bank)
}
static struct smca_hwid smca_hwid_mcatypes[] = {
- /* { bank_type, hwid_mcatype, xec_bitmap } */
+ /* { bank_type, hwid_mcatype } */
/* Reserved type */
- { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 },
+ { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0) },
/* ZN Core (HWID=0xB0) MCA types */
- { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFFF },
- { SMCA_LS_V2, HWID_MCATYPE(0xB0, 0x10), 0xFFFFFF },
- { SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
- { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
- { SMCA_DE, HWID_MCATYPE(0xB0, 0x3), 0x1FF },
+ { SMCA_LS, HWID_MCATYPE(0xB0, 0x0) },
+ { SMCA_LS_V2, HWID_MCATYPE(0xB0, 0x10) },
+ { SMCA_IF, HWID_MCATYPE(0xB0, 0x1) },
+ { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2) },
+ { SMCA_DE, HWID_MCATYPE(0xB0, 0x3) },
/* HWID 0xB0 MCATYPE 0x4 is Reserved */
- { SMCA_EX, HWID_MCATYPE(0xB0, 0x5), 0xFFF },
- { SMCA_FP, HWID_MCATYPE(0xB0, 0x6), 0x7F },
- { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },
+ { SMCA_EX, HWID_MCATYPE(0xB0, 0x5) },
+ { SMCA_FP, HWID_MCATYPE(0xB0, 0x6) },
+ { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7) },
/* Data Fabric MCA types */
- { SMCA_CS, HWID_MCATYPE(0x2E, 0x0), 0x1FF },
- { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1), 0x1F },
- { SMCA_CS_V2, HWID_MCATYPE(0x2E, 0x2), 0x3FFF },
+ { SMCA_CS, HWID_MCATYPE(0x2E, 0x0) },
+ { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1) },
+ { SMCA_CS_V2, HWID_MCATYPE(0x2E, 0x2) },
/* Unified Memory Controller MCA type */
- { SMCA_UMC, HWID_MCATYPE(0x96, 0x0), 0xFF },
+ { SMCA_UMC, HWID_MCATYPE(0x96, 0x0) },
/* Parameter Block MCA type */
- { SMCA_PB, HWID_MCATYPE(0x05, 0x0), 0x1 },
+ { SMCA_PB, HWID_MCATYPE(0x05, 0x0) },
/* Platform Security Processor MCA type */
- { SMCA_PSP, HWID_MCATYPE(0xFF, 0x0), 0x1 },
- { SMCA_PSP_V2, HWID_MCATYPE(0xFF, 0x1), 0x3FFFF },
+ { SMCA_PSP, HWID_MCATYPE(0xFF, 0x0) },
+ { SMCA_PSP_V2, HWID_MCATYPE(0xFF, 0x1) },
/* System Management Unit MCA type */
- { SMCA_SMU, HWID_MCATYPE(0x01, 0x0), 0x1 },
- { SMCA_SMU_V2, HWID_MCATYPE(0x01, 0x1), 0x7FF },
+ { SMCA_SMU, HWID_MCATYPE(0x01, 0x0) },
+ { SMCA_SMU_V2, HWID_MCATYPE(0x01, 0x1) },
/* Microprocessor 5 Unit MCA type */
- { SMCA_MP5, HWID_MCATYPE(0x01, 0x2), 0x3FF },
+ { SMCA_MP5, HWID_MCATYPE(0x01, 0x2) },
/* Northbridge IO Unit MCA type */
- { SMCA_NBIO, HWID_MCATYPE(0x18, 0x0), 0x1F },
+ { SMCA_NBIO, HWID_MCATYPE(0x18, 0x0) },
/* PCI Express Unit MCA type */
- { SMCA_PCIE, HWID_MCATYPE(0x46, 0x0), 0x1F },
+ { SMCA_PCIE, HWID_MCATYPE(0x46, 0x0) },
};
struct smca_bank smca_banks[MAX_NR_BANKS];
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index f43a78bde670..1c08cb9eb9f6 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -40,7 +40,6 @@
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>
-#include <linux/jump_label.h>
#include <linux/set_memory.h>
#include <linux/sync_core.h>
#include <linux/task_work.h>
@@ -373,42 +372,105 @@ static int msr_to_offset(u32 msr)
return -1;
}
+__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
+{
+ pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
+ (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
+
+ show_stack_regs(regs);
+
+ panic("MCA architectural violation!\n");
+
+ while (true)
+ cpu_relax();
+
+ return true;
+}
+
/* MSR access wrappers used for error injection */
-static u64 mce_rdmsrl(u32 msr)
+static noinstr u64 mce_rdmsrl(u32 msr)
{
- u64 v;
+ DECLARE_ARGS(val, low, high);
if (__this_cpu_read(injectm.finished)) {
- int offset = msr_to_offset(msr);
+ int offset;
+ u64 ret;
+
+ instrumentation_begin();
+ offset = msr_to_offset(msr);
if (offset < 0)
- return 0;
- return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
- }
+ ret = 0;
+ else
+ ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
- if (rdmsrl_safe(msr, &v)) {
- WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
- /*
- * Return zero in case the access faulted. This should
- * not happen normally but can happen if the CPU does
- * something weird, or if the code is buggy.
- */
- v = 0;
+ instrumentation_end();
+
+ return ret;
}
- return v;
+ /*
+ * RDMSR on MCA MSRs should not fault. If they do, this is very much an
+ * architectural violation and needs to be reported to hw vendor. Panic
+ * the box to not allow any further progress.
+ */
+ asm volatile("1: rdmsr\n"
+ "2:\n"
+ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
+ : EAX_EDX_RET(val, low, high) : "c" (msr));
+
+
+ return EAX_EDX_VAL(val, low, high);
+}
+
+__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
+{
+ pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
+ (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
+ regs->ip, (void *)regs->ip);
+
+ show_stack_regs(regs);
+
+ panic("MCA architectural violation!\n");
+
+ while (true)
+ cpu_relax();
+
+ return true;
}
-static void mce_wrmsrl(u32 msr, u64 v)
+static noinstr void mce_wrmsrl(u32 msr, u64 v)
{
+ u32 low, high;
+
if (__this_cpu_read(injectm.finished)) {
- int offset = msr_to_offset(msr);
+ int offset;
+
+ instrumentation_begin();
+ offset = msr_to_offset(msr);
if (offset >= 0)
*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
+
+ instrumentation_end();
+
return;
}
- wrmsrl(msr, v);
+
+ low = (u32)v;
+ high = (u32)(v >> 32);
+
+ /* See comment in mce_rdmsrl() */
+ asm volatile("1: wrmsr\n"
+ "2:\n"
+ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
+ : : "c" (msr), "a"(low), "d" (high) : "memory");
}
/*
@@ -745,7 +807,7 @@ log_it:
goto clear_it;
mce_read_aux(&m, i);
- m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
+ m.severity = mce_severity(&m, NULL, mca_cfg.tolerant, NULL, false);
/*
* Don't get the IP here because it's unlikely to
* have anything to do with the actual error location.
@@ -794,7 +856,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
quirk_no_way_out(i, m, regs);
m->bank = i;
- if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+ if (mce_severity(m, regs, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
mce_read_aux(m, i);
*msg = tmp;
return 1;
@@ -872,7 +934,6 @@ static void mce_reign(void)
struct mce *m = NULL;
int global_worst = 0;
char *msg = NULL;
- char *nmsg = NULL;
/*
* This CPU is the Monarch and the other CPUs have run
@@ -880,12 +941,10 @@ static void mce_reign(void)
* Grade the severity of the errors of all the CPUs.
*/
for_each_possible_cpu(cpu) {
- int severity = mce_severity(&per_cpu(mces_seen, cpu),
- mca_cfg.tolerant,
- &nmsg, true);
- if (severity > global_worst) {
- msg = nmsg;
- global_worst = severity;
+ struct mce *mtmp = &per_cpu(mces_seen, cpu);
+
+ if (mtmp->severity > global_worst) {
+ global_worst = mtmp->severity;
m = &per_cpu(mces_seen, cpu);
}
}
@@ -895,8 +954,11 @@ static void mce_reign(void)
* This dumps all the mces in the log buffer and stops the
* other CPUs.
*/
- if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
+ if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
+ /* call mce_severity() to get "msg" for panic */
+ mce_severity(m, NULL, mca_cfg.tolerant, &msg, true);
mce_panic("Fatal machine check", m, msg);
+ }
/*
* For UC somewhere we let the CPU who detects it handle it.
@@ -1105,7 +1167,7 @@ static noinstr bool mce_check_crashing_cpu(void)
return false;
}
-static void __mc_scan_banks(struct mce *m, struct mce *final,
+static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final,
unsigned long *toclear, unsigned long *valid_banks,
int no_way_out, int *worst)
{
@@ -1140,7 +1202,7 @@ static void __mc_scan_banks(struct mce *m, struct mce *final,
/* Set taint even when machine check was not enabled. */
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
- severity = mce_severity(m, cfg->tolerant, NULL, true);
+ severity = mce_severity(m, regs, cfg->tolerant, NULL, true);
/*
* When machine check was for corrected/deferred handler don't
@@ -1188,13 +1250,34 @@ static void kill_me_maybe(struct callback_head *cb)
if (!p->mce_ripv)
flags |= MF_MUST_KILL;
- if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags)) {
+ if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) &&
+ !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
+ sync_core();
return;
}
- pr_err("Memory error not recovered");
- kill_me_now(cb);
+ if (p->mce_vaddr != (void __user *)-1l) {
+ force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
+ } else {
+ pr_err("Memory error not recovered");
+ kill_me_now(cb);
+ }
+}
+
+static void queue_task_work(struct mce *m, int kill_it)
+{
+ current->mce_addr = m->addr;
+ current->mce_kflags = m->kflags;
+ current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+ current->mce_whole_page = whole_page(m);
+
+ if (kill_it)
+ current->mce_kill_me.func = kill_me_now;
+ else
+ current->mce_kill_me.func = kill_me_maybe;
+
+ task_work_add(current, &current->mce_kill_me, true);
}
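Rationale for the queueing above: task_work callbacks run as the task is about to
return to user space, so kill_me_maybe()/kill_me_now() execute in a sleepable
context where memory_failure() is safe to call, rather than inside the #MC handler
itself.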
/*
@@ -1291,7 +1374,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
order = mce_start(&no_way_out);
}
- __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
+ __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst);
if (!no_way_out)
mce_clear_state(toclear);
@@ -1313,7 +1396,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
* make sure we have the right "msg".
*/
if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
- mce_severity(&m, cfg->tolerant, &msg, true);
+ mce_severity(&m, regs, cfg->tolerant, &msg, true);
mce_panic("Local fatal machine check!", &m, msg);
}
}
@@ -1330,25 +1413,16 @@ noinstr void do_machine_check(struct pt_regs *regs)
if (worst > 0)
irq_work_queue(&mce_irq_work);
- mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-
- sync_core();
-
if (worst != MCE_AR_SEVERITY && !kill_it)
- return;
+ goto out;
/* Fault was in user mode and we need to take some action */
if ((m.cs & 3) == 3) {
/* If this triggers there is no way to recover. Die hard. */
BUG_ON(!on_thread_stack() || !user_mode(regs));
- current->mce_addr = m.addr;
- current->mce_ripv = !!(m.mcgstatus & MCG_STATUS_RIPV);
- current->mce_whole_page = whole_page(&m);
- current->mce_kill_me.func = kill_me_maybe;
- if (kill_it)
- current->mce_kill_me.func = kill_me_now;
- task_work_add(current, &current->mce_kill_me, true);
+ queue_task_work(&m, kill_it);
+
} else {
/*
* Handle an MCE which has happened in kernel space but from
@@ -1363,7 +1437,12 @@ noinstr void do_machine_check(struct pt_regs *regs)
if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
mce_panic("Failed kernel mode recovery", &m, msg);
}
+
+ if (m.kflags & MCE_IN_KERNEL_COPYIN)
+ queue_task_work(&m, kill_it);
}
+out:
+ mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
}
EXPORT_SYMBOL_GPL(do_machine_check);
@@ -1904,6 +1983,8 @@ void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
{
+ bool irq_state;
+
WARN_ON_ONCE(user_mode(regs));
/*
@@ -1914,7 +1995,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
mce_check_crashing_cpu())
return;
- nmi_enter();
+ irq_state = idtentry_enter_nmi(regs);
/*
* The call targets are marked noinstr, but objtool can't figure
* that out because it's an indirect call. Annotate it.
@@ -1925,7 +2006,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
if (regs->flags & X86_EFLAGS_IF)
trace_hardirqs_on_prepare();
instrumentation_end();
- nmi_exit();
+ idtentry_exit_nmi(regs, irq_state);
}
static __always_inline void exc_machine_check_user(struct pt_regs *regs)
@@ -2062,7 +2143,7 @@ void mce_disable_bank(int bank)
and older.
* mce=nobootlog Don't log MCEs from before booting.
* mce=bios_cmci_threshold Don't program the CMCI threshold
- * mce=recovery force enable memcpy_mcsafe()
+ * mce=recovery force enable copy_mc_fragile()
*/
static int __init mcheck_enable(char *str)
{
@@ -2670,13 +2751,10 @@ static void __init mcheck_debugfs_init(void)
static void __init mcheck_debugfs_init(void) { }
#endif
-DEFINE_STATIC_KEY_FALSE(mcsafe_key);
-EXPORT_SYMBOL_GPL(mcsafe_key);
-
static int __init mcheck_late_init(void)
{
if (mca_cfg.recovery)
- static_branch_inc(&mcsafe_key);
+ enable_copy_mc_fragile();
mcheck_debugfs_init();
diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c
index 03e51053592a..100fbeebdc72 100644
--- a/arch/x86/kernel/cpu/mce/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c
@@ -67,7 +67,9 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
unlock:
mutex_unlock(&mce_chrdev_read_mutex);
- mce->kflags |= MCE_HANDLED_MCELOG;
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ mce->kflags |= MCE_HANDLED_MCELOG;
+
return NOTIFY_OK;
}
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 7843ab3fde09..3a44346f2276 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -199,7 +199,7 @@ static int raise_local(void)
* calling irq_enter, but the necessary
* machinery isn't exported currently.
*/
- /*FALL THROUGH*/
+ fallthrough;
case MCJ_CTX_PROCESS:
raise_exception(m, NULL);
break;
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index d8f9230d2034..abe9fe0fb851 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -193,7 +193,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval)
if (!atomic_sub_return(1, &cmci_storm_on_cpus))
pr_notice("CMCI storm subsided: switching to interrupt mode\n");
- /* FALLTHROUGH */
+ fallthrough;
case CMCI_STORM_SUBSIDED:
/*
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 6473070b5da4..88dcc79cfb07 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -38,7 +38,8 @@ int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);
-extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp);
+extern int (*mce_severity)(struct mce *a, struct pt_regs *regs,
+ int tolerant, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);
extern mce_banks_t mce_banks_ce_disabled;
@@ -185,4 +186,14 @@ extern bool amd_filter_mce(struct mce *m);
static inline bool amd_filter_mce(struct mce *m) { return false; };
#endif
+__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr);
+
+__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr);
+
#endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
index e1da619add19..83df991314c5 100644
--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -9,9 +9,14 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/debugfs.h>
-#include <asm/mce.h>
#include <linux/uaccess.h>
+#include <asm/mce.h>
+#include <asm/intel-family.h>
+#include <asm/traps.h>
+#include <asm/insn.h>
+#include <asm/insn-eval.h>
+
#include "internal.h"
/*
@@ -40,9 +45,14 @@ static struct severity {
unsigned char context;
unsigned char excp;
unsigned char covered;
+ unsigned char cpu_model;
+ unsigned char cpu_minstepping;
+ unsigned char bank_lo, bank_hi;
char *msg;
} severities[] = {
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
+#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
+#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
#define KERNEL .context = IN_KERNEL
#define USER .context = IN_USER
#define KERNEL_RECOV .context = IN_KERNEL_RECOV
@@ -90,14 +100,9 @@ static struct severity {
EXCP, KERNEL_RECOV, MCGMASK(MCG_STATUS_RIPV, 0)
),
MCESEV(
- DEFERRED, "Deferred error",
- NOSER, MASK(MCI_STATUS_UC|MCI_STATUS_DEFERRED|MCI_STATUS_POISON, MCI_STATUS_DEFERRED)
- ),
- MCESEV(
KEEP, "Corrected error",
NOSER, BITCLR(MCI_STATUS_UC)
),
-
/*
* known AO MCACODs reported via MCE or CMC:
*
@@ -113,6 +118,18 @@ static struct severity {
AO, "Action optional: last level cache writeback error",
SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
),
+ /*
+ * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured
+ * to report uncorrected errors using CMCI with a special signature.
+ * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported
+ * in one of the memory controller banks.
+ * Set severity to "AO" for same action as normal patrol scrub error.
+ */
+ MCESEV(
+ AO, "Uncorrected Patrol Scrub Error",
+ SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
+ MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
+ ),
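A hedged decoding of the MASK() line above, using the convention this table applies
throughout (MASK(m, r) selects entries where (status & m) == r, and MCI_ADDR is this
file's ADDRV|MISCV shorthand):

	(status & MCI_STATUS_UC) == 0		/* UC clear */
	(status & MCI_ADDR) == MCI_ADDR		/* address/misc valid */
	(status & 0xffffeff0) == 0x001000c0	/* MSCOD=0x0010 + MCACOD pattern */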
/* ignore OVER for UCNA */
MCESEV(
@@ -198,6 +215,47 @@ static struct severity {
#define mc_recoverable(mcg) (((mcg) & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) == \
(MCG_STATUS_RIPV|MCG_STATUS_EIPV))
+static bool is_copy_from_user(struct pt_regs *regs)
+{
+ u8 insn_buf[MAX_INSN_SIZE];
+ struct insn insn;
+ unsigned long addr;
+
+ if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip, MAX_INSN_SIZE))
+ return false;
+
+ kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
+ insn_get_opcode(&insn);
+ if (!insn.opcode.got)
+ return false;
+
+ switch (insn.opcode.value) {
+ /* MOV mem,reg */
+ case 0x8A: case 0x8B:
+ /* MOVZ mem,reg */
+ case 0xB60F: case 0xB70F:
+ insn_get_modrm(&insn);
+ insn_get_sib(&insn);
+ if (!insn.modrm.got || !insn.sib.got)
+ return false;
+ addr = (unsigned long)insn_get_addr_ref(&insn, regs);
+ break;
+ /* REP MOVS */
+ case 0xA4: case 0xA5:
+ addr = regs->si;
+ break;
+ default:
+ return false;
+ }
+
+ if (fault_in_kernel_space(addr))
+ return false;
+
+ current->mce_vaddr = (void __user *)addr;
+
+ return true;
+}
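A minimal sketch of the kind of copy loop the 0xA4/0xA5 case is aimed at
(hypothetical user copy, not the kernel's actual copy_user implementation): a
machine check during the string move leaves the faulting user source address in
%rsi, which is exactly what regs->si recovers above.

	/* sketch: byte-wise user copy via REP MOVSB (dst/src/count are locals) */
	asm volatile("rep movsb"
		     : "+D" (dst), "+S" (src), "+c" (count)
		     : : "memory");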
+
/*
* If mcgstatus indicated that ip/cs on the stack were
* no good, then "m->cs" will be zero and we will have
@@ -209,15 +267,25 @@ static struct severity {
 * distinguish an exception taken in user from one
* taken in the kernel.
*/
-static int error_context(struct mce *m)
+static int error_context(struct mce *m, struct pt_regs *regs)
{
+ enum handler_type t;
+
if ((m->cs & 3) == 3)
return IN_USER;
+ if (!mc_recoverable(m->mcgstatus))
+ return IN_KERNEL;
- if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip)) {
+ t = ex_get_fault_handler_type(m->ip);
+ if (t == EX_HANDLER_FAULT) {
m->kflags |= MCE_IN_KERNEL_RECOV;
return IN_KERNEL_RECOV;
}
+ if (t == EX_HANDLER_UACCESS && regs && is_copy_from_user(regs)) {
+ m->kflags |= MCE_IN_KERNEL_RECOV;
+ m->kflags |= MCE_IN_KERNEL_COPYIN;
+ return IN_KERNEL_RECOV;
+ }
return IN_KERNEL;
}
@@ -253,9 +321,10 @@ static int mce_severity_amd_smca(struct mce *m, enum context err_ctx)
* See AMD Error Scope Hierarchy table in a newer BKDG. For example
* 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features"
*/
-static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_excp)
+static int mce_severity_amd(struct mce *m, struct pt_regs *regs, int tolerant,
+ char **msg, bool is_excp)
{
- enum context ctx = error_context(m);
+ enum context ctx = error_context(m, regs);
/* Processor Context Corrupt, no need to fumble too much, die! */
if (m->status & MCI_STATUS_PCC)
@@ -305,10 +374,11 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc
return MCE_KEEP_SEVERITY;
}
-static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_excp)
+static int mce_severity_intel(struct mce *m, struct pt_regs *regs,
+ int tolerant, char **msg, bool is_excp)
{
enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP);
- enum context ctx = error_context(m);
+ enum context ctx = error_context(m, regs);
struct severity *s;
for (s = severities;; s++) {
@@ -324,6 +394,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
continue;
if (s->excp && excp != s->excp)
continue;
+ if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
+ continue;
+ if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
+ continue;
+ if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi))
+ continue;
if (msg)
*msg = s->msg;
s->covered = 1;
@@ -336,7 +412,7 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
}
/* Default to mce_severity_intel */
-int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) =
+int (*mce_severity)(struct mce *m, struct pt_regs *regs, int tolerant, char **msg, bool is_excp) =
mce_severity_intel;
void __init mcheck_vendor_init_severity(void)
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 72182809b333..ca670919b561 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -98,7 +98,7 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
case 7:
if (size < 0x40)
break;
- /* Else, fall through */
+ fallthrough;
case 6:
case 5:
case 4:
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 9e1712e8aef7..e5f4ee8f4c3b 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -564,7 +564,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
d = rdt_find_domain(r, id, &add_pos);
if (IS_ERR(d)) {
- pr_warn("Could't find cache id for cpu %d\n", cpu);
+ pr_warn("Couldn't find cache id for CPU %d\n", cpu);
return;
}
@@ -609,7 +609,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
d = rdt_find_domain(r, id, NULL);
if (IS_ERR_OR_NULL(d)) {
- pr_warn("Could't find cache id for cpu %d\n", cpu);
+ pr_warn("Couldn't find cache id for CPU %d\n", cpu);
return;
}
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index bccfc9ff3cc1..2eb0a8c44b35 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -42,6 +42,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
{ X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
+ { X86_FEATURE_SME_COHERENT, CPUID_EAX, 10, 0x8000001f, 0 },
{ 0, 0, 0, 0, 0 }
};
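
The new entry scatters CPUID leaf 0x8000001f, register EAX, bit 10 into
X86_FEATURE_SME_COHERENT. A userspace probe for the same bit, as a
sketch (needs an AMD CPU that exposes the leaf):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(0x8000001f, 0, &eax, &ebx, &ecx, &edx))
                return 1;       /* leaf not available */

        printf("SME_COHERENT: %s\n", (eax & (1u << 10)) ? "yes" : "no");
        return 0;
}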
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 61ddc3a5e5c2..701f196d7c68 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -5,7 +5,6 @@
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
-#include <asm/cmdline.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
@@ -238,51 +237,11 @@ static void __init fpu__init_system_ctx_switch(void)
}
/*
- * We parse fpu parameters early because fpu__init_system() is executed
- * before parse_early_param().
- */
-static void __init fpu__init_parse_early_param(void)
-{
- char arg[32];
- char *argptr = arg;
- int bit;
-
-#ifdef CONFIG_X86_32
- if (cmdline_find_option_bool(boot_command_line, "no387"))
-#ifdef CONFIG_MATH_EMULATION
- setup_clear_cpu_cap(X86_FEATURE_FPU);
-#else
- pr_err("Option 'no387' required CONFIG_MATH_EMULATION enabled.\n");
-#endif
-
- if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
- setup_clear_cpu_cap(X86_FEATURE_FXSR);
-#endif
-
- if (cmdline_find_option_bool(boot_command_line, "noxsave"))
- setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-
- if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
- setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-
- if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
- setup_clear_cpu_cap(X86_FEATURE_XSAVES);
-
- if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
- sizeof(arg)) &&
- get_option(&argptr, &bit) &&
- bit >= 0 &&
- bit < NCAPINTS * 32)
- setup_clear_cpu_cap(bit);
-}
-
-/*
* Called on the boot CPU once per system bootup, to set up the initial
* FPU state that is later cloned into all processes:
*/
void __init fpu__init_system(struct cpuinfo_x86 *c)
{
- fpu__init_parse_early_param();
fpu__init_system_early_generic(c);
/*
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 038e19c0019e..5d8047441a0a 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -37,6 +37,7 @@ static const char *xfeature_names[] =
"AVX-512 ZMM_Hi256" ,
"Processor Trace (unused)" ,
"Protection Keys User registers",
+ "PASID state",
"unknown xstate feature" ,
};
@@ -51,6 +52,7 @@ static short xsave_cpuid_features[] __initdata = {
X86_FEATURE_AVX512F,
X86_FEATURE_INTEL_PT,
X86_FEATURE_PKU,
+ X86_FEATURE_ENQCMD,
};
/*
@@ -318,6 +320,7 @@ static void __init print_xstate_features(void)
print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
print_xstate_feature(XFEATURE_MASK_PKRU);
+ print_xstate_feature(XFEATURE_MASK_PASID);
}
/*
@@ -592,6 +595,7 @@ static void check_xstate_against_struct(int nr)
XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM, struct avx_512_hi16_state);
XCHECK_SZ(sz, nr, XFEATURE_PKRU, struct pkru_state);
+ XCHECK_SZ(sz, nr, XFEATURE_PASID, struct ia32_pasid_state);
/*
* Make *SURE* to add any feature numbers in below if
@@ -601,7 +605,7 @@ static void check_xstate_against_struct(int nr)
if ((nr < XFEATURE_YMM) ||
(nr >= XFEATURE_MAX) ||
(nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
- ((nr >= XFEATURE_RSRVD_COMP_10) && (nr <= XFEATURE_LBR))) {
+ ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_LBR))) {
WARN_ONCE(1, "no structure for xstate: %d\n", nr);
XSTATE_WARN_ON(1);
}
@@ -1398,3 +1402,60 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
+
+#ifdef CONFIG_IOMMU_SUPPORT
+void update_pasid(void)
+{
+ u64 pasid_state;
+ u32 pasid;
+
+ if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
+ return;
+
+ if (!current->mm)
+ return;
+
+ pasid = READ_ONCE(current->mm->pasid);
+ /* Set the valid bit in the PASID MSR/state only for valid pasid. */
+ pasid_state = pasid == PASID_DISABLED ?
+ pasid : pasid | MSR_IA32_PASID_VALID;
+
+ /*
+ * No need to hold fregs_lock() since the task's fpstate won't
+ * be changed by others (e.g. ptrace) while the task is being
+ * switched to or is in IPI.
+ */
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
+ /* The MSR is active and can be directly updated. */
+ wrmsrl(MSR_IA32_PASID, pasid_state);
+ } else {
+ struct fpu *fpu = &current->thread.fpu;
+ struct ia32_pasid_state *ppasid_state;
+ struct xregs_state *xsave;
+
+ /*
+ * The CPU's xstate registers are not currently active. Just
+ * update the PASID state in the memory buffer here. The
+ * PASID MSR will be loaded when returning to user mode.
+ */
+ xsave = &fpu->state.xsave;
+ xsave->header.xfeatures |= XFEATURE_MASK_PASID;
+ ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
+ /*
+ * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
+	 * won't be NULL, so there is no need to check its value.
+ *
+ * Only update the task's PASID state when it's different
+ * from the mm's pasid.
+ */
+ if (ppasid_state->pasid != pasid_state) {
+ /*
+			 * Invalidate fpregs so that state restoring will pick up
+ * the PASID state.
+ */
+ __fpu_invalidate_fpregs_state(fpu);
+ ppasid_state->pasid = pasid_state;
+ }
+ }
+}
+#endif /* CONFIG_IOMMU_SUPPORT */
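
update_pasid() writes the same 64-bit image whether it lands in the MSR
directly or in the XSAVE buffer. A standalone model of how that image
is composed; the valid-bit position used here is an assumption
mirroring the kernel's MSR_IA32_PASID_VALID definition.

#include <stdio.h>
#include <stdint.h>

#define MSR_IA32_PASID_VALID    (1ULL << 31)    /* assumed bit position */
#define PASID_DISABLED          0

int main(void)
{
        uint32_t pasid = 42;    /* hypothetical value from mm->pasid */
        uint64_t pasid_state = pasid == PASID_DISABLED ?
                               pasid : pasid | MSR_IA32_PASID_VALID;

        printf("MSR_IA32_PASID image: %#llx\n",
               (unsigned long long)pasid_state);
        return 0;
}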
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 8cdf29ffd95f..b98ff620ba77 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -349,7 +349,7 @@ static int arch_build_bp_info(struct perf_event *bp,
hw->len = X86_BREAKPOINT_LEN_X;
return 0;
}
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 7ecf9babf0cb..1bffb87dcfdc 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -149,9 +149,6 @@ static const __initconst struct idt_data apic_idts[] = {
# ifdef CONFIG_IRQ_WORK
INTG(IRQ_WORK_VECTOR, asm_sysvec_irq_work),
# endif
-# ifdef CONFIG_X86_UV
- INTG(UV_BAU_MESSAGE, asm_sysvec_uv_bau_message),
-# endif
INTG(SPURIOUS_APIC_VECTOR, asm_sysvec_spurious_apic_interrupt),
INTG(ERROR_APIC_VECTOR, asm_sysvec_error_interrupt),
#endif
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 181060247e3c..c5dd50369e2f 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_X86_64))
- run_on_irqstack_cond(desc->handle_irq, desc, regs);
+ run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
else
__handle_irq(desc, regs);
}
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1b4fe93a86c5..440eed558558 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned int cpu)
void do_softirq_own_stack(void)
{
- run_on_irqstack_cond(__do_softirq, NULL, NULL);
+ run_on_irqstack_cond(__do_softirq, NULL);
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 68acd30c6b87..c2f02f308ecf 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -450,7 +450,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->ip = addr;
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
/* clear the trace bit */
@@ -539,7 +539,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
* a system call which should be ignored
*/
return NOTIFY_DONE;
- /* fall through */
+ fallthrough;
default:
if (user_mode(regs))
return NOTIFY_DONE;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 08320b0b2b27..9663ba31347c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -270,9 +270,8 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
struct pt_regs *old_regs = set_irq_regs(regs);
u32 token;
- irqentry_state_t state;
- state = irqentry_enter(regs);
+ ack_APIC_irq();
inc_irq_stat(irq_hv_callback_count);
@@ -283,7 +282,6 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
}
- irqentry_exit(regs, state);
set_irq_regs(old_regs);
}
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 411af4aa7b51..5c358ccf6649 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -24,7 +24,6 @@
#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
-#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
@@ -312,7 +311,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
case 2:
if (i == 0 || i == 13)
continue; /* IRQ0 & IRQ13 not connected */
- /* fall through */
+ fallthrough;
default:
if (i == 2)
continue; /* IRQ2 is never connected */
@@ -356,7 +355,7 @@ static void __init construct_ioapic_table(int mpc_default_type)
default:
pr_err("???\nUnknown standard configuration %d\n",
mpc_default_type);
- /* fall through */
+ fallthrough;
case 1:
case 5:
memcpy(bus.bustype, "ISA ", 6);
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 49dcfb85e773..c0d409810658 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -80,18 +80,30 @@ static ssize_t msr_read(struct file *file, char __user *buf,
static int filter_write(u32 reg)
{
+ /*
+ * MSRs writes usually happen all at once, and can easily saturate kmsg.
+ * Only allow one message every 30 seconds.
+ *
+ * It's possible to be smarter here and do it (for example) per-MSR, but
+ * it would certainly be more complex, and this is enough at least to
+ * avoid saturating the ring buffer.
+ */
+ static DEFINE_RATELIMIT_STATE(fw_rs, 30 * HZ, 1);
+
switch (allow_writes) {
case MSR_WRITES_ON: return 0;
case MSR_WRITES_OFF: return -EPERM;
default: break;
}
+ if (!__ratelimit(&fw_rs))
+ return 0;
+
if (reg == MSR_IA32_ENERGY_PERF_BIAS)
return 0;
- pr_err_ratelimited("Write to unrecognized MSR 0x%x by %s\n"
- "Please report to x86@kernel.org\n",
- reg, current->comm);
+ pr_err("Write to unrecognized MSR 0x%x by %s (pid: %d). Please report to x86@kernel.org.\n",
+ reg, current->comm, current->pid);
return 0;
}
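
The idiom above, a function-local DEFINE_RATELIMIT_STATE() consulted
before the noisy action, generalizes to any chatty path. A minimal
kernel-context sketch with hypothetical names:

#include <linux/ratelimit.h>
#include <linux/printk.h>

static void report_noisy_event(int id)
{
        /* At most one report per 30 seconds, as in filter_write(). */
        static DEFINE_RATELIMIT_STATE(rs, 30 * HZ, 1);

        if (!__ratelimit(&rs))
                return;

        pr_warn("noisy event %d (further reports suppressed)\n", id);
}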
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 4fc9954a9560..47381666d6a5 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -102,7 +102,6 @@ fs_initcall(nmi_warning_debugfs);
static void nmi_check_duration(struct nmiaction *action, u64 duration)
{
- u64 whole_msecs = READ_ONCE(action->max_duration);
int remainder_ns, decimal_msecs;
if (duration < nmi_longest_ns || duration < action->max_duration)
@@ -110,12 +109,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
action->max_duration = duration;
- remainder_ns = do_div(whole_msecs, (1000 * 1000));
+ remainder_ns = do_div(duration, (1000 * 1000));
decimal_msecs = remainder_ns / 1000;
printk_ratelimited(KERN_INFO
"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
- action->handler, whole_msecs, decimal_msecs);
+ action->handler, duration, decimal_msecs);
}
static int nmi_handle(unsigned int type, struct pt_regs *regs)
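
The cleanup works because do_div() divides its 64-bit argument in place
and returns the 32-bit remainder, making the separate whole_msecs copy
redundant. A kernel-context sketch of that contract, with a
hypothetical caller:

#include <linux/kernel.h>
#include <asm/div64.h>

static void print_msecs(u64 duration_ns)
{
        /* do_div() leaves whole milliseconds in duration_ns ... */
        u32 rem_ns = do_div(duration_ns, 1000 * 1000);

        /* ... and returns the remainder in nanoseconds. */
        pr_info("%llu.%03u msecs\n", duration_ns, rem_ns / 1000);
}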
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 994d8393f2f7..ba4593a913fa 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -42,6 +42,7 @@
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
+#include <asm/frame.h>
#include "process.h"
@@ -133,7 +134,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
fork_frame = container_of(childregs, struct fork_frame, regs);
frame = &fork_frame->frame;
- frame->bp = 0;
+ frame->bp = encode_frame_pointer(childregs);
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;
p->thread.io_bitmap = NULL;
@@ -684,9 +685,7 @@ void arch_cpu_idle(void)
*/
void __cpuidle default_idle(void)
{
- trace_cpu_idle_rcuidle(1, smp_processor_id());
safe_halt();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
@@ -792,7 +791,6 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
static __cpuidle void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
- trace_cpu_idle_rcuidle(1, smp_processor_id());
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
mb(); /* quirk */
clflush((void *)&current_thread_info()->flags);
@@ -804,7 +802,6 @@ static __cpuidle void mwait_idle(void)
__sti_mwait(0, 0);
else
local_irq_enable();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9afefe325acb..df342bedea88 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -407,7 +407,7 @@ unsigned long x86_gsbase_read_cpu_inactive(void)
{
unsigned long gsbase;
- if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
+ if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
unsigned long flags;
local_irq_save(flags);
@@ -422,7 +422,7 @@ unsigned long x86_gsbase_read_cpu_inactive(void)
void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
{
- if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
+ if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
unsigned long flags;
local_irq_save(flags);
@@ -439,7 +439,7 @@ unsigned long x86_fsbase_read_task(struct task_struct *task)
if (task == current)
fsbase = x86_fsbase_read_cpu();
- else if (static_cpu_has(X86_FEATURE_FSGSBASE) ||
+ else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
(task->thread.fsindex == 0))
fsbase = task->thread.fsbase;
else
@@ -454,7 +454,7 @@ unsigned long x86_gsbase_read_task(struct task_struct *task)
if (task == current)
gsbase = x86_gsbase_read_cpu_inactive();
- else if (static_cpu_has(X86_FEATURE_FSGSBASE) ||
+ else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
(task->thread.gsindex == 0))
gsbase = task->thread.gsbase;
else
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 5679aa3fdcb8..e7537c5440bb 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -204,7 +204,7 @@ static int set_segment_reg(struct task_struct *task,
case offsetof(struct user_regs_struct, ss):
if (unlikely(value == 0))
return -EIO;
- /* Else, fall through */
+ fallthrough;
default:
*pt_regs_access(task_pt_regs(task), offset) = value;
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 1b10717c9321..6d0df6a58873 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -8,6 +8,7 @@
#include <asm/hpet.h>
#include <asm/setup.h>
+#include <asm/mce.h>
#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
@@ -624,10 +625,6 @@ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
amd_disable_seq_and_redirect_scrub);
-#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
-#include <linux/jump_label.h>
-#include <asm/string_64.h>
-
/* Ivy Bridge, Haswell, Broadwell */
static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
{
@@ -636,7 +633,7 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
pci_read_config_dword(pdev, 0x84, &capid0);
if (capid0 & 0x10)
- static_branch_inc(&mcsafe_key);
+ enable_copy_mc_fragile();
}
/* Skylake */
@@ -653,7 +650,7 @@ static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
* enabled, so memory machine check recovery is also enabled.
*/
if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
- static_branch_inc(&mcsafe_key);
+ enable_copy_mc_fragile();
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
@@ -661,7 +658,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif
-#endif
bool x86_apple_machine;
EXPORT_SYMBOL(x86_apple_machine);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 0ec7ced727fe..a515e2d230b7 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -654,7 +654,7 @@ static void native_machine_emergency_restart(void)
case BOOT_CF9_FORCE:
port_cf9_safe = true;
- /* Fall through */
+ fallthrough;
case BOOT_CF9_SAFE:
if (port_cf9_safe) {
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index d5fa494c2304..be0d7d4152ec 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -726,7 +726,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->ax = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->ax = regs->orig_ax;
regs->ip -= 2;
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index 9ccbf0576cd0..a7f3e12cfbdb 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -27,7 +27,7 @@ static inline void signal_compat_build_tests(void)
*/
BUILD_BUG_ON(NSIGILL != 11);
BUILD_BUG_ON(NSIGFPE != 15);
- BUILD_BUG_ON(NSIGSEGV != 7);
+ BUILD_BUG_ON(NSIGSEGV != 9);
BUILD_BUG_ON(NSIGBUS != 5);
BUILD_BUG_ON(NSIGTRAP != 5);
BUILD_BUG_ON(NSIGCHLD != 6);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 27aa04a95702..f5ef689dd62a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1594,14 +1594,28 @@ int native_cpu_disable(void)
if (ret)
return ret;
- /*
- * Disable the local APIC. Otherwise IPI broadcasts will reach
- * it. It still responds normally to INIT, NMI, SMI, and SIPI
- * messages.
- */
- apic_soft_disable();
cpu_disable_common();
+ /*
+ * Disable the local APIC. Otherwise IPI broadcasts will reach
+ * it. It still responds normally to INIT, NMI, SMI, and SIPI
+ * messages.
+ *
+ * Disabling the APIC must happen after cpu_disable_common()
+ * which invokes fixup_irqs().
+ *
+ * Disabling the APIC preserves already set bits in IRR, but
+ * an interrupt arriving after disabling the local APIC does not
+ * set the corresponding IRR bit.
+ *
+ * fixup_irqs() scans IRR for set bits so it can raise a not
+ * yet handled interrupt on the new destination CPU via an IPI
+ * but obviously it can't do so for IRR bits which are not set.
+ * IOW, interrupts arriving after disabling the local APIC will
+ * be lost.
+ */
+ apic_soft_disable();
+
return 0;
}
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 2fd698e28e4d..8627fda8d993 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -18,13 +18,13 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct unwind_state state;
unsigned long addr;
- if (regs && !consume_entry(cookie, regs->ip, false))
+ if (regs && !consume_entry(cookie, regs->ip))
return;
for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
- if (!addr || !consume_entry(cookie, addr, false))
+ if (!addr || !consume_entry(cookie, addr))
break;
}
}
@@ -72,7 +72,7 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
if (!addr)
return -EINVAL;
- if (!consume_entry(cookie, addr, false))
+ if (!consume_entry(cookie, addr))
return -EINVAL;
}
@@ -114,7 +114,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
{
const void __user *fp = (const void __user *)regs->bp;
- if (!consume_entry(cookie, regs->ip, false))
+ if (!consume_entry(cookie, regs->ip))
return;
while (1) {
@@ -128,7 +128,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
break;
if (!frame.ret_addr)
break;
- if (!consume_entry(cookie, frame.ret_addr, false))
+ if (!consume_entry(cookie, frame.ret_addr))
break;
fp = frame.next_fp;
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1f66d2d1e998..81a2fb711091 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -729,20 +729,9 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
#endif
}
-static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
+static __always_inline unsigned long debug_read_clear_dr6(void)
{
- /*
- * Disable breakpoints during exception handling; recursive exceptions
- * are exceedingly 'fun'.
- *
- * Since this function is NOKPROBE, and that also applies to
- * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
- * HW_BREAKPOINT_W on our stack)
- *
- * Entry text is excluded for HW_BP_X and cpu_entry_area, which
- * includes the entry stack is excluded for everything.
- */
- *dr7 = local_db_save();
+ unsigned long dr6;
/*
* The Intel SDM says:
@@ -755,15 +744,12 @@ static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
*
* Keep it simple: clear DR6 immediately.
*/
- get_debugreg(*dr6, 6);
+ get_debugreg(dr6, 6);
set_debugreg(0, 6);
/* Filter out all the reserved bits which are preset to 1 */
- *dr6 &= ~DR6_RESERVED;
-}
+ dr6 &= ~DR6_RESERVED;
-static __always_inline void debug_exit(unsigned long dr7)
-{
- local_db_restore(dr7);
+ return dr6;
}
/*
@@ -863,6 +849,18 @@ out:
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
unsigned long dr6)
{
+ /*
+ * Disable breakpoints during exception handling; recursive exceptions
+ * are exceedingly 'fun'.
+ *
+ * Since this function is NOKPROBE, and that also applies to
+ * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
+ * HW_BREAKPOINT_W on our stack)
+ *
+	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
+	 * includes the entry stack, is excluded for everything.
+ */
+ unsigned long dr7 = local_db_save();
bool irq_state = idtentry_enter_nmi(regs);
instrumentation_begin();
@@ -883,6 +881,8 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
instrumentation_end();
idtentry_exit_nmi(regs, irq_state);
+
+ local_db_restore(dr7);
}
static __always_inline void exc_debug_user(struct pt_regs *regs,
@@ -894,6 +894,15 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
*/
WARN_ON_ONCE(!user_mode(regs));
+ /*
+ * NB: We can't easily clear DR7 here because
+ * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
+ * user memory, etc. This means that a recursive #DB is possible. If
+ * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
+ * Since we're not on the IST stack right now, everything will be
+ * fine.
+ */
+
irqentry_enter_from_user_mode(regs);
instrumentation_begin();
@@ -907,36 +916,24 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
- exc_debug_kernel(regs, dr6);
- debug_exit(dr7);
+ exc_debug_kernel(regs, debug_read_clear_dr6());
}
/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
- exc_debug_user(regs, dr6);
- debug_exit(dr7);
+ exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
+ unsigned long dr6 = debug_read_clear_dr6();
if (user_mode(regs))
exc_debug_user(regs, dr6);
else
exc_debug_kernel(regs, dr6);
-
- debug_exit(dr7);
}
#endif
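
The masking step in debug_read_clear_dr6() matters because DR6's
reserved bits read back as 1. A standalone arithmetic check of the
sanitizing; the DR6_RESERVED value is an assumption mirroring its uapi
definition.

#include <stdio.h>
#include <stdint.h>

#define DR6_RESERVED    0xFFFF0FF0u     /* assumed; see asm/debugreg.h */

int main(void)
{
        uint64_t raw = 0xFFFF4FF1;      /* hypothetical read: B0 and BS set */
        uint64_t dr6 = raw & ~(uint64_t)DR6_RESERVED;

        printf("sanitized dr6 = %#llx\n", (unsigned long long)dr6); /* 0x4001 */
        return 0;
}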
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
index 8d5cbe1bbb3b..2c304fd0bb1a 100644
--- a/arch/x86/kernel/umip.c
+++ b/arch/x86/kernel/umip.c
@@ -45,11 +45,12 @@
* value that lies close to the top of kernel memory. The limits for the GDT
* and the IDT are set to zero.
*
- * Given that SLDT and STR are not commonly used in programs that run on WineHQ
- * or DOSEMU2, they are not emulated.
- *
- * The instruction smsw is emulated to return the value that the register CR0
+ * The instruction SMSW is emulated to return the value that the register CR0
* has at boot time as set in the head_32.
+ * SLDT and STR are emulated to return the values that the kernel programmatically
+ * assigns:
+ * - SLDT returns (GDT_ENTRY_LDT * 8) if an LDT has been set, 0 if not.
+ * - STR returns (GDT_ENTRY_TSS * 8).
*
* Emulation is provided for both 32-bit and 64-bit processes.
*
@@ -244,16 +245,34 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
*data_size += UMIP_GDT_IDT_LIMIT_SIZE;
memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE);
- } else if (umip_inst == UMIP_INST_SMSW) {
- unsigned long dummy_value = CR0_STATE;
+ } else if (umip_inst == UMIP_INST_SMSW || umip_inst == UMIP_INST_SLDT ||
+ umip_inst == UMIP_INST_STR) {
+ unsigned long dummy_value;
+
+ if (umip_inst == UMIP_INST_SMSW) {
+ dummy_value = CR0_STATE;
+ } else if (umip_inst == UMIP_INST_STR) {
+ dummy_value = GDT_ENTRY_TSS * 8;
+ } else if (umip_inst == UMIP_INST_SLDT) {
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ down_read(&current->mm->context.ldt_usr_sem);
+ if (current->mm->context.ldt)
+ dummy_value = GDT_ENTRY_LDT * 8;
+ else
+ dummy_value = 0;
+ up_read(&current->mm->context.ldt_usr_sem);
+#else
+ dummy_value = 0;
+#endif
+ }
/*
- * Even though the CR0 register has 4 bytes, the number
+ * For these 3 instructions, the number
* of bytes to be copied in the result buffer is determined
* by whether the operand is a register or a memory location.
* If operand is a register, return as many bytes as the operand
* size. If operand is memory, return only the two least
- * siginificant bytes of CR0.
+	 * significant bytes.
*/
if (X86_MODRM_MOD(insn->modrm.value) == 3)
*data_size = insn->opnd_bytes;
@@ -261,7 +280,6 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
*data_size = 2;
memcpy(data, &dummy_value, *data_size);
- /* STR and SLDT are not emulated */
} else {
return -EINVAL;
}
@@ -383,10 +401,6 @@ bool fixup_umip_exception(struct pt_regs *regs)
umip_pr_warn(regs, "%s instruction cannot be used by applications.\n",
umip_insns[umip_inst]);
- /* Do not emulate (spoof) SLDT or STR. */
- if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT)
- return false;
-
umip_pr_warn(regs, "For now, expensive software emulation returns the result.\n");
if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size,
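
With SLDT and STR now spoofed rather than rejected, a UMIP-restricted
process gets deterministic dummy values back. A userspace probe, as a
sketch (x86-64 GCC/Clang inline asm; the printed values depend on the
kernel's GDT layout and whether an LDT is installed):

#include <stdio.h>

int main(void)
{
        unsigned short ldtr, tr;

        __asm__ volatile("sldt %0" : "=r" (ldtr));
        __asm__ volatile("str %0"  : "=r" (tr));

        printf("SLDT=%#hx STR=%#hx\n", ldtr, tr);
        return 0;
}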
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 15e5aad8ac2c..3fdaa042823d 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -735,7 +735,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
* OPCODE1() of the "short" jmp which checks the same condition.
*/
opc1 = OPCODE2(insn) - 0x10;
- /* fall through */
+ fallthrough;
default:
if (!is_cond_jmp_opcode(opc1))
return -ENOSYS;
@@ -892,7 +892,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
fix_ip_or_call = 0;
break;
}
- /* fall through */
+ fallthrough;
default:
riprel_analyze(auprobe, &insn);
}
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 3fd6eec202d7..7456f9ad424b 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -371,7 +371,7 @@ void kvm_set_cpu_caps(void)
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
- F(SERIALIZE)
+ F(SERIALIZE) | F(TSXLDTRK)
);
/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d0e2825ae617..2f6510de6b0c 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2505,9 +2505,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smstate, 0x7fcc);
- ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+
val = GET_SMSTATE(u32, smstate, 0x7fc8);
- ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
@@ -2560,16 +2565,23 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smstate, 0x7f68);
- ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+
val = GET_SMSTATE(u32, smstate, 0x7f60);
- ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
val = GET_SMSTATE(u64, smstate, 0x7ed0);
- ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
+
+ if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
+ return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
@@ -3016,7 +3028,7 @@ static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
case 0xa4: /* movsb */
case 0xa5: /* movsd/w */
*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
- /* fall through */
+ fallthrough;
case 0xaa: /* stosb */
case 0xab: /* stosd/w */
*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 814d3aee5cef..1d330564eed8 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1779,7 +1779,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
if (ret != HV_STATUS_INVALID_PORT_ID)
break;
- /* fall through - maybe userspace knows this conn_id. */
+ fallthrough; /* maybe userspace knows this conn_id */
case HVCALL_POST_MESSAGE:
/* don't bother userspace if it has no way to handle it */
if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index c47d2acec529..4aa1c2e00e2a 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -285,7 +285,7 @@ int kvm_set_routing_entry(struct kvm *kvm,
switch (ue->u.irqchip.irqchip) {
case KVM_IRQCHIP_PIC_SLAVE:
e->irqchip.pin += PIC_NUM_PINS / 2;
- /* fall through */
+ fallthrough;
case KVM_IRQCHIP_PIC_MASTER:
if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
return -EINVAL;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5ccbee7165a2..35cca2e0c802 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1053,7 +1053,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
switch (delivery_mode) {
case APIC_DM_LOWEST:
vcpu->arch.apic_arb_prio++;
- /* fall through */
+ fallthrough;
case APIC_DM_FIXED:
if (unlikely(trig_mode && !level))
break;
@@ -1341,7 +1341,7 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
break;
case APIC_TASKPRI:
report_tpr_access(apic, false);
- /* fall thru */
+ fallthrough;
default:
val = kvm_lapic_get_reg(apic, offset);
break;
@@ -2027,7 +2027,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_LVT0:
apic_manage_nmi_watchdog(apic, val);
- /* fall through */
+ fallthrough;
case APIC_LVTTHMR:
case APIC_LVTPC:
case APIC_LVT1:
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4e03841f053d..71aa3da2a0b7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1916,7 +1916,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}
@@ -2468,7 +2469,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
}
if (sp->unsync_children)
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
__clear_sp_write_flooding_count(sp);
@@ -4421,7 +4422,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[1][4] =
rsvd_check->rsvd_bits_mask[0][4];
- /* fall through */
+ fallthrough;
case PT64_ROOT_4LEVEL:
rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index fb68467e6049..e90bc436f584 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -586,7 +586,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
/* Give the current vmcb to the guest */
- svm_set_gif(svm, false);
nested_vmcb->save.es = vmcb->save.es;
nested_vmcb->save.cs = vmcb->save.cs;
@@ -632,6 +631,9 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
/* Restore the original control entries */
copy_vmcb_control_area(&vmcb->control, &hsave->control);
+ /* On vmexit the GIF is set to false */
+ svm_set_gif(svm, false);
+
svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
svm->vcpu.arch.l1_tsc_offset;
@@ -1132,6 +1134,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
load_nested_vmcb_control(svm, &ctl);
nested_prepare_vmcb_control(svm);
+ if (!nested_svm_vmrun_msrpm(svm))
+ return -EINVAL;
+
out_set_gif:
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
return 0;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 402dc4234e39..3c9a45efdd4d 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -384,7 +384,8 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
uint8_t *page_virtual;
unsigned long i;
- if (npages == 0 || pages == NULL)
+ if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
+ pages == NULL)
return;
for (i = 0; i < npages; i++) {
@@ -1106,6 +1107,7 @@ void sev_vm_destroy(struct kvm *kvm)
list_for_each_safe(pos, q, head) {
__unregister_enc_region_locked(kvm,
list_entry(pos, struct enc_region, list));
+ cond_resched();
}
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 03dd7bac8034..91ea74ae71b8 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2183,6 +2183,12 @@ static int iret_interception(struct vcpu_svm *svm)
return 1;
}
+static int invd_interception(struct vcpu_svm *svm)
+{
+ /* Treat an INVD instruction as a NOP and just skip it. */
+ return kvm_skip_emulated_instruction(&svm->vcpu);
+}
+
static int invlpg_interception(struct vcpu_svm *svm)
{
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
@@ -2668,7 +2674,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_IA32_APICBASE:
if (kvm_vcpu_apicv_active(vcpu))
avic_update_vapic_bar(to_svm(vcpu), data);
- /* Fall through */
+ fallthrough;
default:
return kvm_set_msr_common(vcpu, msr);
}
@@ -2774,7 +2780,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_RDPMC] = rdpmc_interception,
[SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
- [SVM_EXIT_INVD] = emulate_on_interception,
+ [SVM_EXIT_INVD] = invd_interception,
[SVM_EXIT_PAUSE] = pause_interception,
[SVM_EXIT_HLT] = halt_interception,
[SVM_EXIT_INVLPG] = invlpg_interception,
@@ -2938,8 +2944,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (npt_enabled)
vcpu->arch.cr3 = svm->vmcb->save.cr3;
- svm_complete_interrupts(svm);
-
if (is_guest_mode(vcpu)) {
int vmexit;
@@ -3504,7 +3508,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
stgi();
/* Any pending NMI will happen here */
- exit_fastpath = svm_exit_handlers_fastpath(vcpu);
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_after_interrupt(&svm->vcpu);
@@ -3518,6 +3521,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
}
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+ vmcb_mark_all_clean(svm->vmcb);
/* if exit due to PF check for async PF */
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
@@ -3537,7 +3541,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm);
- vmcb_mark_all_clean(svm->vmcb);
+ svm_complete_interrupts(svm);
+ exit_fastpath = svm_exit_handlers_fastpath(vcpu);
return exit_fastpath;
}
@@ -3900,21 +3905,28 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *nested_vmcb;
struct kvm_host_map map;
- u64 guest;
- u64 vmcb;
int ret = 0;
- guest = GET_SMSTATE(u64, smstate, 0x7ed8);
- vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
+ if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+ u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+ u64 vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
- if (guest) {
- if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
- return 1;
- nested_vmcb = map.hva;
- ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb);
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ if (guest) {
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+ return 1;
+
+ if (!(saved_efer & EFER_SVME))
+ return 1;
+
+ if (kvm_vcpu_map(&svm->vcpu,
+ gpa_to_gfn(vmcb), &map) == -EINVAL)
+ return 1;
+
+ ret = enter_svm_guest_mode(svm, vmcb, map.hva);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ }
}
return ret;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 23b58c28a1c9..1bb6b31eb646 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4404,6 +4404,14 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
+ /*
+ * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
+ * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
+ * up-to-date before switching to L1.
+ */
+ if (enable_ept && is_pae_paging(vcpu))
+ vmx_ept_load_pdptrs(vcpu);
+
leave_guest_mode(vcpu);
if (nested_cpu_has_preemption_timer(vmcs12))
@@ -4668,7 +4676,7 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
vmx->nested.msrs.entry_ctls_high &=
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
vmx->nested.msrs.exit_ctls_high &=
- ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
}
}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 46ba2e03a892..96979c09ebd1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -129,6 +129,9 @@ static bool __read_mostly enable_preemption_timer = 1;
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif
+extern bool __read_mostly allow_smaller_maxphyaddr;
+module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
+
#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON \
@@ -791,6 +794,18 @@ void update_exception_bitmap(struct kvm_vcpu *vcpu)
*/
if (is_guest_mode(vcpu))
eb |= get_vmcs12(vcpu)->exception_bitmap;
+ else {
+ /*
+ * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
+ * between guest and host. In that case we only care about present
+ * faults. For vmcs02, however, PFEC_MASK and PFEC_MATCH are set in
+ * prepare_vmcs02_rare.
+ */
+ bool selective_pf_trap = enable_ept && (eb & (1u << PF_VECTOR));
+ int mask = selective_pf_trap ? PFERR_PRESENT_MASK : 0;
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, mask);
+ }
vmcs_write32(EXCEPTION_BITMAP, eb);
}
@@ -2971,7 +2986,7 @@ static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
vpid_sync_context(to_vmx(vcpu)->vpid);
}
-static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
+void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -3114,7 +3129,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
guest_cr3 = vcpu->arch.cr3;
else /* vmcs01.GUEST_CR3 is already up-to-date. */
update_guest_cr3 = false;
- ept_load_pdptrs(vcpu);
+ vmx_ept_load_pdptrs(vcpu);
} else {
guest_cr3 = pgd;
}
@@ -4352,16 +4367,6 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmx->pt_desc.guest.output_mask = 0x7F;
vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
}
-
- /*
- * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
- * between guest and host. In that case we only care about present
- * faults.
- */
- if (enable_ept) {
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, PFERR_PRESENT_MASK);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, PFERR_PRESENT_MASK);
- }
}
static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -4654,7 +4659,7 @@ static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return false;
- /* fall through */
+ fallthrough;
case DB_VECTOR:
return !(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
@@ -4803,6 +4808,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
* EPT will cause page fault only if we need to
* detect illegal GPAs.
*/
+ WARN_ON_ONCE(!allow_smaller_maxphyaddr);
kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
return 1;
} else
@@ -4827,7 +4833,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
}
kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
- /* fall through */
+ fallthrough;
case BP_VECTOR:
/*
* Update instruction length as we may reinject #BP from
@@ -5257,7 +5263,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
error_code =
vmcs_read32(IDT_VECTORING_ERROR_CODE);
}
- /* fall through */
+ fallthrough;
case INTR_TYPE_SOFT_EXCEPTION:
kvm_clear_exception_queue(vcpu);
break;
@@ -5331,7 +5337,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
* would also use advanced VM-exit information for EPT violations to
* reconstruct the page fault error code.
*/
- if (unlikely(kvm_mmu_is_illegal_gpa(vcpu, gpa)))
+ if (unlikely(allow_smaller_maxphyaddr && kvm_mmu_is_illegal_gpa(vcpu, gpa)))
return kvm_emulate_instruction(vcpu, 0);
return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
@@ -5610,7 +5616,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
* keeping track of global entries in shadow page tables.
*/
- /* fall-through */
+ fallthrough;
case INVPCID_TYPE_ALL_INCL_GLOBAL:
kvm_mmu_unload(vcpu);
return kvm_skip_emulated_instruction(vcpu);
@@ -6054,6 +6060,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
exit_reason != EXIT_REASON_PML_FULL &&
+ exit_reason != EXIT_REASON_APIC_ACCESS &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
@@ -6578,7 +6585,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
break;
case INTR_TYPE_SOFT_EXCEPTION:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
- /* fall through */
+ fallthrough;
case INTR_TYPE_HARD_EXCEPTION:
if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
u32 err = vmcs_read32(error_code_field);
@@ -6588,7 +6595,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
break;
case INTR_TYPE_SOFT_INTR:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
- /* fall through */
+ fallthrough;
case INTR_TYPE_EXT_INTR:
kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
break;
@@ -8304,11 +8311,12 @@ static int __init vmx_init(void)
vmx_check_vmcs12_offsets();
/*
- * Intel processors don't have problems with
- * GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable
- * it for VMX by default
+ * Shadow paging doesn't have a (further) performance penalty
+ * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
+ * by default
*/
- allow_smaller_maxphyaddr = true;
+ if (!enable_ept)
+ allow_smaller_maxphyaddr = true;
return 0;
}
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 26175a4759fa..a0e47720f60c 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -356,6 +356,7 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e);
+void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
@@ -551,7 +552,10 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
- return !enable_ept || cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+ if (!enable_ept)
+ return true;
+
+ return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}
void dump_vmcs(void);
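
Reworked, the predicate is a two-row truth table: shadow paging always
intercepts #PF, and with EPT the intercept is only needed when
allow_smaller_maxphyaddr was opted into for a guest whose MAXPHYADDR is
below the host's. A standalone paraphrase, not KVM code:

#include <stdbool.h>
#include <stdio.h>

static bool need_pf_intercept(bool enable_ept, bool allow_smaller_maxphyaddr,
                              int guest_maxphyaddr, int host_maxphyaddr)
{
        if (!enable_ept)
                return true;    /* shadow paging always intercepts #PF */

        return allow_smaller_maxphyaddr &&
               guest_maxphyaddr < host_maxphyaddr;
}

int main(void)
{
        /* EPT with matching MAXPHYADDR: no intercept needed. */
        printf("%d\n", need_pf_intercept(true, true, 46, 46));
        return 0;
}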
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 599d73206299..ce856e0ece84 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -188,7 +188,7 @@ static struct kvm_shared_msrs __percpu *shared_msrs;
u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);
-bool __read_mostly allow_smaller_maxphyaddr;
+bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
static u64 __read_mostly host_xss;
@@ -975,7 +975,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+ X86_CR4_SMEP;
+ unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
if (kvm_valid_cr4(vcpu, cr4))
return 1;
@@ -1003,7 +1004,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (kvm_x86_ops.set_cr4(vcpu, cr4))
return 1;
- if (((cr4 ^ old_cr4) & pdptr_bits) ||
+ if (((cr4 ^ old_cr4) & mmu_role_bits) ||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
@@ -1116,14 +1117,12 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
vcpu->arch.eff_db[dr] = val;
break;
case 4:
- /* fall through */
case 6:
if (!kvm_dr6_valid(val))
return -1; /* #GP */
vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
break;
case 5:
- /* fall through */
default: /* 7 */
if (!kvm_dr7_valid(val))
return -1; /* #GP */
@@ -1154,12 +1153,10 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
*val = vcpu->arch.db[array_index_nospec(dr, size)];
break;
case 4:
- /* fall through */
case 6:
*val = vcpu->arch.dr6;
break;
case 5:
- /* fall through */
default: /* 7 */
*val = vcpu->arch.dr7;
break;
@@ -2735,7 +2732,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 1;
if (!lapic_in_kernel(vcpu))
- return 1;
+ return data ? 1 : 0;
vcpu->arch.apf.msr_en_val = data;
@@ -3051,7 +3048,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
- pr = true; /* fall through */
+ pr = true;
+ fallthrough;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
if (kvm_pmu_is_valid_msr(vcpu, msr))
@@ -3224,9 +3222,22 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_POWER_CTL:
msr_info->data = vcpu->arch.msr_ia32_power_ctl;
break;
- case MSR_IA32_TSC:
- msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
+ case MSR_IA32_TSC: {
+ /*
+ * Intel SDM states that MSR_IA32_TSC read adds the TSC offset
+ * even when not intercepted. AMD manual doesn't explicitly
+ * state this but appears to behave the same.
+ *
+ * On userspace reads and writes, however, we unconditionally
+	 * operate on L1's TSC value to ensure backwards-compatible
+ * behavior for migration.
+ */
+ u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
+ vcpu->arch.tsc_offset;
+
+ msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset;
break;
+ }
case MSR_MTRRcap:
case 0x200 ... 0x2ff:
return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -3581,6 +3592,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SMALLER_MAXPHYADDR:
r = (int) allow_smaller_maxphyaddr;
break;
+ case KVM_CAP_STEAL_TIME:
+ r = sched_info_on();
+ break;
default:
break;
}
@@ -4359,7 +4373,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
case KVM_CAP_HYPERV_SYNIC2:
if (cap->args[0])
return -EINVAL;
- /* fall through */
+ fallthrough;
case KVM_CAP_HYPERV_SYNIC:
if (!irqchip_in_kernel(vcpu->kvm))
@@ -8672,7 +8686,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
- /* fall through */
+ fallthrough;
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false;
break;
@@ -10751,9 +10765,11 @@ EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
struct x86_exception fault;
+ u32 access = error_code &
+ (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
if (!(error_code & PFERR_PRESENT_MASK) ||
- vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, error_code, &fault) != UNMAPPED_GVA) {
+ vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) {
/*
* If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
* tables probably do not match the TLB. Just proceed
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index d46fff11f06f..bad4dee4f0e4 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -24,7 +24,7 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cmdline.o = -pg
endif
-CFLAGS_cmdline.o := -fno-stack-protector
+CFLAGS_cmdline.o := -fno-stack-protector -fno-jump-tables
endif
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
@@ -44,6 +44,7 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
lib-y := delay.o misc.o cmdline.o cpu.o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
+lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index 4f1719e22d3c..b6da09339308 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -58,7 +58,7 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size,
state = st_wordcmp;
opptr = option;
wstart = pos;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (!*opptr) {
@@ -89,7 +89,7 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size,
break;
}
state = st_wordskip;
- /* fall through */
+ fallthrough;
case st_wordskip:
if (!c)
@@ -151,7 +151,7 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
state = st_wordcmp;
opptr = option;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if ((c == '=') && !*opptr) {
@@ -172,7 +172,7 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
break;
}
state = st_wordskip;
- /* fall through */
+ fallthrough;
case st_wordskip:
if (myisspace(c))
diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
new file mode 100644
index 000000000000..c13e8c9ee926
--- /dev/null
+++ b/arch/x86/lib/copy_mc.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */
+
+#include <linux/jump_label.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/mce.h>
+
+#ifdef CONFIG_X86_MCE
+/*
+ * See COPY_MC_TEST for self-test of the copy_mc_fragile()
+ * implementation.
+ */
+static DEFINE_STATIC_KEY_FALSE(copy_mc_fragile_key);
+
+void enable_copy_mc_fragile(void)
+{
+ static_branch_inc(&copy_mc_fragile_key);
+}
+#define copy_mc_fragile_enabled (static_branch_unlikely(&copy_mc_fragile_key))
+
+/*
+ * Similar to copy_user_handle_tail, probe for the write fault point, or
+ * source exception point.
+ */
+__visible notrace unsigned long
+copy_mc_fragile_handle_tail(char *to, char *from, unsigned len)
+{
+ for (; len; --len, to++, from++)
+ if (copy_mc_fragile(to, from, 1))
+ break;
+ return len;
+}
+#else
+/*
+ * No point in doing careful copying, or consulting a static key when
+ * there is no #MC handler in the CONFIG_X86_MCE=n case.
+ */
+void enable_copy_mc_fragile(void)
+{
+}
+#define copy_mc_fragile_enabled (0)
+#endif
+
+unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len);
+
+/**
+ * copy_mc_to_kernel - memory copy that handles source exceptions
+ *
+ * @dst: destination address
+ * @src: source address
+ * @len: number of bytes to copy
+ *
+ * Call into the 'fragile' version on systems that benefit from avoiding
+ * corner case poison consumption scenarios. For example, accessing
+ * poison across 2 cachelines with a single instruction. Almost all
+ * other use cases can use copy_mc_enhanced_fast_string() for a fast
+ * recoverable copy, or fall back to plain memcpy.
+ *
+ * Return 0 for success, or number of bytes not copied if there was an
+ * exception.
+ */
+unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
+{
+ if (copy_mc_fragile_enabled)
+ return copy_mc_fragile(dst, src, len);
+ if (static_cpu_has(X86_FEATURE_ERMS))
+ return copy_mc_enhanced_fast_string(dst, src, len);
+ memcpy(dst, src, len);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
+
+unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
+{
+ unsigned long ret;
+
+ if (copy_mc_fragile_enabled) {
+ __uaccess_begin();
+ ret = copy_mc_fragile(dst, src, len);
+ __uaccess_end();
+ return ret;
+ }
+
+ if (static_cpu_has(X86_FEATURE_ERMS)) {
+ __uaccess_begin();
+ ret = copy_mc_enhanced_fast_string(dst, src, len);
+ __uaccess_end();
+ return ret;
+ }
+
+ return copy_user_generic(dst, src, len);
+}
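
A sketch of a kernel-context caller: the return value is "bytes not
copied", so any non-zero result means the tail of the destination was
left unwritten after poison or a fault. The function name here is
hypothetical.

#include <linux/uaccess.h>
#include <linux/errno.h>

static int read_possibly_poisoned(void *dst, const void *src, unsigned int len)
{
        unsigned long rem = copy_mc_to_kernel(dst, src, len);

        if (rem)        /* the last 'rem' bytes were not copied */
                return -EIO;

        return 0;
}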
diff --git a/arch/x86/lib/copy_mc_64.S b/arch/x86/lib/copy_mc_64.S
new file mode 100644
index 000000000000..892d8915f609
--- /dev/null
+++ b/arch/x86/lib/copy_mc_64.S
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */
+
+#include <linux/linkage.h>
+#include <asm/copy_mc_test.h>
+#include <asm/export.h>
+#include <asm/asm.h>
+
+#ifndef CONFIG_UML
+
+#ifdef CONFIG_X86_MCE
+COPY_MC_TEST_CTL
+
+/*
+ * copy_mc_fragile - copy memory with indication if an exception / fault happened
+ *
+ * The 'fragile' version is opted into by platform quirks and takes
+ * pains to avoid unrecoverable corner cases like 'fast-string'
+ * instruction sequences, and consuming poison across a cacheline
+ * boundary. The non-fragile version is equivalent to memcpy()
+ * regardless of CPU machine-check-recovery capability.
+ */
+SYM_FUNC_START(copy_mc_fragile)
+ cmpl $8, %edx
+ /* Less than 8 bytes? Go to byte copy loop */
+ jb .L_no_whole_words
+
+ /* Check for bad alignment of source */
+ testl $7, %esi
+ /* Already aligned */
+ jz .L_8byte_aligned
+
+ /* Copy one byte at a time until source is 8-byte aligned */
+ movl %esi, %ecx
+ andl $7, %ecx
+ subl $8, %ecx
+ negl %ecx
+ subl %ecx, %edx
+.L_read_leading_bytes:
+ movb (%rsi), %al
+ COPY_MC_TEST_SRC %rsi 1 .E_leading_bytes
+ COPY_MC_TEST_DST %rdi 1 .E_leading_bytes
+.L_write_leading_bytes:
+ movb %al, (%rdi)
+ incq %rsi
+ incq %rdi
+ decl %ecx
+ jnz .L_read_leading_bytes
+
+.L_8byte_aligned:
+ movl %edx, %ecx
+ andl $7, %edx
+ shrl $3, %ecx
+ jz .L_no_whole_words
+
+.L_read_words:
+ movq (%rsi), %r8
+ COPY_MC_TEST_SRC %rsi 8 .E_read_words
+ COPY_MC_TEST_DST %rdi 8 .E_write_words
+.L_write_words:
+ movq %r8, (%rdi)
+ addq $8, %rsi
+ addq $8, %rdi
+ decl %ecx
+ jnz .L_read_words
+
+ /* Any trailing bytes? */
+.L_no_whole_words:
+ andl %edx, %edx
+ jz .L_done_memcpy_trap
+
+ /* Copy trailing bytes */
+ movl %edx, %ecx
+.L_read_trailing_bytes:
+ movb (%rsi), %al
+ COPY_MC_TEST_SRC %rsi 1 .E_trailing_bytes
+ COPY_MC_TEST_DST %rdi 1 .E_trailing_bytes
+.L_write_trailing_bytes:
+ movb %al, (%rdi)
+ incq %rsi
+ incq %rdi
+ decl %ecx
+ jnz .L_read_trailing_bytes
+
+ /* Copy successful. Return zero */
+.L_done_memcpy_trap:
+ xorl %eax, %eax
+.L_done:
+ ret
+SYM_FUNC_END(copy_mc_fragile)
+EXPORT_SYMBOL_GPL(copy_mc_fragile)
+
+ .section .fixup, "ax"
+ /*
+ * Return number of bytes not copied for any failure. Note that
+ * there is no "tail" handling since the source buffer is 8-byte
+ * aligned and poison is cacheline aligned.
+ */
+.E_read_words:
+ shll $3, %ecx
+.E_leading_bytes:
+ addl %edx, %ecx
+.E_trailing_bytes:
+ mov %ecx, %eax
+ jmp .L_done
+
+ /*
+ * For write fault handling, given the destination is unaligned,
+ * we handle faults on multi-byte writes with a byte-by-byte
+ * copy up to the write-protected page.
+ */
+.E_write_words:
+ shll $3, %ecx
+ addl %edx, %ecx
+ movl %ecx, %edx
+ jmp copy_mc_fragile_handle_tail
+
+ .previous
+
+ _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
+ _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
+ _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
+ _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
+ _ASM_EXTABLE(.L_write_words, .E_write_words)
+ _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
+#endif /* CONFIG_X86_MCE */
+
+/*
+ * copy_mc_enhanced_fast_string - memory copy with exception handling
+ *
+ * Fast string copy + fault / exception handling. If the CPU does
+ * support machine check exception recovery, but does not support
+ * recovering from fast-string exceptions, then this CPU needs to be
+ * added to the copy_mc_fragile_key set of quirks. Otherwise, absent any
+ * machine check recovery support, this version should be no slower than
+ * standard memcpy.
+ */
+SYM_FUNC_START(copy_mc_enhanced_fast_string)
+ movq %rdi, %rax
+ movq %rdx, %rcx
+.L_copy:
+ rep movsb
+ /* Copy successful. Return zero */
+ xorl %eax, %eax
+ ret
+SYM_FUNC_END(copy_mc_enhanced_fast_string)
+
+ .section .fixup, "ax"
+.E_copy:
+ /*
+ * On fault %rcx is updated such that the copy instruction could
+ * optionally be restarted at the fault position, i.e. it
+ * contains 'bytes remaining'. A non-zero return indicates error
+ * to copy_mc_generic() users, or indicates short transfers to
+ * user-copy routines.
+ */
+ movq %rcx, %rax
+ ret
+
+ .previous
+
+ _ASM_EXTABLE_FAULT(.L_copy, .E_copy)
+#endif /* !CONFIG_UML */
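
To make the assembly above easier to follow, here is a rough C rendering
of the copy_mc_enhanced_fast_string() contract. It is a sketch, not the
real implementation (which is the REP MOVSB sequence with an extable
fixup); the byte loop merely models "copy until a fault, then return the
bytes remaining".

	/* Illustrative model of the REP MOVSB + fixup contract. */
	static unsigned long copy_mc_enhanced_model(char *dst, const char *src,
						    unsigned len)
	{
		unsigned remaining = len;

		while (remaining) {
			/* in the real code, a fault here lands in .E_copy */
			*dst++ = *src++;
			remaining--;
		}
		/* success returns 0; the fixup path returns 'remaining' */
		return 0;
	}
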
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 816f128a6d52..77b9b2a3b5c8 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -15,6 +15,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
+#include <asm/trapnr.h>
.macro ALIGN_DESTINATION
/* check for bad alignment of destination */
@@ -36,8 +37,8 @@
jmp .Lcopy_user_handle_tail
.previous
- _ASM_EXTABLE_UA(100b, 103b)
- _ASM_EXTABLE_UA(101b, 103b)
+ _ASM_EXTABLE_CPY(100b, 103b)
+ _ASM_EXTABLE_CPY(101b, 103b)
.endm
/*
@@ -116,26 +117,26 @@ SYM_FUNC_START(copy_user_generic_unrolled)
60: jmp .Lcopy_user_handle_tail /* ecx is zerorest also */
.previous
- _ASM_EXTABLE_UA(1b, 30b)
- _ASM_EXTABLE_UA(2b, 30b)
- _ASM_EXTABLE_UA(3b, 30b)
- _ASM_EXTABLE_UA(4b, 30b)
- _ASM_EXTABLE_UA(5b, 30b)
- _ASM_EXTABLE_UA(6b, 30b)
- _ASM_EXTABLE_UA(7b, 30b)
- _ASM_EXTABLE_UA(8b, 30b)
- _ASM_EXTABLE_UA(9b, 30b)
- _ASM_EXTABLE_UA(10b, 30b)
- _ASM_EXTABLE_UA(11b, 30b)
- _ASM_EXTABLE_UA(12b, 30b)
- _ASM_EXTABLE_UA(13b, 30b)
- _ASM_EXTABLE_UA(14b, 30b)
- _ASM_EXTABLE_UA(15b, 30b)
- _ASM_EXTABLE_UA(16b, 30b)
- _ASM_EXTABLE_UA(18b, 40b)
- _ASM_EXTABLE_UA(19b, 40b)
- _ASM_EXTABLE_UA(21b, 50b)
- _ASM_EXTABLE_UA(22b, 50b)
+ _ASM_EXTABLE_CPY(1b, 30b)
+ _ASM_EXTABLE_CPY(2b, 30b)
+ _ASM_EXTABLE_CPY(3b, 30b)
+ _ASM_EXTABLE_CPY(4b, 30b)
+ _ASM_EXTABLE_CPY(5b, 30b)
+ _ASM_EXTABLE_CPY(6b, 30b)
+ _ASM_EXTABLE_CPY(7b, 30b)
+ _ASM_EXTABLE_CPY(8b, 30b)
+ _ASM_EXTABLE_CPY(9b, 30b)
+ _ASM_EXTABLE_CPY(10b, 30b)
+ _ASM_EXTABLE_CPY(11b, 30b)
+ _ASM_EXTABLE_CPY(12b, 30b)
+ _ASM_EXTABLE_CPY(13b, 30b)
+ _ASM_EXTABLE_CPY(14b, 30b)
+ _ASM_EXTABLE_CPY(15b, 30b)
+ _ASM_EXTABLE_CPY(16b, 30b)
+ _ASM_EXTABLE_CPY(18b, 40b)
+ _ASM_EXTABLE_CPY(19b, 40b)
+ _ASM_EXTABLE_CPY(21b, 50b)
+ _ASM_EXTABLE_CPY(22b, 50b)
SYM_FUNC_END(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
@@ -180,8 +181,8 @@ SYM_FUNC_START(copy_user_generic_string)
jmp .Lcopy_user_handle_tail
.previous
- _ASM_EXTABLE_UA(1b, 11b)
- _ASM_EXTABLE_UA(3b, 12b)
+ _ASM_EXTABLE_CPY(1b, 11b)
+ _ASM_EXTABLE_CPY(3b, 12b)
SYM_FUNC_END(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
@@ -213,7 +214,7 @@ SYM_FUNC_START(copy_user_enhanced_fast_string)
jmp .Lcopy_user_handle_tail
.previous
- _ASM_EXTABLE_UA(1b, 12b)
+ _ASM_EXTABLE_CPY(1b, 12b)
SYM_FUNC_END(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
@@ -221,6 +222,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
* Try to copy last bytes and clear the rest if needed.
* Since protection fault in copy_from/to_user is not a normal situation,
* it is not necessary to optimize tail handling.
+ * Don't try to copy the tail if a machine check happened.
*
* Input:
* rdi destination
@@ -232,12 +234,25 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
*/
SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
movl %edx,%ecx
+ cmp $X86_TRAP_MC,%eax /* check if X86_TRAP_MC */
+ je 3f
1: rep movsb
2: mov %ecx,%eax
ASM_CLAC
ret
- _ASM_EXTABLE_UA(1b, 2b)
+ /*
+ * Return zero to pretend that this copy succeeded. This
+ * is counter-intuitive, but needed to prevent the code
+ * in lib/iov_iter.c from retrying and running back into
+ * the poison cache line again. The machine check handler
+ * will ensure that a SIGBUS is sent to the task.
+ */
+3: xorl %eax,%eax
+ ASM_CLAC
+ ret
+
+ _ASM_EXTABLE_CPY(1b, 2b)
SYM_CODE_END(.Lcopy_user_handle_tail)
/*
@@ -366,27 +381,27 @@ SYM_FUNC_START(__copy_user_nocache)
jmp .Lcopy_user_handle_tail
.previous
- _ASM_EXTABLE_UA(1b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(2b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(3b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(4b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(5b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(6b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(7b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(8b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(9b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(10b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(11b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(12b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(13b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(14b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(15b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(16b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_UA(20b, .L_fixup_8b_copy)
- _ASM_EXTABLE_UA(21b, .L_fixup_8b_copy)
- _ASM_EXTABLE_UA(30b, .L_fixup_4b_copy)
- _ASM_EXTABLE_UA(31b, .L_fixup_4b_copy)
- _ASM_EXTABLE_UA(40b, .L_fixup_1b_copy)
- _ASM_EXTABLE_UA(41b, .L_fixup_1b_copy)
+ _ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(3b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(4b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(5b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(6b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(7b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(8b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(9b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(10b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(11b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(12b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(13b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(14b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(15b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(16b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_CPY(20b, .L_fixup_8b_copy)
+ _ASM_EXTABLE_CPY(21b, .L_fixup_8b_copy)
+ _ASM_EXTABLE_CPY(30b, .L_fixup_4b_copy)
+ _ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy)
+ _ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy)
+ _ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy)
SYM_FUNC_END(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)
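
A hedged C sketch of the new .Lcopy_user_handle_tail behaviour shown a
few hunks above, for readers skimming the assembly. The function name is
invented for illustration; %eax carrying the trap number is what
ex_handler_copy() arranges.

	/* Model of the tail handler: trapnr arrives in %eax via the fixup. */
	static unsigned long handle_tail_model(char *dst, const char *src,
					       unsigned len, int trapnr)
	{
		unsigned i;

		if (trapnr == X86_TRAP_MC)
			return 0; /* pretend success so iov_iter won't re-read poison */

		for (i = 0; i < len; i++)
			dst[i] = src[i]; /* a fault stops the real rep movsb early */
		return 0; /* the real code returns the remaining count on fault */
	}
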
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 31600d851fd8..5e69603ff63f 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -179,7 +179,7 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off)
if (insn->addr_bytes == 2)
return -EINVAL;
- /* fall through */
+ fallthrough;
case -EDOM:
case offsetof(struct pt_regs, bx):
@@ -362,7 +362,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
case INAT_SEG_REG_GS:
return vm86regs->gs;
case INAT_SEG_REG_IGNORE:
- /* fall through */
default:
return -EINVAL;
}
@@ -386,7 +385,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
*/
return get_user_gs(regs);
case INAT_SEG_REG_IGNORE:
- /* fall through */
default:
return -EINVAL;
}
@@ -786,7 +784,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
*/
return INSN_CODE_SEG_PARAMS(4, 8);
case 3: /* Invalid setting. CS.L=1, CS.D=1 */
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
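
The conversions above replace comment-style fall-through annotations
with the kernel's fallthrough macro, which expands to the compiler's
fall-through attribute where available. A minimal usage sketch (the
function is hypothetical):

	#include <linux/compiler_attributes.h>	/* fallthrough */
	#include <linux/errno.h>

	static int classify(int tag)
	{
		switch (tag) {
		case 0:
			tag++;
			fallthrough;	/* deliberate: share case 1's tail */
		case 1:
			return tag + 10;
		default:
			return -EINVAL;
		}
	}
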
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index bbcc05bcefad..037faac46b0c 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -4,7 +4,6 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
-#include <asm/mcsafe_test.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
@@ -187,117 +186,3 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
SYM_FUNC_END(memcpy_orig)
.popsection
-
-#ifndef CONFIG_UML
-
-MCSAFE_TEST_CTL
-
-/*
- * __memcpy_mcsafe - memory copy with machine check exception handling
- * Note that we only catch machine checks when reading the source addresses.
- * Writes to target are posted and don't generate machine checks.
- */
-SYM_FUNC_START(__memcpy_mcsafe)
- cmpl $8, %edx
- /* Less than 8 bytes? Go to byte copy loop */
- jb .L_no_whole_words
-
- /* Check for bad alignment of source */
- testl $7, %esi
- /* Already aligned */
- jz .L_8byte_aligned
-
- /* Copy one byte at a time until source is 8-byte aligned */
- movl %esi, %ecx
- andl $7, %ecx
- subl $8, %ecx
- negl %ecx
- subl %ecx, %edx
-.L_read_leading_bytes:
- movb (%rsi), %al
- MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
- MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
-.L_write_leading_bytes:
- movb %al, (%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz .L_read_leading_bytes
-
-.L_8byte_aligned:
- movl %edx, %ecx
- andl $7, %edx
- shrl $3, %ecx
- jz .L_no_whole_words
-
-.L_read_words:
- movq (%rsi), %r8
- MCSAFE_TEST_SRC %rsi 8 .E_read_words
- MCSAFE_TEST_DST %rdi 8 .E_write_words
-.L_write_words:
- movq %r8, (%rdi)
- addq $8, %rsi
- addq $8, %rdi
- decl %ecx
- jnz .L_read_words
-
- /* Any trailing bytes? */
-.L_no_whole_words:
- andl %edx, %edx
- jz .L_done_memcpy_trap
-
- /* Copy trailing bytes */
- movl %edx, %ecx
-.L_read_trailing_bytes:
- movb (%rsi), %al
- MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
- MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
-.L_write_trailing_bytes:
- movb %al, (%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz .L_read_trailing_bytes
-
- /* Copy successful. Return zero */
-.L_done_memcpy_trap:
- xorl %eax, %eax
-.L_done:
- ret
-SYM_FUNC_END(__memcpy_mcsafe)
-EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
-
- .section .fixup, "ax"
- /*
- * Return number of bytes not copied for any failure. Note that
- * there is no "tail" handling since the source buffer is 8-byte
- * aligned and poison is cacheline aligned.
- */
-.E_read_words:
- shll $3, %ecx
-.E_leading_bytes:
- addl %edx, %ecx
-.E_trailing_bytes:
- mov %ecx, %eax
- jmp .L_done
-
- /*
- * For write fault handling, given the destination is unaligned,
- * we handle faults on multi-byte writes with a byte-by-byte
- * copy up to the write-protected page.
- */
-.E_write_words:
- shll $3, %ecx
- addl %edx, %ecx
- movl %ecx, %edx
- jmp mcsafe_handle_tail
-
- .previous
-
- _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
- _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
- _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
- _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
- _ASM_EXTABLE(.L_write_words, .E_write_words)
- _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
-#endif
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index b0dfac3d3df7..508c81e97ab1 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -56,27 +56,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
}
EXPORT_SYMBOL(clear_user);
-/*
- * Similar to copy_user_handle_tail, probe for the write fault point,
- * but reuse __memcpy_mcsafe in case a new read error is encountered.
- * clac() is handled in _copy_to_iter_mcsafe().
- */
-__visible notrace unsigned long
-mcsafe_handle_tail(char *to, char *from, unsigned len)
-{
- for (; len; --len, to++, from++) {
- /*
- * Call the assembly routine back directly since
- * memcpy_mcsafe() may silently fallback to memcpy.
- */
- unsigned long rem = __memcpy_mcsafe(to, from, 1);
-
- if (rem)
- break;
- }
- return len;
-}
-
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
* clean_cache_range - write back a cache range with CLWB
@@ -120,7 +99,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
*/
if (size < 8) {
if (!IS_ALIGNED(dest, 4) || size != 4)
- clean_cache_range(dst, 1);
+ clean_cache_range(dst, size);
} else {
if (!IS_ALIGNED(dest, 8)) {
dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
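
The clean_cache_range() change above is a bug fix: the second argument
is a byte count, so passing '1' only guaranteed write-back of the first
cache line the destination touches. A worked illustration with made-up
values:

	/*
	 * dst = 0x0ffe, size = 4, 64-byte cache lines:
	 * the stored bytes 0x0ffe..0x1001 span the lines at 0x0fc0 and
	 * 0x1000. clean_cache_range(dst, 1) wrote back only the first
	 * line; clean_cache_range(dst, size) covers both.
	 */
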
diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c
index 73dc66d887f3..ec071cbb0804 100644
--- a/arch/x86/math-emu/errors.c
+++ b/arch/x86/math-emu/errors.c
@@ -186,7 +186,7 @@ void FPU_printall(void)
case TAG_Special:
/* Update tagi for the printk below */
tagi = FPU_Special(r);
- /* fall through */
+ fallthrough;
case TAG_Valid:
printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
getsign(r) ? '-' : '+',
diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c
index 127ea54122d7..4a9887851ad8 100644
--- a/arch/x86/math-emu/fpu_trig.c
+++ b/arch/x86/math-emu/fpu_trig.c
@@ -1352,7 +1352,7 @@ static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag)
case TW_Denormal:
if (denormal_operand() < 0)
return;
- /* fall through */
+ fallthrough;
case TAG_Zero:
case TAG_Valid:
setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 1d6cb07f4f86..5829457f7ca3 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -80,6 +80,18 @@ __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_uaccess);
+__visible bool ex_handler_copy(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
+{
+ WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
+ regs->ip = ex_fixup_addr(fixup);
+ regs->ax = trapnr;
+ return true;
+}
+EXPORT_SYMBOL(ex_handler_copy);
+
__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
@@ -125,17 +137,21 @@ __visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_clear_fs);
-__visible bool ex_has_fault_handler(unsigned long ip)
+enum handler_type ex_get_fault_handler_type(unsigned long ip)
{
const struct exception_table_entry *e;
ex_handler_t handler;
e = search_exception_tables(ip);
if (!e)
- return false;
+ return EX_HANDLER_NONE;
handler = ex_fixup_handler(e);
-
- return handler == ex_handler_fault;
+ if (handler == ex_handler_fault)
+ return EX_HANDLER_FAULT;
+ else if (handler == ex_handler_uaccess || handler == ex_handler_copy)
+ return EX_HANDLER_UACCESS;
+ else
+ return EX_HANDLER_OTHER;
}
int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
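
With ex_get_fault_handler_type() returning an enum instead of a bool,
callers can now distinguish the copy/uaccess fixups from plain fault
fixups. A hypothetical consumer (the helper name is invented, not part
of this patch):

	/* Sketch: is the faulting IP covered by a recoverable copy fixup? */
	static bool mce_fixup_is_recoverable(struct pt_regs *regs)
	{
		/* EX_HANDLER_UACCESS also covers ex_handler_copy() entries */
		return ex_get_fault_handler_type(regs->ip) == EX_HANDLER_UACCESS;
	}
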
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 35f1498e9832..42606a04ae85 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -190,6 +190,53 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
return pmd_k;
}
+/*
+ * Handle a fault on the vmalloc or module mapping area
+ *
+ * This is needed because there is a race condition between the time
+ * when the vmalloc mapping code updates the PMD and the point in time
+ * when it synchronizes this update with the other page-tables in the
+ * system.
+ *
+ * In this race window another thread/CPU can map an area on the same
+ * PMD, find it already present, and not synchronize it with the
+ * rest of the system yet. As a result v[mz]alloc might return areas
+ * which are not mapped in every page-table in the system, causing an
+ * unhandled page-fault when they are accessed.
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+ unsigned long pgd_paddr;
+ pmd_t *pmd_k;
+ pte_t *pte_k;
+
+ /* Make sure we are in vmalloc area: */
+ if (!(address >= VMALLOC_START && address < VMALLOC_END))
+ return -1;
+
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "current" here. We might be inside
+ * an interrupt in the middle of a task switch.
+ */
+ pgd_paddr = read_cr3_pa();
+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+ if (!pmd_k)
+ return -1;
+
+ if (pmd_large(*pmd_k))
+ return 0;
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ return -1;
+
+ return 0;
+}
+NOKPROBE_SYMBOL(vmalloc_fault);
+
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
unsigned long addr;
@@ -1081,7 +1128,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
return 0;
}
-static int fault_in_kernel_space(unsigned long address)
+bool fault_in_kernel_space(unsigned long address)
{
/*
* On 64-bit systems, the vsyscall page is at an address above
@@ -1110,6 +1157,37 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
*/
WARN_ON_ONCE(hw_error_code & X86_PF_PK);
+#ifdef CONFIG_X86_32
+ /*
+ * We can fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * Before doing this on-demand faulting, ensure that the
+ * fault is not any of the following:
+ * 1. A fault on a PTE with a reserved bit set.
+ * 2. A fault caused by a user-mode access. (Do not demand-
+ * fault kernel memory due to user-mode accesses).
+ * 3. A fault caused by a page-level protection violation.
+ * (A demand fault would be on a non-present page which
+ * would have X86_PF_PROT==0).
+ *
+ * This is only needed to close a race condition on x86-32 in
+ * the vmalloc mapping/unmapping code. See the comment above
+ * vmalloc_fault() for details. On x86-64 the race does not
+ * exist as the vmalloc mappings don't need to be synchronized
+ * there.
+ */
+ if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
+ if (vmalloc_fault(address) >= 0)
+ return;
+ }
+#endif
+
/* Was the fault spurious, caused by lazy TLB invalidation? */
if (spurious_kernel_fault(hw_error_code, address))
return;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 84d85dbd1dad..9e5ccc56f8e0 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -574,7 +574,7 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr,
/* For SEV, these areas are encrypted */
if (sev_active())
break;
- /* Fallthrough */
+ fallthrough;
case E820_TYPE_PRAM:
return true;
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index c5174b4e318b..683cd12f4793 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -321,7 +321,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
u64 addr, u64 max_addr, u64 size)
{
return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
- 0, NULL, NUMA_NO_NODE);
+ 0, NULL, 0);
}
static int __init setup_emu2phys_nid(int *dfl_phys_nid)
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index d1b2a889f035..40baa90e74f4 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1999,7 +1999,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
/*
* Before changing the encryption attribute, we need to flush caches.
*/
- cpa_flush(&cpa, 1);
+ cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
ret = __change_page_attr_set_clr(&cpa, 1);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 1a3569b43aa5..11666ba19b62 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -14,7 +14,6 @@
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
-#include <asm/uv/uv.h>
#include "mm_internal.h"
@@ -555,21 +554,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
load_new_mm_cr3(next->pgd, new_asid, true);
- /*
- * NB: This gets called via leave_mm() in the idle path
- * where RCU functions differently. Tracing normally
- * uses RCU, so we need to use the _rcuidle variant.
- *
- * (There is no good reason for this. The idle code should
- * be rearranged to call this before rcu_idle_enter().)
- */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
} else {
/* The new ASID is already up to date. */
load_new_mm_cr3(next->pgd, new_asid, false);
- /* See above wrt _rcuidle. */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
/* Make sure we write CR3 before loaded_mm. */
@@ -809,29 +799,6 @@ STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
trace_tlb_flush(TLB_REMOTE_SEND_IPI,
(info->end - info->start) >> PAGE_SHIFT);
- if (is_uv_system()) {
- /*
- * This whole special case is confused. UV has a "Broadcast
- * Assist Unit", which seems to be a fancy way to send IPIs.
- * Back when x86 used an explicit TLB flush IPI, UV was
- * optimized to use its own mechanism. These days, x86 uses
- * smp_call_function_many(), but UV still uses a manual IPI,
- * and that IPI's action is out of date -- it does a manual
- * flush instead of calling flush_tlb_func_remote(). This
- * means that the percpu tlb_gen variables won't be updated
- * and we'll do pointless flushes on future context switches.
- *
- * Rather than hooking native_flush_tlb_others() here, I think
- * that UV should be updated so that smp_call_function_many(),
- * etc, are optimal on UV.
- */
- cpumask = uv_flush_tlb_others(cpumask, info);
- if (cpumask)
- smp_call_function_many(cpumask, flush_tlb_func_remote,
- (void *)info, 1);
- return;
- }
-
/*
* If no page tables were freed, we can skip sending IPIs to
* CPUs in lazy TLB mode. They will flush the CPU themselves
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 9f9aad42ccff..89395a5049bb 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -26,6 +26,7 @@
#include <asm/xen/pci.h>
#include <asm/xen/cpuid.h>
#include <asm/apic.h>
+#include <asm/acpi.h>
#include <asm/i8259.h>
static int xen_pcifront_enable_irq(struct pci_dev *dev)
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index f6ea8f1a9d57..d37ebe6e70d7 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -49,7 +49,6 @@
#include <asm/efi.h>
#include <asm/e820/api.h>
#include <asm/time.h>
-#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>
@@ -496,74 +495,6 @@ void __init efi_init(void)
efi_print_memmap();
}
-#if defined(CONFIG_X86_32)
-
-void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
-{
- u64 addr, npages;
-
- addr = md->virt_addr;
- npages = md->num_pages;
-
- memrange_efi_to_native(&addr, &npages);
-
- if (executable)
- set_memory_x(addr, npages);
- else
- set_memory_nx(addr, npages);
-}
-
-void __init runtime_code_page_mkexec(void)
-{
- efi_memory_desc_t *md;
-
- /* Make EFI runtime service code area executable */
- for_each_efi_memory_desc(md) {
- if (md->type != EFI_RUNTIME_SERVICES_CODE)
- continue;
-
- efi_set_executable(md, true);
- }
-}
-
-void __init efi_memory_uc(u64 addr, unsigned long size)
-{
- unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
- u64 npages;
-
- npages = round_up(size, page_shift) / page_shift;
- memrange_efi_to_native(&addr, &npages);
- set_memory_uc(addr, npages);
-}
-
-void __init old_map_region(efi_memory_desc_t *md)
-{
- u64 start_pfn, end_pfn, end;
- unsigned long size;
- void *va;
-
- start_pfn = PFN_DOWN(md->phys_addr);
- size = md->num_pages << PAGE_SHIFT;
- end = md->phys_addr + size;
- end_pfn = PFN_UP(end);
-
- if (pfn_range_is_mapped(start_pfn, end_pfn)) {
- va = __va(md->phys_addr);
-
- if (!(md->attribute & EFI_MEMORY_WB))
- efi_memory_uc((u64)(unsigned long)va, size);
- } else
- va = efi_ioremap(md->phys_addr, size,
- md->type, md->attribute);
-
- md->virt_addr = (u64) (unsigned long) va;
- if (!va)
- pr_err("ioremap of 0x%llX failed!\n",
- (unsigned long long)md->phys_addr);
-}
-
-#endif
-
/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 826ead67753d..e06a199423c0 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -29,9 +29,35 @@
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/page.h>
+#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/efi.h>
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+ u64 start_pfn, end_pfn, end;
+ unsigned long size;
+ void *va;
+
+ start_pfn = PFN_DOWN(md->phys_addr);
+ size = md->num_pages << PAGE_SHIFT;
+ end = md->phys_addr + size;
+ end_pfn = PFN_UP(end);
+
+ if (pfn_range_is_mapped(start_pfn, end_pfn)) {
+ va = __va(md->phys_addr);
+
+ if (!(md->attribute & EFI_MEMORY_WB))
+ set_memory_uc((unsigned long)va, md->num_pages);
+ } else {
+ va = ioremap_cache(md->phys_addr, size);
+ }
+
+ md->virt_addr = (unsigned long)va;
+ if (!va)
+ pr_err("ioremap of 0x%llX failed!\n", md->phys_addr);
+}
+
/*
* To make EFI call EFI runtime service in physical addressing mode we need
* prolog/epilog before/after the invocation to claim the EFI runtime service
@@ -58,11 +84,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
return 0;
}
-void __init efi_map_region(efi_memory_desc_t *md)
-{
- old_map_region(md);
-}
-
void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
@@ -107,6 +128,15 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size,
void __init efi_runtime_update_mappings(void)
{
- if (__supported_pte_mask & _PAGE_NX)
- runtime_code_page_mkexec();
+ if (__supported_pte_mask & _PAGE_NX) {
+ efi_memory_desc_t *md;
+
+ /* Make EFI runtime service code area executable */
+ for_each_efi_memory_desc(md) {
+ if (md->type != EFI_RUNTIME_SERVICES_CODE)
+ continue;
+
+ set_memory_x(md->virt_addr, md->num_pages);
+ }
+ }
}
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 413583f904a6..6af4da1149ba 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -259,6 +259,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
rodata = __pa(__start_rodata);
pfn = rodata >> PAGE_SHIFT;
+
+ pf = _PAGE_NX | _PAGE_ENC;
if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
pr_err("Failed to map kernel rodata 1:1\n");
return 1;
diff --git a/arch/x86/platform/uv/Makefile b/arch/x86/platform/uv/Makefile
index a3693c829e2e..224ff0504890 100644
--- a/arch/x86/platform/uv/Makefile
+++ b/arch/x86/platform/uv/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o uv_nmi.o
+obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_sysfs.o uv_time.o uv_nmi.o
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index a2f447dffea6..54511eaccf4d 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -2,8 +2,9 @@
/*
* BIOS run time interface routines.
*
- * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (c) Russ Anderson <rja@sgi.com>
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
+ * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) Russ Anderson <rja@sgi.com>
*/
#include <linux/efi.h>
@@ -170,16 +171,27 @@ int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus)
(u64)decode, (u64)domain, (u64)bus, 0, 0);
}
-int uv_bios_init(void)
+unsigned long get_uv_systab_phys(bool msg)
{
- uv_systab = NULL;
if ((uv_systab_phys == EFI_INVALID_TABLE_ADDR) ||
!uv_systab_phys || efi_runtime_disabled()) {
- pr_crit("UV: UVsystab: missing\n");
- return -EEXIST;
+ if (msg)
+ pr_crit("UV: UVsystab: missing\n");
+ return 0;
}
+ return uv_systab_phys;
+}
+
+int uv_bios_init(void)
+{
+ unsigned long uv_systab_phys_addr;
+
+ uv_systab = NULL;
+ uv_systab_phys_addr = get_uv_systab_phys(1);
+ if (!uv_systab_phys_addr)
+ return -EEXIST;
- uv_systab = ioremap(uv_systab_phys, sizeof(struct uv_systab));
+ uv_systab = ioremap(uv_systab_phys_addr, sizeof(struct uv_systab));
if (!uv_systab || strncmp(uv_systab->signature, UV_SYSTAB_SIG, 4)) {
pr_err("UV: UVsystab: bad signature!\n");
iounmap(uv_systab);
@@ -191,7 +203,7 @@ int uv_bios_init(void)
int size = uv_systab->size;
iounmap(uv_systab);
- uv_systab = ioremap(uv_systab_phys, size);
+ uv_systab = ioremap(uv_systab_phys_addr, size);
if (!uv_systab) {
pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
return -EFAULT;
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
deleted file mode 100644
index 62ea907668f8..000000000000
--- a/arch/x86/platform/uv/tlb_uv.c
+++ /dev/null
@@ -1,2097 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SGI UltraViolet TLB flush routines.
- *
- * (c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
- */
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
-#include <linux/debugfs.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-
-#include <asm/mmu_context.h>
-#include <asm/uv/uv.h>
-#include <asm/uv/uv_mmrs.h>
-#include <asm/uv/uv_hub.h>
-#include <asm/uv/uv_bau.h>
-#include <asm/apic.h>
-#include <asm/tsc.h>
-#include <asm/irq_vectors.h>
-#include <asm/timer.h>
-
-static struct bau_operations ops __ro_after_init;
-
-static int timeout_us;
-static bool nobau = true;
-static int nobau_perm;
-
-/* tunables: */
-static int max_concurr = MAX_BAU_CONCURRENT;
-static int max_concurr_const = MAX_BAU_CONCURRENT;
-static int plugged_delay = PLUGGED_DELAY;
-static int plugsb4reset = PLUGSB4RESET;
-static int giveup_limit = GIVEUP_LIMIT;
-static int timeoutsb4reset = TIMEOUTSB4RESET;
-static int ipi_reset_limit = IPI_RESET_LIMIT;
-static int complete_threshold = COMPLETE_THRESHOLD;
-static int congested_respns_us = CONGESTED_RESPONSE_US;
-static int congested_reps = CONGESTED_REPS;
-static int disabled_period = DISABLED_PERIOD;
-
-static struct tunables tunables[] = {
- {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
- {&plugged_delay, PLUGGED_DELAY},
- {&plugsb4reset, PLUGSB4RESET},
- {&timeoutsb4reset, TIMEOUTSB4RESET},
- {&ipi_reset_limit, IPI_RESET_LIMIT},
- {&complete_threshold, COMPLETE_THRESHOLD},
- {&congested_respns_us, CONGESTED_RESPONSE_US},
- {&congested_reps, CONGESTED_REPS},
- {&disabled_period, DISABLED_PERIOD},
- {&giveup_limit, GIVEUP_LIMIT}
-};
-
-static struct dentry *tunables_dir;
-
-/* these correspond to the statistics printed by ptc_seq_show() */
-static char *stat_description[] = {
- "sent: number of shootdown messages sent",
- "stime: time spent sending messages",
- "numuvhubs: number of hubs targeted with shootdown",
- "numuvhubs16: number times 16 or more hubs targeted",
- "numuvhubs8: number times 8 or more hubs targeted",
- "numuvhubs4: number times 4 or more hubs targeted",
- "numuvhubs2: number times 2 or more hubs targeted",
- "numuvhubs1: number times 1 hub targeted",
- "numcpus: number of cpus targeted with shootdown",
- "dto: number of destination timeouts",
- "retries: destination timeout retries sent",
- "rok: : destination timeouts successfully retried",
- "resetp: ipi-style resource resets for plugs",
- "resett: ipi-style resource resets for timeouts",
- "giveup: fall-backs to ipi-style shootdowns",
- "sto: number of source timeouts",
- "bz: number of stay-busy's",
- "throt: number times spun in throttle",
- "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
- "recv: shootdown messages received",
- "rtime: time spent processing messages",
- "all: shootdown all-tlb messages",
- "one: shootdown one-tlb messages",
- "mult: interrupts that found multiple messages",
- "none: interrupts that found no messages",
- "retry: number of retry messages processed",
- "canc: number messages canceled by retries",
- "nocan: number retries that found nothing to cancel",
- "reset: number of ipi-style reset requests processed",
- "rcan: number messages canceled by reset requests",
- "disable: number times use of the BAU was disabled",
- "enable: number times use of the BAU was re-enabled"
-};
-
-static int __init setup_bau(char *arg)
-{
- int result;
-
- if (!arg)
- return -EINVAL;
-
- result = strtobool(arg, &nobau);
- if (result)
- return result;
-
- /* we need to flip the logic here, so that bau=y sets nobau to false */
- nobau = !nobau;
-
- if (!nobau)
- pr_info("UV BAU Enabled\n");
- else
- pr_info("UV BAU Disabled\n");
-
- return 0;
-}
-early_param("bau", setup_bau);
-
-/* base pnode in this partition */
-static int uv_base_pnode __read_mostly;
-
-static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
-static DEFINE_PER_CPU(struct bau_control, bau_control);
-static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
-
-static void
-set_bau_on(void)
-{
- int cpu;
- struct bau_control *bcp;
-
- if (nobau_perm) {
- pr_info("BAU not initialized; cannot be turned on\n");
- return;
- }
- nobau = false;
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
- bcp->nobau = false;
- }
- pr_info("BAU turned on\n");
- return;
-}
-
-static void
-set_bau_off(void)
-{
- int cpu;
- struct bau_control *bcp;
-
- nobau = true;
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
- bcp->nobau = true;
- }
- pr_info("BAU turned off\n");
- return;
-}
-
-/*
- * Determine the first node on a uvhub. 'Nodes' are used for kernel
- * memory allocation.
- */
-static int __init uvhub_to_first_node(int uvhub)
-{
- int node, b;
-
- for_each_online_node(node) {
- b = uv_node_to_blade_id(node);
- if (uvhub == b)
- return node;
- }
- return -1;
-}
-
-/*
- * Determine the apicid of the first cpu on a uvhub.
- */
-static int __init uvhub_to_first_apicid(int uvhub)
-{
- int cpu;
-
- for_each_present_cpu(cpu)
- if (uvhub == uv_cpu_to_blade_id(cpu))
- return per_cpu(x86_cpu_to_apicid, cpu);
- return -1;
-}
-
-/*
- * Free a software acknowledge hardware resource by clearing its Pending
- * bit. This will return a reply to the sender.
- * If the message has timed out, a reply has already been sent by the
- * hardware but the resource has not been released. In that case our
- * clear of the Timeout bit (as well) will free the resource. No reply will
- * be sent (the hardware will only do one reply per message).
- */
-static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
- int do_acknowledge)
-{
- unsigned long dw;
- struct bau_pq_entry *msg;
-
- msg = mdp->msg;
- if (!msg->canceled && do_acknowledge) {
- dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
- ops.write_l_sw_ack(dw);
- }
- msg->replied_to = 1;
- msg->swack_vec = 0;
-}
-
-/*
- * Process the receipt of a RETRY message
- */
-static void bau_process_retry_msg(struct msg_desc *mdp,
- struct bau_control *bcp)
-{
- int i;
- int cancel_count = 0;
- unsigned long msg_res;
- unsigned long mmr = 0;
- struct bau_pq_entry *msg = mdp->msg;
- struct bau_pq_entry *msg2;
- struct ptc_stats *stat = bcp->statp;
-
- stat->d_retries++;
- /*
- * cancel any message from msg+1 to the retry itself
- */
- for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
- if (msg2 > mdp->queue_last)
- msg2 = mdp->queue_first;
- if (msg2 == msg)
- break;
-
- /* same conditions for cancellation as do_reset */
- if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
- (msg2->swack_vec) && ((msg2->swack_vec &
- msg->swack_vec) == 0) &&
- (msg2->sending_cpu == msg->sending_cpu) &&
- (msg2->msg_type != MSG_NOOP)) {
- mmr = ops.read_l_sw_ack();
- msg_res = msg2->swack_vec;
- /*
- * This is a message retry; clear the resources held
- * by the previous message only if they timed out.
- * If it has not timed out we have an unexpected
- * situation to report.
- */
- if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
- unsigned long mr;
- /*
- * Is the resource timed out?
- * Make everyone ignore the cancelled message.
- */
- msg2->canceled = 1;
- stat->d_canceled++;
- cancel_count++;
- mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
- ops.write_l_sw_ack(mr);
- }
- }
- }
- if (!cancel_count)
- stat->d_nocanceled++;
-}
-
-/*
- * Do all the things a cpu should do for a TLB shootdown message.
- * Other cpu's may come here at the same time for this message.
- */
-static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
- int do_acknowledge)
-{
- short socket_ack_count = 0;
- short *sp;
- struct atomic_short *asp;
- struct ptc_stats *stat = bcp->statp;
- struct bau_pq_entry *msg = mdp->msg;
- struct bau_control *smaster = bcp->socket_master;
-
- /*
- * This must be a normal message, or retry of a normal message
- */
- if (msg->address == TLB_FLUSH_ALL) {
- flush_tlb_local();
- stat->d_alltlb++;
- } else {
- flush_tlb_one_user(msg->address);
- stat->d_onetlb++;
- }
- stat->d_requestee++;
-
- /*
- * One cpu on each uvhub has the additional job on a RETRY
- * of releasing the resource held by the message that is
- * being retried. That message is identified by sending
- * cpu number.
- */
- if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
- bau_process_retry_msg(mdp, bcp);
-
- /*
- * This is a swack message, so we have to reply to it.
- * Count each responding cpu on the socket. This avoids
- * pinging the count's cache line back and forth between
- * the sockets.
- */
- sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
- asp = (struct atomic_short *)sp;
- socket_ack_count = atom_asr(1, asp);
- if (socket_ack_count == bcp->cpus_in_socket) {
- int msg_ack_count;
- /*
- * Both sockets dump their completed count total into
- * the message's count.
- */
- *sp = 0;
- asp = (struct atomic_short *)&msg->acknowledge_count;
- msg_ack_count = atom_asr(socket_ack_count, asp);
-
- if (msg_ack_count == bcp->cpus_in_uvhub) {
- /*
- * All cpus in uvhub saw it; reply
- * (unless we are in the UV2 workaround)
- */
- reply_to_message(mdp, bcp, do_acknowledge);
- }
- }
-
- return;
-}
-
-/*
- * Determine the first cpu on a pnode.
- */
-static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
-{
- int cpu;
- struct hub_and_pnode *hpp;
-
- for_each_present_cpu(cpu) {
- hpp = &smaster->thp[cpu];
- if (pnode == hpp->pnode)
- return cpu;
- }
- return -1;
-}
-
-/*
- * Last resort when we get a large number of destination timeouts is
- * to clear resources held by a given cpu.
- * Do this with IPI so that all messages in the BAU message queue
- * can be identified by their nonzero swack_vec field.
- *
- * This is entered for a single cpu on the uvhub.
- * The sender wants this uvhub to free a specific message's
- * swack resources.
- */
-static void do_reset(void *ptr)
-{
- int i;
- struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
- struct reset_args *rap = (struct reset_args *)ptr;
- struct bau_pq_entry *msg;
- struct ptc_stats *stat = bcp->statp;
-
- stat->d_resets++;
- /*
- * We're looking for the given sender, and
- * will free its swack resource.
- * If all cpu's finally responded after the timeout, its
- * message 'replied_to' was set.
- */
- for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
- unsigned long msg_res;
- /* do_reset: same conditions for cancellation as
- bau_process_retry_msg() */
- if ((msg->replied_to == 0) &&
- (msg->canceled == 0) &&
- (msg->sending_cpu == rap->sender) &&
- (msg->swack_vec) &&
- (msg->msg_type != MSG_NOOP)) {
- unsigned long mmr;
- unsigned long mr;
- /*
- * make everyone else ignore this message
- */
- msg->canceled = 1;
- /*
- * only reset the resource if it is still pending
- */
- mmr = ops.read_l_sw_ack();
- msg_res = msg->swack_vec;
- mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
- if (mmr & msg_res) {
- stat->d_rcanceled++;
- ops.write_l_sw_ack(mr);
- }
- }
- }
- return;
-}
-
-/*
- * Use IPI to get all target uvhubs to release resources held by
- * a given sending cpu number.
- */
-static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
-{
- int pnode;
- int apnode;
- int maskbits;
- int sender = bcp->cpu;
- cpumask_t *mask = bcp->uvhub_master->cpumask;
- struct bau_control *smaster = bcp->socket_master;
- struct reset_args reset_args;
-
- reset_args.sender = sender;
- cpumask_clear(mask);
- /* find a single cpu for each uvhub in this distribution mask */
- maskbits = sizeof(struct pnmask) * BITSPERBYTE;
- /* each bit is a pnode relative to the partition base pnode */
- for (pnode = 0; pnode < maskbits; pnode++) {
- int cpu;
- if (!bau_uvhub_isset(pnode, distribution))
- continue;
- apnode = pnode + bcp->partition_base_pnode;
- cpu = pnode_to_first_cpu(apnode, smaster);
- cpumask_set_cpu(cpu, mask);
- }
-
- /* IPI all cpus; preemption is already disabled */
- smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
- return;
-}
-
-/*
- * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
- * number, not an absolute. It converts a duration in cycles to a duration in
- * ns.
- */
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
- struct cyc2ns_data data;
- unsigned long long ns;
-
- cyc2ns_read_begin(&data);
- ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
- cyc2ns_read_end();
-
- return ns;
-}
-
-/*
- * The reverse of the above; converts a duration in ns to a duration in cycles.
- */
-static inline unsigned long long ns_2_cycles(unsigned long long ns)
-{
- struct cyc2ns_data data;
- unsigned long long cyc;
-
- cyc2ns_read_begin(&data);
- cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul;
- cyc2ns_read_end();
-
- return cyc;
-}
-
-static inline unsigned long cycles_2_us(unsigned long long cyc)
-{
- return cycles_2_ns(cyc) / NSEC_PER_USEC;
-}
-
-static inline cycles_t sec_2_cycles(unsigned long sec)
-{
- return ns_2_cycles(sec * NSEC_PER_SEC);
-}
-
-static inline unsigned long long usec_2_cycles(unsigned long usec)
-{
- return ns_2_cycles(usec * NSEC_PER_USEC);
-}
-
-/*
- * wait for all cpus on this hub to finish their sends and go quiet
- * leaves uvhub_quiesce set so that no new broadcasts are started by
- * bau_flush_send_and_wait()
- */
-static inline void quiesce_local_uvhub(struct bau_control *hmaster)
-{
- atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
-}
-
-/*
- * mark this quiet-requestor as done
- */
-static inline void end_uvhub_quiesce(struct bau_control *hmaster)
-{
- atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
-}
-
-/*
- * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
- * But not currently used.
- */
-static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
-{
- return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
-}
-
-/*
- * Entered when a bau descriptor has gone into a permanent busy wait because
- * of a hardware bug.
- * Workaround the bug.
- */
-static int handle_uv2_busy(struct bau_control *bcp)
-{
- struct ptc_stats *stat = bcp->statp;
-
- stat->s_uv2_wars++;
- bcp->busy = 1;
- return FLUSH_GIVEUP;
-}
-
-static int uv2_3_wait_completion(struct bau_desc *bau_desc,
- struct bau_control *bcp, long try)
-{
- unsigned long descriptor_stat;
- cycles_t ttm;
- u64 mmr_offset = bcp->status_mmr;
- int right_shift = bcp->status_index;
- int desc = bcp->uvhub_cpu;
- long busy_reps = 0;
- struct ptc_stats *stat = bcp->statp;
-
- descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
-
- /* spin on the status MMR, waiting for it to go idle */
- while (descriptor_stat != UV2H_DESC_IDLE) {
- if (descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) {
- /*
- * A h/w bug on the destination side may
- * have prevented the message being marked
- * pending, thus it doesn't get replied to
- * and gets continually nacked until it times
- * out with a SOURCE_TIMEOUT.
- */
- stat->s_stimeout++;
- return FLUSH_GIVEUP;
- } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
- ttm = get_cycles();
-
- /*
- * Our retries may be blocked by all destination
- * swack resources being consumed, and a timeout
- * pending. In that case hardware returns the
- * ERROR that looks like a destination timeout.
- * Without using the extended status we have to
- * deduce from the short time that this was a
- * strong nack.
- */
- if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
- bcp->conseccompletes = 0;
- stat->s_plugged++;
- /* FLUSH_RETRY_PLUGGED causes hang on boot */
- return FLUSH_GIVEUP;
- }
- stat->s_dtimeout++;
- bcp->conseccompletes = 0;
- /* FLUSH_RETRY_TIMEOUT causes hang on boot */
- return FLUSH_GIVEUP;
- } else {
- busy_reps++;
- if (busy_reps > 1000000) {
- /* not to hammer on the clock */
- busy_reps = 0;
- ttm = get_cycles();
- if ((ttm - bcp->send_message) > bcp->timeout_interval)
- return handle_uv2_busy(bcp);
- }
- /*
- * descriptor_stat is still BUSY
- */
- cpu_relax();
- }
- descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
- }
- bcp->conseccompletes++;
- return FLUSH_COMPLETE;
-}
-
-/*
- * Returns the status of current BAU message for cpu desc as a bit field
- * [Error][Busy][Aux]
- */
-static u64 read_status(u64 status_mmr, int index, int desc)
-{
- u64 stat;
-
- stat = ((read_lmmr(status_mmr) >> index) & UV_ACT_STATUS_MASK) << 1;
- stat |= (read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_2) >> desc) & 0x1;
-
- return stat;
-}
-
-static int uv4_wait_completion(struct bau_desc *bau_desc,
- struct bau_control *bcp, long try)
-{
- struct ptc_stats *stat = bcp->statp;
- u64 descriptor_stat;
- u64 mmr = bcp->status_mmr;
- int index = bcp->status_index;
- int desc = bcp->uvhub_cpu;
-
- descriptor_stat = read_status(mmr, index, desc);
-
- /* spin on the status MMR, waiting for it to go idle */
- while (descriptor_stat != UV2H_DESC_IDLE) {
- switch (descriptor_stat) {
- case UV2H_DESC_SOURCE_TIMEOUT:
- stat->s_stimeout++;
- return FLUSH_GIVEUP;
-
- case UV2H_DESC_DEST_TIMEOUT:
- stat->s_dtimeout++;
- bcp->conseccompletes = 0;
- return FLUSH_RETRY_TIMEOUT;
-
- case UV2H_DESC_DEST_STRONG_NACK:
- stat->s_plugged++;
- bcp->conseccompletes = 0;
- return FLUSH_RETRY_PLUGGED;
-
- case UV2H_DESC_DEST_PUT_ERR:
- bcp->conseccompletes = 0;
- return FLUSH_GIVEUP;
-
- default:
- /* descriptor_stat is still BUSY */
- cpu_relax();
- }
- descriptor_stat = read_status(mmr, index, desc);
- }
- bcp->conseccompletes++;
- return FLUSH_COMPLETE;
-}
-
-/*
- * Our retries are blocked by all destination sw ack resources being
- * in use, and a timeout is pending. In that case hardware immediately
- * returns the ERROR that looks like a destination timeout.
- */
-static void destination_plugged(struct bau_desc *bau_desc,
- struct bau_control *bcp,
- struct bau_control *hmaster, struct ptc_stats *stat)
-{
- udelay(bcp->plugged_delay);
- bcp->plugged_tries++;
-
- if (bcp->plugged_tries >= bcp->plugsb4reset) {
- bcp->plugged_tries = 0;
-
- quiesce_local_uvhub(hmaster);
-
- spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp);
- spin_unlock(&hmaster->queue_lock);
-
- end_uvhub_quiesce(hmaster);
-
- bcp->ipi_attempts++;
- stat->s_resets_plug++;
- }
-}
-
-static void destination_timeout(struct bau_desc *bau_desc,
- struct bau_control *bcp, struct bau_control *hmaster,
- struct ptc_stats *stat)
-{
- hmaster->max_concurr = 1;
- bcp->timeout_tries++;
- if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
- bcp->timeout_tries = 0;
-
- quiesce_local_uvhub(hmaster);
-
- spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp);
- spin_unlock(&hmaster->queue_lock);
-
- end_uvhub_quiesce(hmaster);
-
- bcp->ipi_attempts++;
- stat->s_resets_timeout++;
- }
-}
-
-/*
- * Stop all cpus on a uvhub from using the BAU for a period of time.
- * This is reversed by check_enable.
- */
-static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
-{
- int tcpu;
- struct bau_control *tbcp;
- struct bau_control *hmaster;
- cycles_t tm1;
-
- hmaster = bcp->uvhub_master;
- spin_lock(&hmaster->disable_lock);
- if (!bcp->baudisabled) {
- stat->s_bau_disabled++;
- tm1 = get_cycles();
- for_each_present_cpu(tcpu) {
- tbcp = &per_cpu(bau_control, tcpu);
- if (tbcp->uvhub_master == hmaster) {
- tbcp->baudisabled = 1;
- tbcp->set_bau_on_time =
- tm1 + bcp->disabled_period;
- }
- }
- }
- spin_unlock(&hmaster->disable_lock);
-}
-
-static void count_max_concurr(int stat, struct bau_control *bcp,
- struct bau_control *hmaster)
-{
- bcp->plugged_tries = 0;
- bcp->timeout_tries = 0;
- if (stat != FLUSH_COMPLETE)
- return;
- if (bcp->conseccompletes <= bcp->complete_threshold)
- return;
- if (hmaster->max_concurr >= hmaster->max_concurr_const)
- return;
- hmaster->max_concurr++;
-}
-
-static void record_send_stats(cycles_t time1, cycles_t time2,
- struct bau_control *bcp, struct ptc_stats *stat,
- int completion_status, int try)
-{
- cycles_t elapsed;
-
- if (time2 > time1) {
- elapsed = time2 - time1;
- stat->s_time += elapsed;
-
- if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
- bcp->period_requests++;
- bcp->period_time += elapsed;
- if ((elapsed > usec_2_cycles(bcp->cong_response_us)) &&
- (bcp->period_requests > bcp->cong_reps) &&
- ((bcp->period_time / bcp->period_requests) >
- usec_2_cycles(bcp->cong_response_us))) {
- stat->s_congested++;
- disable_for_period(bcp, stat);
- }
- }
- } else
- stat->s_requestor--;
-
- if (completion_status == FLUSH_COMPLETE && try > 1)
- stat->s_retriesok++;
- else if (completion_status == FLUSH_GIVEUP) {
- stat->s_giveup++;
- if (get_cycles() > bcp->period_end)
- bcp->period_giveups = 0;
- bcp->period_giveups++;
- if (bcp->period_giveups == 1)
- bcp->period_end = get_cycles() + bcp->disabled_period;
- if (bcp->period_giveups > bcp->giveup_limit) {
- disable_for_period(bcp, stat);
- stat->s_giveuplimit++;
- }
- }
-}
-
-/*
- * Handle the completion status of a message send.
- */
-static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
- struct bau_control *bcp, struct bau_control *hmaster,
- struct ptc_stats *stat)
-{
- if (completion_status == FLUSH_RETRY_PLUGGED)
- destination_plugged(bau_desc, bcp, hmaster, stat);
- else if (completion_status == FLUSH_RETRY_TIMEOUT)
- destination_timeout(bau_desc, bcp, hmaster, stat);
-}
-
-/*
- * Send a broadcast and wait for it to complete.
- *
- * The flush_mask contains the cpus the broadcast is to be sent to including
- * cpus that are on the local uvhub.
- *
- * Returns 0 if all flushing represented in the mask was done.
- * Returns 1 if it gives up entirely and the original cpu mask is to be
- * returned to the kernel.
- */
-static int uv_flush_send_and_wait(struct cpumask *flush_mask,
- struct bau_control *bcp,
- struct bau_desc *bau_desc)
-{
- int seq_number = 0;
- int completion_stat = 0;
- long try = 0;
- unsigned long index;
- cycles_t time1;
- cycles_t time2;
- struct ptc_stats *stat = bcp->statp;
- struct bau_control *hmaster = bcp->uvhub_master;
- struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;
-
- while (hmaster->uvhub_quiesce)
- cpu_relax();
-
- time1 = get_cycles();
- uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
-
- do {
- if (try == 0) {
- uv2_3_hdr->msg_type = MSG_REGULAR;
- seq_number = bcp->message_number++;
- } else {
- uv2_3_hdr->msg_type = MSG_RETRY;
- stat->s_retry_messages++;
- }
-
- uv2_3_hdr->sequence = seq_number;
- index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
- bcp->send_message = get_cycles();
-
- write_mmr_activation(index);
-
- try++;
- completion_stat = ops.wait_completion(bau_desc, bcp, try);
-
- handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
-
- if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
- bcp->ipi_attempts = 0;
- stat->s_overipilimit++;
- completion_stat = FLUSH_GIVEUP;
- break;
- }
- cpu_relax();
- } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
- (completion_stat == FLUSH_RETRY_TIMEOUT));
-
- time2 = get_cycles();
-
- count_max_concurr(completion_stat, bcp, hmaster);
-
- while (hmaster->uvhub_quiesce)
- cpu_relax();
-
- atomic_dec(&hmaster->active_descriptor_count);
-
- record_send_stats(time1, time2, bcp, stat, completion_stat, try);
-
- if (completion_stat == FLUSH_GIVEUP)
- /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
- return 1;
- return 0;
-}
-
-/*
- * The BAU is disabled for this uvhub. When the disabled time period has
- * expired re-enable it.
- * Return 0 if it is re-enabled for all cpus on this uvhub.
- */
-static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
-{
- int tcpu;
- struct bau_control *tbcp;
- struct bau_control *hmaster;
-
- hmaster = bcp->uvhub_master;
- spin_lock(&hmaster->disable_lock);
- if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
- stat->s_bau_reenabled++;
- for_each_present_cpu(tcpu) {
- tbcp = &per_cpu(bau_control, tcpu);
- if (tbcp->uvhub_master == hmaster) {
- tbcp->baudisabled = 0;
- tbcp->period_requests = 0;
- tbcp->period_time = 0;
- tbcp->period_giveups = 0;
- }
- }
- spin_unlock(&hmaster->disable_lock);
- return 0;
- }
- spin_unlock(&hmaster->disable_lock);
- return -1;
-}
-
-static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
- int remotes, struct bau_desc *bau_desc)
-{
- stat->s_requestor++;
- stat->s_ntargcpu += remotes + locals;
- stat->s_ntargremotes += remotes;
- stat->s_ntarglocals += locals;
-
- /* uvhub statistics */
- hubs = bau_uvhub_weight(&bau_desc->distribution);
- if (locals) {
- stat->s_ntarglocaluvhub++;
- stat->s_ntargremoteuvhub += (hubs - 1);
- } else
- stat->s_ntargremoteuvhub += hubs;
-
- stat->s_ntarguvhub += hubs;
-
- if (hubs >= 16)
- stat->s_ntarguvhub16++;
- else if (hubs >= 8)
- stat->s_ntarguvhub8++;
- else if (hubs >= 4)
- stat->s_ntarguvhub4++;
- else if (hubs >= 2)
- stat->s_ntarguvhub2++;
- else
- stat->s_ntarguvhub1++;
-}
-
-/*
- * Translate a cpu mask to the uvhub distribution mask in the BAU
- * activation descriptor.
- */
-static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
- struct bau_desc *bau_desc, int *localsp, int *remotesp)
-{
- int cpu;
- int pnode;
- int cnt = 0;
- struct hub_and_pnode *hpp;
-
- for_each_cpu(cpu, flush_mask) {
- /*
- * The distribution vector is a bit map of pnodes, relative
- * to the partition base pnode (and the partition base nasid
- * in the header).
- * Translate cpu to pnode and hub using a local memory array.
- */
- hpp = &bcp->socket_master->thp[cpu];
- pnode = hpp->pnode - bcp->partition_base_pnode;
- bau_uvhub_set(pnode, &bau_desc->distribution);
- cnt++;
- if (hpp->uvhub == bcp->uvhub)
- (*localsp)++;
- else
- (*remotesp)++;
- }
- if (!cnt)
- return 1;
- return 0;
-}
-
-/*
- * globally purge translation cache of a virtual address or all TLB's
- * @cpumask: mask of all cpu's in which the address is to be removed
- * @mm: mm_struct containing virtual address range
- * @start: start virtual address to be removed from TLB
- * @end: end virtual address to be removed from TLB
- * @cpu: the current cpu
- *
- * This is the entry point for initiating any UV global TLB shootdown.
- *
- * Purges the translation caches of all specified processors of the given
- * virtual address, or purges all TLBs on the specified processors.
- *
- * The caller has derived the cpumask from the mm_struct. This function
- * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
- *
- * The cpumask is converted into a uvhubmask of the uvhubs containing
- * those cpus.
- *
- * Note that this function should be called with preemption disabled.
- *
- * Returns NULL if all remote flushing was done.
- * Returns pointer to cpumask if some remote flushing remains to be
- * done. The returned pointer is valid till preemption is re-enabled.
- */
-const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
-{
- unsigned int cpu = smp_processor_id();
- int locals = 0, remotes = 0, hubs = 0;
- struct bau_desc *bau_desc;
- struct cpumask *flush_mask;
- struct ptc_stats *stat;
- struct bau_control *bcp;
- unsigned long descriptor_status, status, address;
-
- bcp = &per_cpu(bau_control, cpu);
-
- if (bcp->nobau)
- return cpumask;
-
- stat = bcp->statp;
- stat->s_enters++;
-
- if (bcp->busy) {
- descriptor_status =
- read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
- status = ((descriptor_status >> (bcp->uvhub_cpu *
- UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
- if (status == UV2H_DESC_BUSY)
- return cpumask;
- bcp->busy = 0;
- }
-
- /* bau was disabled due to slow response */
- if (bcp->baudisabled) {
- if (check_enable(bcp, stat)) {
- stat->s_ipifordisabled++;
- return cpumask;
- }
- }
-
- /*
- * Each sending cpu has a per-cpu mask which it fills from the caller's
- * cpu mask. All cpus are converted to uvhubs and copied to the
- * activation descriptor.
- */
- flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
- /* don't actually do a shootdown of the local cpu */
- cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
-
- if (cpumask_test_cpu(cpu, cpumask))
- stat->s_ntargself++;
-
- bau_desc = bcp->descriptor_base;
- bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
- bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
- if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
- return NULL;
-
- record_send_statistics(stat, locals, hubs, remotes, bau_desc);
-
- if (!info->end || (info->end - info->start) <= PAGE_SIZE)
- address = info->start;
- else
- address = TLB_FLUSH_ALL;
-
- switch (bcp->uvhub_version) {
- case UV_BAU_V2:
- case UV_BAU_V3:
- bau_desc->payload.uv2_3.address = address;
- bau_desc->payload.uv2_3.sending_cpu = cpu;
- break;
- case UV_BAU_V4:
- bau_desc->payload.uv4.address = address;
- bau_desc->payload.uv4.sending_cpu = cpu;
- bau_desc->payload.uv4.qualifier = BAU_DESC_QUALIFIER;
- break;
- }
-
- /*
- * uv_flush_send_and_wait returns 0 if all cpus were messaged,
- * or 1 if it gave up and the original cpumask should be returned.
- */
- if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
- return NULL;
- else
- return cpumask;
-}
-
-/*
- * Search the message queue for any 'other' unprocessed message with the
- * same software acknowledge resource bit vector as the 'msg' message.
- */
-static struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
- struct bau_control *bcp)
-{
- struct bau_pq_entry *msg_next = msg + 1;
- unsigned char swack_vec = msg->swack_vec;
-
- if (msg_next > bcp->queue_last)
- msg_next = bcp->queue_first;
- while (msg_next != msg) {
- if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
- (msg_next->swack_vec == swack_vec))
- return msg_next;
- msg_next++;
- if (msg_next > bcp->queue_last)
- msg_next = bcp->queue_first;
- }
- return NULL;
-}
-
-/*
- * UV2 needs to work around a bug in which an arriving message has not
- * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
- * Such a message must be ignored.
- */
-static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
-{
- unsigned long mmr_image;
- unsigned char swack_vec;
- struct bau_pq_entry *msg = mdp->msg;
- struct bau_pq_entry *other_msg;
-
- mmr_image = ops.read_l_sw_ack();
- swack_vec = msg->swack_vec;
-
- if ((swack_vec & mmr_image) == 0) {
- /*
- * This message was assigned a swack resource, but no
- * reserved acknowledgment is pending.
- * The bug has prevented this message from setting the MMR.
- */
- /*
- * Some message has set the MMR 'pending' bit; it might have
- * been another message. Look for that message.
- */
- other_msg = find_another_by_swack(msg, bcp);
- if (other_msg) {
- /*
- * There is another. Process this one but do not
- * ack it.
- */
- bau_process_message(mdp, bcp, 0);
- /*
- * Let the natural processing of that other message
- * acknowledge it. Don't get the processing of sw_acks
- * out of order.
- */
- return;
- }
- }
-
- /*
- * Either the MMR shows this one pending a reply or there is no
- * other message using this sw_ack, so it is safe to acknowledge it.
- */
- bau_process_message(mdp, bcp, 1);
-
- return;
-}
-
-/*
- * The BAU message interrupt comes here. (registered by set_intr_gate)
- * See entry_64.S
- *
- * We received a broadcast assist message.
- *
- * Interrupts are disabled; this interrupt could represent
- * the receipt of several messages.
- *
- * All cores/threads on this hub get this interrupt.
- * The last one to see it does the software ack.
- * (the resource will not be freed until noninterruptible cpus see this
- * interrupt; hardware may time out the s/w ack and reply ERROR)
- */
-DEFINE_IDTENTRY_SYSVEC(sysvec_uv_bau_message)
-{
- int count = 0;
- cycles_t time_start;
- struct bau_pq_entry *msg;
- struct bau_control *bcp;
- struct ptc_stats *stat;
- struct msg_desc msgdesc;
-
- ack_APIC_irq();
- kvm_set_cpu_l1tf_flush_l1d();
- time_start = get_cycles();
-
- bcp = &per_cpu(bau_control, smp_processor_id());
- stat = bcp->statp;
-
- msgdesc.queue_first = bcp->queue_first;
- msgdesc.queue_last = bcp->queue_last;
-
- msg = bcp->bau_msg_head;
- while (msg->swack_vec) {
- count++;
-
- msgdesc.msg_slot = msg - msgdesc.queue_first;
- msgdesc.msg = msg;
- if (bcp->uvhub_version == UV_BAU_V2)
- process_uv2_message(&msgdesc, bcp);
- else
- /* no error workaround for uv3 */
- bau_process_message(&msgdesc, bcp, 1);
-
- msg++;
- if (msg > msgdesc.queue_last)
- msg = msgdesc.queue_first;
- bcp->bau_msg_head = msg;
- }
- stat->d_time += (get_cycles() - time_start);
- if (!count)
- stat->d_nomsg++;
- else if (count > 1)
- stat->d_multmsg++;
-}
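
The interrupt handler drains a circular payload queue: it walks from bau_msg_head until it finds a slot whose swack_vec is zero, wrapping from queue_last back to queue_first. A standalone sketch of that wraparound walk, with a plain int array standing in for the bau_pq_entry ring:

#include <stdio.h>

#define QLEN 4

int main(void)
{
	int q[QLEN] = { 0, 0, 30, 40 };	/* 0 plays the role of an empty swack_vec */
	int *first = q, *last = q + QLEN - 1;
	int *head = q + 2;		/* the saved head may start mid-queue */

	while (*head) {			/* stop at the first unoccupied slot */
		printf("processing message %d\n", *head);
		head++;
		if (head > last)	/* wrap, as the BAU handler does */
			head = first;
	}
	return 0;
}
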
-
-/*
- * Each target uvhub (i.e. a uvhub that has cpus) needs to have
- * shootdown message timeouts enabled. The timeout does not cause
- * an interrupt, but causes an error message to be returned to
- * the sender.
- */
-static void __init enable_timeouts(void)
-{
- int uvhub;
- int nuvhubs;
- int pnode;
- unsigned long mmr_image;
-
- nuvhubs = uv_num_possible_blades();
-
- for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
- if (!uv_blade_nr_possible_cpus(uvhub))
- continue;
-
- pnode = uv_blade_to_pnode(uvhub);
- mmr_image = read_mmr_misc_control(pnode);
- /*
- * Set the timeout period and then lock it in; the three
- * steps below capture and lock in the period.
- *
- * To program the period, the SOFT_ACK_MODE must be off.
- */
- mmr_image &= ~(1L << SOFTACK_MSHIFT);
- write_mmr_misc_control(pnode, mmr_image);
- /*
- * Set the 4-bit period.
- */
- mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
- mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
- write_mmr_misc_control(pnode, mmr_image);
-
- mmr_image |= (1L << SOFTACK_MSHIFT);
- if (is_uv2_hub()) {
- /* do not touch the legacy mode bit */
- /* hw bug workaround; do not use extended status */
- mmr_image &= ~(1L << UV2_EXT_SHFT);
- } else if (is_uv3_hub()) {
- mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
- mmr_image |= (1L << SB_STATUS_SHFT);
- }
- write_mmr_misc_control(pnode, mmr_image);
- }
-}
-
-static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
-{
- if (*offset < num_possible_cpus())
- return offset;
- return NULL;
-}
-
-static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
-{
- (*offset)++;
- if (*offset < num_possible_cpus())
- return offset;
- return NULL;
-}
-
-static void ptc_seq_stop(struct seq_file *file, void *data)
-{
-}
-
-/*
- * Display the statistics through /proc/sgi_uv/ptc_statistics
- * 'data' points to the cpu number
- * Note: see the descriptions in stat_description[].
- */
-static int ptc_seq_show(struct seq_file *file, void *data)
-{
- struct ptc_stats *stat;
- struct bau_control *bcp;
- int cpu;
-
- cpu = *(loff_t *)data;
- if (!cpu) {
- seq_puts(file,
- "# cpu bauoff sent stime self locals remotes ncpus localhub ");
- seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
- seq_puts(file,
- "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
- seq_puts(file,
- "rok resetp resett giveup sto bz throt disable ");
- seq_puts(file,
- "enable wars warshw warwaits enters ipidis plugged ");
- seq_puts(file,
- "ipiover glim cong swack recv rtime all one mult ");
- seq_puts(file, "none retry canc nocan reset rcan\n");
- }
- if (cpu < num_possible_cpus() && cpu_online(cpu)) {
- bcp = &per_cpu(bau_control, cpu);
- if (bcp->nobau) {
- seq_printf(file, "cpu %d bau disabled\n", cpu);
- return 0;
- }
- stat = bcp->statp;
- /* source side statistics */
- seq_printf(file,
- "cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
- cpu, bcp->nobau, stat->s_requestor,
- cycles_2_us(stat->s_time),
- stat->s_ntargself, stat->s_ntarglocals,
- stat->s_ntargremotes, stat->s_ntargcpu,
- stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
- stat->s_ntarguvhub, stat->s_ntarguvhub16);
- seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
- stat->s_ntarguvhub8, stat->s_ntarguvhub4,
- stat->s_ntarguvhub2, stat->s_ntarguvhub1,
- stat->s_dtimeout, stat->s_strongnacks);
- seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
- stat->s_retry_messages, stat->s_retriesok,
- stat->s_resets_plug, stat->s_resets_timeout,
- stat->s_giveup, stat->s_stimeout,
- stat->s_busy, stat->s_throttles);
- seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
- stat->s_bau_disabled, stat->s_bau_reenabled,
- stat->s_uv2_wars, stat->s_uv2_wars_hw,
- stat->s_uv2_war_waits, stat->s_enters,
- stat->s_ipifordisabled, stat->s_plugged,
- stat->s_overipilimit, stat->s_giveuplimit,
- stat->s_congested);
-
- /* destination side statistics */
- seq_printf(file,
- "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
- ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
- stat->d_requestee, cycles_2_us(stat->d_time),
- stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
- stat->d_nomsg, stat->d_retries, stat->d_canceled,
- stat->d_nocanceled, stat->d_resets,
- stat->d_rcanceled);
- }
- return 0;
-}
-
-/*
- * Display the tunables through debugfs
- */
-static ssize_t tunables_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- char *buf;
- int ret;
-
- buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
- "max_concur plugged_delay plugsb4reset timeoutsb4reset",
- "ipi_reset_limit complete_threshold congested_response_us",
- "congested_reps disabled_period giveup_limit",
- max_concurr, plugged_delay, plugsb4reset,
- timeoutsb4reset, ipi_reset_limit, complete_threshold,
- congested_respns_us, congested_reps, disabled_period,
- giveup_limit);
-
- if (!buf)
- return -ENOMEM;
-
- ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
- kfree(buf);
- return ret;
-}
-
-/*
- * handle a write to /proc/sgi_uv/ptc_statistics
- * -1: reset the statistics
- * 0: display meaning of the statistics
- */
-static ssize_t ptc_proc_write(struct file *file, const char __user *user,
- size_t count, loff_t *data)
-{
- int cpu;
- int i;
- int elements;
- long input_arg;
- char optstr[64];
- struct ptc_stats *stat;
-
- if (count == 0 || count > sizeof(optstr))
- return -EINVAL;
- if (copy_from_user(optstr, user, count))
- return -EFAULT;
- optstr[count - 1] = '\0';
-
- if (!strcmp(optstr, "on")) {
- set_bau_on();
- return count;
- } else if (!strcmp(optstr, "off")) {
- set_bau_off();
- return count;
- }
-
- if (kstrtol(optstr, 10, &input_arg) < 0) {
- pr_debug("%s is invalid\n", optstr);
- return -EINVAL;
- }
-
- if (input_arg == 0) {
- elements = ARRAY_SIZE(stat_description);
- pr_debug("# cpu: cpu number\n");
- pr_debug("Sender statistics:\n");
- for (i = 0; i < elements; i++)
- pr_debug("%s\n", stat_description[i]);
- } else if (input_arg == -1) {
- for_each_present_cpu(cpu) {
- stat = &per_cpu(ptcstats, cpu);
- memset(stat, 0, sizeof(struct ptc_stats));
- }
- }
-
- return count;
-}
-
-static int local_atoi(const char *name)
-{
- int val = 0;
-
- for (;; name++) {
- switch (*name) {
- case '0' ... '9':
- val = 10*val+(*name-'0');
- break;
- default:
- return val;
- }
- }
-}
-
-/*
- * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
- * Zero values reset them to defaults.
- */
-static int parse_tunables_write(struct bau_control *bcp, char *instr,
- int count)
-{
- char *p;
- char *q;
- int cnt = 0;
- int val;
- int e = ARRAY_SIZE(tunables);
-
- p = instr + strspn(instr, WHITESPACE);
- q = p;
- for (; *p; p = q + strspn(q, WHITESPACE)) {
- q = p + strcspn(p, WHITESPACE);
- cnt++;
- if (q == p)
- break;
- }
- if (cnt != e) {
- pr_info("bau tunable error: should be %d values\n", e);
- return -EINVAL;
- }
-
- p = instr + strspn(instr, WHITESPACE);
- q = p;
- for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
- q = p + strcspn(p, WHITESPACE);
- val = local_atoi(p);
- switch (cnt) {
- case 0:
- if (val == 0) {
- max_concurr = MAX_BAU_CONCURRENT;
- max_concurr_const = MAX_BAU_CONCURRENT;
- continue;
- }
- if (val < 1 || val > bcp->cpus_in_uvhub) {
- pr_debug(
- "Error: BAU max concurrent %d is invalid\n",
- val);
- return -EINVAL;
- }
- max_concurr = val;
- max_concurr_const = val;
- continue;
- default:
- if (val == 0)
- *tunables[cnt].tunp = tunables[cnt].deflt;
- else
- *tunables[cnt].tunp = val;
- continue;
- }
- }
- return 0;
-}
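
parse_tunables_write() makes two passes over the buffer with the same strspn()/strcspn() idiom, first counting tokens and then consuming them. A standalone sketch of that whitespace-splitting loop; WHITESPACE is assumed here to be " \t\n", a stand-in for the BAU code's definition:

#include <stdio.h>
#include <string.h>

#define WHITESPACE " \t\n"

int main(void)
{
	char instr[] = " 16 40 2  8 ";
	char *p, *q;
	int cnt = 0;

	p = instr + strspn(instr, WHITESPACE);	/* skip leading whitespace */
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);	/* q: just past this token */
		if (q == p)
			break;
		printf("token %d: %.*s\n", cnt++, (int)(q - p), p);
	}
	printf("%d tokens\n", cnt);
	return 0;
}
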
-
-/*
- * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
- */
-static ssize_t tunables_write(struct file *file, const char __user *user,
- size_t count, loff_t *data)
-{
- int cpu;
- int ret;
- char instr[100];
- struct bau_control *bcp;
-
- if (count == 0 || count > sizeof(instr)-1)
- return -EINVAL;
- if (copy_from_user(instr, user, count))
- return -EFAULT;
-
- instr[count] = '\0';
-
- cpu = get_cpu();
- bcp = &per_cpu(bau_control, cpu);
- ret = parse_tunables_write(bcp, instr, count);
- put_cpu();
- if (ret)
- return ret;
-
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
- bcp->max_concurr = max_concurr;
- bcp->max_concurr_const = max_concurr;
- bcp->plugged_delay = plugged_delay;
- bcp->plugsb4reset = plugsb4reset;
- bcp->timeoutsb4reset = timeoutsb4reset;
- bcp->ipi_reset_limit = ipi_reset_limit;
- bcp->complete_threshold = complete_threshold;
- bcp->cong_response_us = congested_respns_us;
- bcp->cong_reps = congested_reps;
- bcp->disabled_period = sec_2_cycles(disabled_period);
- bcp->giveup_limit = giveup_limit;
- }
- return count;
-}
-
-static const struct seq_operations uv_ptc_seq_ops = {
- .start = ptc_seq_start,
- .next = ptc_seq_next,
- .stop = ptc_seq_stop,
- .show = ptc_seq_show
-};
-
-static int ptc_proc_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &uv_ptc_seq_ops);
-}
-
-static int tunables_open(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static const struct proc_ops uv_ptc_proc_ops = {
- .proc_open = ptc_proc_open,
- .proc_read = seq_read,
- .proc_write = ptc_proc_write,
- .proc_lseek = seq_lseek,
- .proc_release = seq_release,
-};
-
-static const struct file_operations tunables_fops = {
- .open = tunables_open,
- .read = tunables_read,
- .write = tunables_write,
- .llseek = default_llseek,
-};
-
-static int __init uv_ptc_init(void)
-{
- struct proc_dir_entry *proc_uv_ptc;
-
- if (!is_uv_system())
- return 0;
-
- proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
- &uv_ptc_proc_ops);
- if (!proc_uv_ptc) {
- pr_err("unable to create %s proc entry\n",
- UV_PTC_BASENAME);
- return -EINVAL;
- }
-
- tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
- debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600, tunables_dir, NULL,
- &tunables_fops);
- return 0;
-}
-
-/*
- * Initialize the sending side's activation descriptor buffers.
- */
-static void activation_descriptor_init(int node, int pnode, int base_pnode)
-{
- int i;
- int cpu;
- unsigned long gpa;
- unsigned long m;
- unsigned long n;
- size_t dsize;
- struct bau_desc *bau_desc;
- struct bau_desc *bd2;
- struct uv2_3_bau_msg_header *uv2_3_hdr;
- struct bau_control *bcp;
-
- /*
- * each bau_desc is 64 bytes; each cpu gets 8 of them (ITEMS_PER_DESC),
- * and there is one such set per cpu on the uvhub (ADP_SZ cpus)
- */
- dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
- bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
- BUG_ON(!bau_desc);
-
- gpa = uv_gpa(bau_desc);
- n = uv_gpa_to_gnode(gpa);
- m = ops.bau_gpa_to_offset(gpa);
-
- /* the 14-bit pnode */
- write_mmr_descriptor_base(pnode,
- (n << UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT | m));
- /*
- * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
- * cpu even though we only use the first one; one descriptor can
- * describe a broadcast to 256 uv hubs.
- */
- for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
- memset(bd2, 0, sizeof(struct bau_desc));
- /*
- * BIOS uses legacy mode, but uv2 and uv3 hardware always
- * uses native mode for selective broadcasts.
- */
- uv2_3_hdr = &bd2->header.uv2_3_hdr;
- uv2_3_hdr->swack_flag = 1;
- uv2_3_hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
- uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
- uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
- }
- for_each_present_cpu(cpu) {
- if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
- continue;
- bcp = &per_cpu(bau_control, cpu);
- bcp->descriptor_base = bau_desc;
- }
-}
-
-/*
- * initialize the destination side's receiving buffers
- * entered for each uvhub in the partition
- * - node is first node (kernel memory notion) on the uvhub
- * - pnode is the uvhub's physical identifier
- */
-static void pq_init(int node, int pnode)
-{
- int cpu;
- size_t plsize;
- char *cp;
- void *vp;
- unsigned long gnode, first, last, tail;
- struct bau_pq_entry *pqp;
- struct bau_control *bcp;
-
- plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
- vp = kmalloc_node(plsize, GFP_KERNEL, node);
- BUG_ON(!vp);
-
- pqp = (struct bau_pq_entry *)vp;
- cp = (char *)pqp + 31;
- pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
-
- for_each_present_cpu(cpu) {
- if (pnode != uv_cpu_to_pnode(cpu))
- continue;
- /* for every cpu on this pnode: */
- bcp = &per_cpu(bau_control, cpu);
- bcp->queue_first = pqp;
- bcp->bau_msg_head = pqp;
- bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
- }
-
- first = ops.bau_gpa_to_offset(uv_gpa(pqp));
- last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1)));
-
- /*
- * Pre-UV4, the gnode is required to locate the payload queue
- * and the payload queue tail must be maintained by the kernel.
- */
- bcp = &per_cpu(bau_control, smp_processor_id());
- if (bcp->uvhub_version <= UV_BAU_V3) {
- tail = first;
- gnode = uv_gpa_to_gnode(uv_gpa(pqp));
- first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
- write_mmr_payload_tail(pnode, tail);
- }
-
- ops.write_payload_first(pnode, first);
- ops.write_payload_last(pnode, last);
-
- /* in effect, all msg_type's are set to MSG_NOOP */
- memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
-}
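
pq_init() allocates one spare entry (DEST_Q_SIZE + 1) and then rounds the pointer up to a 32-byte boundary with the "+ 31" / shift-down-shift-up trick. The same idiom in a standalone sketch; the 32-byte figure mirrors the ">> 5 << 5" above:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define ALIGNMENT 32		/* matches the >> 5 << 5 in pq_init() */

int main(void)
{
	/* over-allocate so the aligned pointer still has room, as pq_init does */
	void *raw = malloc(8 * ALIGNMENT + ALIGNMENT);
	if (!raw)
		return 1;

	char *cp = (char *)raw + (ALIGNMENT - 1);
	void *aligned = (void *)(((uintptr_t)cp >> 5) << 5); /* clear low 5 bits */

	printf("raw     %p\naligned %p (offset mod 32 = %lu)\n",
	       raw, aligned, (unsigned long)((uintptr_t)aligned % ALIGNMENT));
	free(raw);
	return 0;
}
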
-
-/*
- * Initialization of each UV hub's structures
- */
-static void __init init_uvhub(int uvhub, int vector, int base_pnode)
-{
- int node;
- int pnode;
- unsigned long apicid;
-
- node = uvhub_to_first_node(uvhub);
- pnode = uv_blade_to_pnode(uvhub);
-
- activation_descriptor_init(node, pnode, base_pnode);
-
- pq_init(node, pnode);
- /*
- * The below initialization can't be in firmware because the
- * messaging IRQ will be determined by the OS.
- */
- apicid = uvhub_to_first_apicid(uvhub);
- write_mmr_data_config(pnode, ((apicid << 32) | vector));
-}
-
-/*
- * We will set BAU_MISC_CONTROL with a timeout period.
- * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
- * So the destination timeout period has to be calculated from them.
- */
-static int calculate_destination_timeout(void)
-{
- unsigned long mmr_image;
- int mult1;
- int base;
- int ret;
-
- /* same destination timeout for uv2 and uv3 */
- /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
- mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
- mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
- if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
- base = 80;
- else
- base = 10;
- mult1 = mmr_image & UV2_ACK_MASK;
- ret = mult1 * base;
-
- return ret;
-}
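
The calculation above boils down to: one MMR bit selects a 10 us or 80 us base unit, a 3-bit field supplies the multiplier, and the destination timeout is mult1 * base. A worked standalone example with a hypothetical field value (the shift and mask mirror UV2_ACK_UNITS_SHFT and UV2_ACK_MASK):

#include <stdio.h>

int main(void)
{
	unsigned long field = 0xd;	/* 0b1101: 80us base, multiplier 5 */
	int base = (field & (1UL << 3)) ? 80 : 10;	/* units-select bit */
	int mult1 = field & 0x7;			/* 3-bit multiplier */

	printf("destination timeout: %d us\n", mult1 * base);	/* 400 us */
	return 0;
}
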
-
-static void __init init_per_cpu_tunables(void)
-{
- int cpu;
- struct bau_control *bcp;
-
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
- bcp->baudisabled = 0;
- if (nobau)
- bcp->nobau = true;
- bcp->statp = &per_cpu(ptcstats, cpu);
- /* time interval to catch a hardware stay-busy bug */
- bcp->timeout_interval = usec_2_cycles(2*timeout_us);
- bcp->max_concurr = max_concurr;
- bcp->max_concurr_const = max_concurr;
- bcp->plugged_delay = plugged_delay;
- bcp->plugsb4reset = plugsb4reset;
- bcp->timeoutsb4reset = timeoutsb4reset;
- bcp->ipi_reset_limit = ipi_reset_limit;
- bcp->complete_threshold = complete_threshold;
- bcp->cong_response_us = congested_respns_us;
- bcp->cong_reps = congested_reps;
- bcp->disabled_period = sec_2_cycles(disabled_period);
- bcp->giveup_limit = giveup_limit;
- spin_lock_init(&bcp->queue_lock);
- spin_lock_init(&bcp->uvhub_lock);
- spin_lock_init(&bcp->disable_lock);
- }
-}
-
-/*
- * Scan all cpus to collect blade and socket summaries.
- */
-static int __init get_cpu_topology(int base_pnode,
- struct uvhub_desc *uvhub_descs,
- unsigned char *uvhub_mask)
-{
- int cpu;
- int pnode;
- int uvhub;
- int socket;
- struct bau_control *bcp;
- struct uvhub_desc *bdp;
- struct socket_desc *sdp;
-
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
-
- memset(bcp, 0, sizeof(struct bau_control));
-
- pnode = uv_cpu_hub_info(cpu)->pnode;
- if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
- pr_emerg(
- "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
- cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
- return 1;
- }
-
- bcp->osnode = cpu_to_node(cpu);
- bcp->partition_base_pnode = base_pnode;
-
- uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
- *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
- bdp = &uvhub_descs[uvhub];
-
- bdp->num_cpus++;
- bdp->uvhub = uvhub;
- bdp->pnode = pnode;
-
- /*
- * kludge: 'assuming' one node per socket, and assuming that
- * disabling a socket just leaves a gap in node numbers
- */
- socket = bcp->osnode & 1;
- bdp->socket_mask |= (1 << socket);
- sdp = &bdp->socket[socket];
- sdp->cpu_number[sdp->num_cpus] = cpu;
- sdp->num_cpus++;
- if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
- pr_emerg("%d cpus per socket invalid\n",
- sdp->num_cpus);
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * Each socket is to get a local array of pnodes/hubs.
- */
-static void make_per_cpu_thp(struct bau_control *smaster)
-{
- int cpu;
- size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
-
- smaster->thp = kzalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
- for_each_present_cpu(cpu) {
- smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
- smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
- }
-}
-
-/*
- * Each uvhub is to get a local cpumask.
- */
-static void make_per_hub_cpumask(struct bau_control *hmaster)
-{
- int sz = sizeof(cpumask_t);
-
- hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
-}
-
-/*
- * Initialize all the per_cpu information for the cpus on a given socket,
- * given what has been gathered into the socket_desc struct,
- * and report the chosen hub and socket masters back to the caller.
- */
-static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
- struct bau_control **smasterp,
- struct bau_control **hmasterp)
-{
- int i, cpu, uvhub_cpu;
- struct bau_control *bcp;
-
- for (i = 0; i < sdp->num_cpus; i++) {
- cpu = sdp->cpu_number[i];
- bcp = &per_cpu(bau_control, cpu);
- bcp->cpu = cpu;
- if (i == 0) {
- *smasterp = bcp;
- if (!(*hmasterp))
- *hmasterp = bcp;
- }
- bcp->cpus_in_uvhub = bdp->num_cpus;
- bcp->cpus_in_socket = sdp->num_cpus;
- bcp->socket_master = *smasterp;
- bcp->uvhub = bdp->uvhub;
- if (is_uv2_hub())
- bcp->uvhub_version = UV_BAU_V2;
- else if (is_uv3_hub())
- bcp->uvhub_version = UV_BAU_V3;
- else if (is_uv4_hub())
- bcp->uvhub_version = UV_BAU_V4;
- else {
- pr_emerg("uvhub version not 1, 2, 3, or 4\n");
- return 1;
- }
- bcp->uvhub_master = *hmasterp;
- uvhub_cpu = uv_cpu_blade_processor_id(cpu);
- bcp->uvhub_cpu = uvhub_cpu;
-
- /*
- * The ERROR and BUSY status registers are located pairwise over
- * the STATUS_0 and STATUS_1 mmrs; each an array[32] of 2 bits.
- */
- if (uvhub_cpu < UV_CPUS_PER_AS) {
- bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
- bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE;
- } else {
- bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
- bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS)
- * UV_ACT_STATUS_SIZE;
- }
-
- if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
- pr_emerg("%d cpus per uvhub invalid\n",
- bcp->uvhub_cpu);
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * Summarize the blade and socket topology into the per_cpu structures.
- */
-static int __init summarize_uvhub_sockets(int nuvhubs,
- struct uvhub_desc *uvhub_descs,
- unsigned char *uvhub_mask)
-{
- int socket;
- int uvhub;
- unsigned short socket_mask;
-
- for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
- struct uvhub_desc *bdp;
- struct bau_control *smaster = NULL;
- struct bau_control *hmaster = NULL;
-
- if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
- continue;
-
- bdp = &uvhub_descs[uvhub];
- socket_mask = bdp->socket_mask;
- socket = 0;
- while (socket_mask) {
- struct socket_desc *sdp;
- if ((socket_mask & 1)) {
- sdp = &bdp->socket[socket];
- if (scan_sock(sdp, bdp, &smaster, &hmaster))
- return 1;
- make_per_cpu_thp(smaster);
- }
- socket++;
- socket_mask = (socket_mask >> 1);
- }
- make_per_hub_cpumask(hmaster);
- }
- return 0;
-}
-
-/*
- * initialize the bau_control structure for each cpu
- */
-static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
-{
- struct uvhub_desc *uvhub_descs;
- unsigned char *uvhub_mask = NULL;
-
- if (is_uv3_hub() || is_uv2_hub())
- timeout_us = calculate_destination_timeout();
-
- uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
- if (!uvhub_descs)
- goto fail;
-
- uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
- if (!uvhub_mask)
- goto fail;
-
- if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
- goto fail;
-
- if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
- goto fail;
-
- kfree(uvhub_descs);
- kfree(uvhub_mask);
- init_per_cpu_tunables();
- return 0;
-
-fail:
- kfree(uvhub_descs);
- kfree(uvhub_mask);
- return 1;
-}
-
-static const struct bau_operations uv2_3_bau_ops __initconst = {
- .bau_gpa_to_offset = uv_gpa_to_offset,
- .read_l_sw_ack = read_mmr_sw_ack,
- .read_g_sw_ack = read_gmmr_sw_ack,
- .write_l_sw_ack = write_mmr_sw_ack,
- .write_g_sw_ack = write_gmmr_sw_ack,
- .write_payload_first = write_mmr_payload_first,
- .write_payload_last = write_mmr_payload_last,
- .wait_completion = uv2_3_wait_completion,
-};
-
-static const struct bau_operations uv4_bau_ops __initconst = {
- .bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
- .read_l_sw_ack = read_mmr_proc_sw_ack,
- .read_g_sw_ack = read_gmmr_proc_sw_ack,
- .write_l_sw_ack = write_mmr_proc_sw_ack,
- .write_g_sw_ack = write_gmmr_proc_sw_ack,
- .write_payload_first = write_mmr_proc_payload_first,
- .write_payload_last = write_mmr_proc_payload_last,
- .wait_completion = uv4_wait_completion,
-};
-
-/*
- * Initialization of BAU-related structures
- */
-static int __init uv_bau_init(void)
-{
- int uvhub;
- int pnode;
- int nuvhubs;
- int cur_cpu;
- int cpus;
- int vector;
- cpumask_var_t *mask;
-
- if (!is_uv_system())
- return 0;
-
- if (is_uv4_hub())
- ops = uv4_bau_ops;
- else if (is_uv3_hub())
- ops = uv2_3_bau_ops;
- else if (is_uv2_hub())
- ops = uv2_3_bau_ops;
-
- nuvhubs = uv_num_possible_blades();
- if (nuvhubs < 2) {
- pr_crit("UV: BAU disabled - insufficient hub count\n");
- goto err_bau_disable;
- }
-
- for_each_possible_cpu(cur_cpu) {
- mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
- zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
- }
-
- uv_base_pnode = 0x7fffffff;
- for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
- cpus = uv_blade_nr_possible_cpus(uvhub);
- if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
- uv_base_pnode = uv_blade_to_pnode(uvhub);
- }
-
- /* software timeouts are not supported on UV4 */
- if (is_uv3_hub() || is_uv2_hub())
- enable_timeouts();
-
- if (init_per_cpu(nuvhubs, uv_base_pnode)) {
- pr_crit("UV: BAU disabled - per CPU init failed\n");
- goto err_bau_disable;
- }
-
- vector = UV_BAU_MESSAGE;
- for_each_possible_blade(uvhub) {
- if (uv_blade_nr_possible_cpus(uvhub))
- init_uvhub(uvhub, vector, uv_base_pnode);
- }
-
- for_each_possible_blade(uvhub) {
- if (uv_blade_nr_possible_cpus(uvhub)) {
- unsigned long val;
- unsigned long mmr;
- pnode = uv_blade_to_pnode(uvhub);
- /* INIT the bau */
- val = 1L << 63;
- write_gmmr_activation(pnode, val);
- mmr = 1; /* should be 1 to broadcast to both sockets */
- write_mmr_data_broadcast(pnode, mmr);
- }
- }
-
- return 0;
-
-err_bau_disable:
-
- for_each_possible_cpu(cur_cpu)
- free_cpumask_var(per_cpu(uv_flush_tlb_mask, cur_cpu));
-
- set_bau_off();
- nobau_perm = 1;
-
- return -EINVAL;
-}
-core_initcall(uv_bau_init);
-fs_initcall(uv_ptc_init);
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 9d08ff5a755e..0f5cbcf0da63 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -2,8 +2,9 @@
/*
* SGI NMI support routines
*
- * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (c) Mike Travis
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
+ * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) Mike Travis
*/
#include <linux/cpu.h>
@@ -54,6 +55,20 @@ static struct uv_hub_nmi_s **uv_hub_nmi_list;
DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+/* Newer SMM NMI handler, not present in all systems */
+static unsigned long uvh_nmi_mmrx; /* UVH_EVENT_OCCURRED0/1 */
+static unsigned long uvh_nmi_mmrx_clear; /* UVH_EVENT_OCCURRED0/1_ALIAS */
+static int uvh_nmi_mmrx_shift; /* UVH_EVENT_OCCURRED0/1_EXTIO_INT0_SHFT */
+static int uvh_nmi_mmrx_mask; /* UVH_EVENT_OCCURRED0/1_EXTIO_INT0_MASK */
+static char *uvh_nmi_mmrx_type; /* "EXTIO_INT0" */
+
+/* Non-zero indicates newer SMM NMI handler present */
+static unsigned long uvh_nmi_mmrx_supported; /* UVH_EXTIO_INT0_BROADCAST */
+
+/* Indicates to BIOS that we want to use the newer SMM NMI handler */
+static unsigned long uvh_nmi_mmrx_req; /* UVH_BIOS_KERNEL_MMR_ALIAS_2 */
+static int uvh_nmi_mmrx_req_shift; /* 62 */
+
/* UV hubless values */
#define NMI_CONTROL_PORT 0x70
#define NMI_DUMMY_PORT 0x71
@@ -227,13 +242,43 @@ static inline bool uv_nmi_action_is(const char *action)
/* Setup which NMI support is present in system */
static void uv_nmi_setup_mmrs(void)
{
- if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) {
- uv_write_local_mmr(UVH_NMI_MMRX_REQ,
- 1UL << UVH_NMI_MMRX_REQ_SHIFT);
- nmi_mmr = UVH_NMI_MMRX;
- nmi_mmr_clear = UVH_NMI_MMRX_CLEAR;
- nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT;
- pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE);
+ /* First determine arch specific MMRs to handshake with BIOS */
+ if (UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK) {
+ uvh_nmi_mmrx = UVH_EVENT_OCCURRED0;
+ uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED0_ALIAS;
+ uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT;
+ uvh_nmi_mmrx_mask = UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK;
+ uvh_nmi_mmrx_type = "OCRD0-EXTIO_INT0";
+
+ uvh_nmi_mmrx_supported = UVH_EXTIO_INT0_BROADCAST;
+ uvh_nmi_mmrx_req = UVH_BIOS_KERNEL_MMR_ALIAS_2;
+ uvh_nmi_mmrx_req_shift = 62;
+
+ } else if (UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK) {
+ uvh_nmi_mmrx = UVH_EVENT_OCCURRED1;
+ uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED1_ALIAS;
+ uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED1_EXTIO_INT0_SHFT;
+ uvh_nmi_mmrx_mask = UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK;
+ uvh_nmi_mmrx_type = "OCRD1-EXTIO_INT0";
+
+ uvh_nmi_mmrx_supported = UVH_EXTIO_INT0_BROADCAST;
+ uvh_nmi_mmrx_req = UVH_BIOS_KERNEL_MMR_ALIAS_2;
+ uvh_nmi_mmrx_req_shift = 62;
+
+ } else {
+ pr_err("UV:%s:cannot find EVENT_OCCURRED*_EXTIO_INT0\n",
+ __func__);
+ return;
+ }
+
+ /* Then find out if new NMI is supported */
+ if (likely(uv_read_local_mmr(uvh_nmi_mmrx_supported))) {
+ uv_write_local_mmr(uvh_nmi_mmrx_req,
+ 1UL << uvh_nmi_mmrx_req_shift);
+ nmi_mmr = uvh_nmi_mmrx;
+ nmi_mmr_clear = uvh_nmi_mmrx_clear;
+ nmi_mmr_pending = 1UL << uvh_nmi_mmrx_shift;
+ pr_info("UV: SMI NMI support: %s\n", uvh_nmi_mmrx_type);
} else {
nmi_mmr = UVH_NMI_MMR;
nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
@@ -1049,5 +1094,5 @@ void __init uv_nmi_setup_hubless(void)
/* Ensure NMI enabled in Processor Interface Reg: */
uv_reassert_nmi();
uv_register_nmi_notifier();
- pr_info("UV: Hubless NMI enabled\n");
+ pr_info("UV: PCH NMI enabled\n");
}
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index f82a1337a608..54663f3e00cb 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -2,6 +2,7 @@
/*
* SGI RTC clock/timer routines.
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) Dimitri Sivanich
*/
@@ -52,7 +53,7 @@ struct uv_rtc_timer_head {
struct {
int lcpu; /* systemwide logical cpu number */
u64 expires; /* next timer expiration for this cpu */
- } cpu[1];
+ } cpu[];
};
/*
@@ -84,10 +85,8 @@ static void uv_rtc_send_IPI(int cpu)
/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
- if (is_uvx_hub())
- return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
- UVXH_EVENT_OCCURRED2_RTC_1_MASK;
- return 0;
+ return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED2) &
+ UVH_EVENT_OCCURRED2_RTC_1_MASK;
}
/* Setup interrupt and return non-zero if early expiration occurred. */
@@ -101,8 +100,8 @@ static int uv_setup_intr(int cpu, u64 expires)
UVH_RTC1_INT_CONFIG_M_MASK);
uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
- uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
- UVXH_EVENT_OCCURRED2_RTC_1_MASK);
+ uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED2_ALIAS,
+ UVH_EVENT_OCCURRED2_RTC_1_MASK);
val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
@@ -148,9 +147,8 @@ static __init int uv_rtc_allocate_timers(void)
struct uv_rtc_timer_head *head = blade_info[bid];
if (!head) {
- head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
- (uv_blade_nr_possible_cpus(bid) *
- 2 * sizeof(u64)),
+ head = kmalloc_node(struct_size(head, cpu,
+ uv_blade_nr_possible_cpus(bid)),
GFP_KERNEL, nid);
if (!head) {
uv_rtc_deallocate_timers();
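
The uv_time.c hunk above replaces the old cpu[1] array plus hand-computed allocation size with a C99 flexible array member and the kernel's struct_size() helper, which computes sizeof(*head) + n * sizeof(head->cpu[0]) with overflow checking. A standalone sketch of the pattern; the struct_size() macro here is a simplified stand-in without the kernel's overflow checks:

#include <stdio.h>
#include <stdlib.h>

struct timer_head {
	int ncpus;
	struct {
		int lcpu;
		unsigned long long expires;
	} cpu[];			/* flexible array member */
};

/* simplified stand-in for the kernel's struct_size() (no overflow check) */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	int n = 4;
	struct timer_head *head = malloc(struct_size(head, cpu, n));

	if (!head)
		return 1;
	head->ncpus = n;
	for (int i = 0; i < n; i++)
		head->cpu[i].lcpu = i;
	printf("allocated %zu bytes for %d per-cpu slots\n",
	       struct_size(head, cpu, n), n);
	free(head);
	return 0;
}
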
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 76cee341507b..b3b17d6c50f0 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -448,7 +448,7 @@ static void do_signal(struct pt_regs *regs)
regs->areg[2] = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->areg[2] = regs->syscall;
regs->pc -= 3;
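
The xtensa change swaps a "/* fallthrough */" comment for the fallthrough; pseudo-keyword, which lets -Wimplicit-fallthrough distinguish deliberate fall-through from accidental. A standalone sketch; the macro below is a simplified version of the kernel's definition, and the error values are illustrative:

#include <stdio.h>

#if defined(__GNUC__) && __GNUC__ >= 7
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0)	/* older compilers: plain no-op */
#endif

static const char *classify(int err)
{
	switch (err) {
	case -514:			/* e.g. -ERESTARTNOHAND */
		fputs("no handler installed\n", stdout);
		fallthrough;		/* deliberate: both cases restart */
	case -513:			/* e.g. -ERESTARTNOINTR */
		return "restart the syscall";
	default:
		return "deliver as-is";
	}
}

int main(void)
{
	printf("%s\n", classify(-514));
	printf("%s\n", classify(0));
	return 0;
}
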