Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 56
-rw-r--r--  arch/arm64/Kconfig.platforms | 3
-rw-r--r--  arch/arm64/Makefile | 10
-rw-r--r--  arch/arm64/boot/dts/.gitignore | 1
-rw-r--r--  arch/arm64/boot/dts/Makefile | 59
-rw-r--r--  arch/arm64/boot/dts/actions/Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/al/Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/allwinner/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | 9
-rw-r--r--  arch/arm64/boot/dts/altera/Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/amd/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts | 1
-rw-r--r--  arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts | 1
-rw-r--r--  arch/arm64/boot/dts/amd/amd-overdrive.dts | 1
-rw-r--r--  arch/arm64/boot/dts/amd/amd-seattle-clks.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/amd/husky.dts | 1
-rw-r--r--  arch/arm64/boot/dts/amlogic/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts | 19
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts | 12
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts | 9
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi | 13
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi | 39
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts | 7
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts | 13
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts | 10
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl.dtsi | 39
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts | 7
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts | 1
-rw-r--r--  arch/arm64/boot/dts/apm/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/arm/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/arm/foundation-v8-gicv3.dts | 1
-rw-r--r--  arch/arm64/boot/dts/arm/foundation-v8.dts | 1
-rw-r--r--  arch/arm64/boot/dts/arm/foundation-v8.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/arm/juno-base.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts | 1
-rw-r--r--  arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts | 1
-rw-r--r--  arch/arm64/boot/dts/broadcom/Makefile | 8
-rw-r--r--  arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts | 1
-rw-r--r--  arch/arm64/boot/dts/broadcom/northstar2/Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/broadcom/stingray/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/cavium/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/exynos/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/freescale/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/hisilicon/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts | 1
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi3660.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts | 1
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi6220.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/lg/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/lg/lg1312-ref.dts | 1
-rw-r--r--  arch/arm64/boot/dts/lg/lg1312.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/lg/lg1313-ref.dts | 1
-rw-r--r--  arch/arm64/boot/dts/lg/lg1313.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/marvell/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-ap806.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/mediatek/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173.dtsi | 12
-rw-r--r--  arch/arm64/boot/dts/nvidia/Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra132-norrin.dts | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra132.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra186.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210-p2371-0000.dts | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210-p2530.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210-p2571.dts | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210-p2595.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210-smaug.dts | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/pm8004.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/pm8916.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/pm8994.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/pmi8994.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/realtek/Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/renesas/Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/renesas/salvator-common.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/rockchip/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3328.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368.dtsi | 74
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-firefly.dts | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/socionext/Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi | 9
-rw-r--r--  arch/arm64/boot/dts/sprd/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/xilinx/Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/zte/Makefile | 4
-rw-r--r--  arch/arm64/crypto/Kconfig | 1
-rw-r--r--  arch/arm64/crypto/aes-ce-setkey.h | 1
-rw-r--r--  arch/arm64/include/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/asm/acpi.h | 12
-rw-r--r--  arch/arm64/include/asm/alternative.h | 1
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 5
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 1
-rw-r--r--  arch/arm64/include/asm/asm-bug.h | 8
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h | 1
-rw-r--r--  arch/arm64/include/asm/assembler.h | 51
-rw-r--r--  arch/arm64/include/asm/barrier.h | 2
-rw-r--r--  arch/arm64/include/asm/bitrev.h | 1
-rw-r--r--  arch/arm64/include/asm/boot.h | 1
-rw-r--r--  arch/arm64/include/asm/clocksource.h | 1
-rw-r--r--  arch/arm64/include/asm/cpu.h | 4
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 3
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 42
-rw-r--r--  arch/arm64/include/asm/cpuidle.h | 1
-rw-r--r--  arch/arm64/include/asm/current.h | 1
-rw-r--r--  arch/arm64/include/asm/daifflags.h | 72
-rw-r--r--  arch/arm64/include/asm/efi.h | 1
-rw-r--r--  arch/arm64/include/asm/elf.h | 4
-rw-r--r--  arch/arm64/include/asm/esr.h | 3
-rw-r--r--  arch/arm64/include/asm/extable.h | 1
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 7
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 71
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h | 148
-rw-r--r--  arch/arm64/include/asm/hypervisor.h | 1
-rw-r--r--  arch/arm64/include/asm/irq.h | 1
-rw-r--r--  arch/arm64/include/asm/irq_work.h | 1
-rw-r--r--  arch/arm64/include/asm/irqflags.h | 40
-rw-r--r--  arch/arm64/include/asm/kasan.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 5
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 11
-rw-r--r--  arch/arm64/include/asm/lse.h | 1
-rw-r--r--  arch/arm64/include/asm/memory.h | 24
-rw-r--r--  arch/arm64/include/asm/mmzone.h | 1
-rw-r--r--  arch/arm64/include/asm/numa.h | 1
-rw-r--r--  arch/arm64/include/asm/paravirt.h | 1
-rw-r--r--  arch/arm64/include/asm/pci.h | 1
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 14
-rw-r--r--  arch/arm64/include/asm/processor.h | 28
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 173
-rw-r--r--  arch/arm64/include/asm/spinlock_types.h | 6
-rw-r--r--  arch/arm64/include/asm/stack_pointer.h | 1
-rw-r--r--  arch/arm64/include/asm/stackprotector.h | 1
-rw-r--r--  arch/arm64/include/asm/suspend.h | 1
-rw-r--r--  arch/arm64/include/asm/sync_bitops.h | 1
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 121
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 5
-rw-r--r--  arch/arm64/include/asm/topology.h | 9
-rw-r--r--  arch/arm64/include/asm/traps.h | 8
-rw-r--r--  arch/arm64/include/asm/xen/events.h | 1
-rw-r--r--  arch/arm64/include/asm/xen/xen-ops.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/uapi/asm/auxvec.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/bitsperlong.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/byteorder.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/fcntl.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 7
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/param.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/perf_regs.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/posix_types.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h | 140
-rw-r--r--  arch/arm64/include/uapi/asm/setup.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/sigcontext.h | 121
-rw-r--r--  arch/arm64/include/uapi/asm/siginfo.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/signal.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/stat.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/statfs.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/ucontext.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/arm64/kernel/Makefile | 3
-rw-r--r--  arch/arm64/kernel/acpi_numa.c | 1
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 25
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 206
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 12
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 5
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S | 17
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 12
-rw-r--r--  arch/arm64/kernel/entry.S | 128
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 910
-rw-r--r--  arch/arm64/kernel/head.S | 30
-rw-r--r--  arch/arm64/kernel/hibernate.c | 5
-rw-r--r--  arch/arm64/kernel/io.c | 12
-rw-r--r--  arch/arm64/kernel/machine_kexec.c | 4
-rw-r--r--  arch/arm64/kernel/perf_regs.c | 1
-rw-r--r--  arch/arm64/kernel/probes/Makefile | 1
-rw-r--r--  arch/arm64/kernel/probes/kprobes_trampoline.S | 1
-rw-r--r--  arch/arm64/kernel/process.c | 64
-rw-r--r--  arch/arm64/kernel/ptrace.c | 280
-rw-r--r--  arch/arm64/kernel/setup.c | 15
-rw-r--r--  arch/arm64/kernel/signal.c | 179
-rw-r--r--  arch/arm64/kernel/signal32.c | 2
-rw-r--r--  arch/arm64/kernel/sleep.S | 1
-rw-r--r--  arch/arm64/kernel/smp.c | 18
-rw-r--r--  arch/arm64/kernel/suspend.c | 9
-rw-r--r--  arch/arm64/kernel/trace-events-emulation.h | 1
-rw-r--r--  arch/arm64/kernel/traps.c | 111
-rw-r--r--  arch/arm64/kernel/vdso/Makefile | 1
-rwxr-xr-x  arch/arm64/kernel/vdso/gen_vdso_offsets.sh | 1
-rw-r--r--  arch/arm64/kernel/vdso/gettimeofday.S | 2
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/arm64/kvm/Kconfig | 1
-rw-r--r--  arch/arm64/kvm/Makefile | 1
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 8
-rw-r--r--  arch/arm64/kvm/hyp/Makefile | 3
-rw-r--r--  arch/arm64/kvm/hyp/debug-sr.c | 24
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 12
-rw-r--r--  arch/arm64/kvm/inject_fault.c | 16
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 292
-rw-r--r--  arch/arm64/kvm/trace.h | 1
-rw-r--r--  arch/arm64/lib/Makefile | 3
-rw-r--r--  arch/arm64/lib/delay.c | 23
-rw-r--r--  arch/arm64/lib/tishift.S | 80
-rw-r--r--  arch/arm64/mm/Makefile | 1
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 5
-rw-r--r--  arch/arm64/mm/extable.c | 1
-rw-r--r--  arch/arm64/mm/fault.c | 74
-rw-r--r--  arch/arm64/mm/mmu.c | 4
-rw-r--r--  arch/arm64/mm/physaddr.c | 1
-rw-r--r--  arch/arm64/mm/proc.S | 9
-rw-r--r--  arch/arm64/mm/ptdump_debugfs.c | 1
230 files changed, 3455 insertions, 1004 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0df64a6a56d4..ba6aab55d464 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -21,8 +21,25 @@ config ARM64
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select ARCH_HAVE_NMI_SAFE_CMPXCHG if ACPI_APEI_SEA
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select ARCH_INLINE_READ_LOCK if !PREEMPT
+ select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
+ select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_QUEUED_RWLOCKS
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING
@@ -98,7 +115,7 @@ config ARM64
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP if NUMA
- select HAVE_NMI if ACPI_APEI_SEA
+ select HAVE_NMI
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -119,6 +136,7 @@ config ARM64
select PCI_ECAM if ACPI
select POWER_RESET
select POWER_SUPPLY
+ select REFCOUNT_FULL
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
@@ -539,6 +557,25 @@ config QCOM_QDF2400_ERRATUM_0065
If unsure, say Y.
+
+config SOCIONEXT_SYNQUACER_PREITS
+ bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
+ default y
+ help
+ Socionext Synquacer SoCs implement a separate h/w block to generate
+ MSI doorbell writes with non-zero values for the device ID.
+
+ If unsure, say Y.
+
+config HISILICON_ERRATUM_161600802
+ bool "Hip07 161600802: Erroneous redistributor VLPI base"
+ default y
+ help
+ The HiSilicon Hip07 SoC uses the wrong redistributor base
+ when issued ITS commands such as VMOVP and VMAPP, and requires
+ a 128kB offset to be applied to the target address in these commands.
+
+ If unsure, say Y.
endmenu
@@ -806,6 +843,7 @@ config FORCE_MAX_ZONEORDER
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
+ depends on SYSCTL
help
Legacy software support may require certain instructions
that have been deprecated or obsoleted in the architecture.
@@ -946,7 +984,7 @@ config ARM64_UAO
help
User Access Override (UAO; part of the ARMv8.2 Extensions)
causes the 'unprivileged' variant of the load/store instructions to
- be overriden to be privileged.
+ be overridden to be privileged.
This option changes get_user() and friends to use the 'unprivileged'
variant of the load/store instructions. This ensures that user-space
@@ -975,6 +1013,17 @@ config ARM64_PMEM
endmenu
+config ARM64_SVE
+ bool "ARM Scalable Vector Extension support"
+ default y
+ help
+ The Scalable Vector Extension (SVE) is an extension to the AArch64
+ execution state which complements and extends the SIMD functionality
+ of the base architecture to support much larger vectors and to enable
+ additional vectorisation opportunities.
+
+ To enable use of this extension on CPUs that implement it, say Y.
+
config ARM64_MODULE_CMODEL_LARGE
bool
@@ -1063,6 +1112,7 @@ config EFI_STUB
config EFI
bool "UEFI runtime support"
depends on OF && !CPU_BIG_ENDIAN
+ depends on KERNEL_MODE_NEON
select LIBFDT
select UCS2_STRING
select EFI_PARAMS_FROM_FDT
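The Kconfig hunk above introduces new symbols such as ARM64_SVE and the pre-ITS/erratum workarounds. A minimal sketch, assuming only the standard IS_ENABLED() helper from <linux/kconfig.h>, of how kernel code typically gates on such a symbol; the function name is purely illustrative and not part of this patch:

/*
 * Illustrative sketch only: compile-time gating on the CONFIG_ARM64_SVE
 * symbol added above. IS_ENABLED() evaluates to 1 only when the option
 * is set to y in the build configuration.
 */
#include <linux/kconfig.h>
#include <linux/types.h>

static inline bool example_sve_configured(void)
{
	return IS_ENABLED(CONFIG_ARM64_SVE);
}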
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6b54ee8c1262..1d03ef54295a 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -161,6 +161,9 @@ config ARCH_SEATTLE
config ARCH_SHMOBILE
bool
+config ARCH_SYNQUACER
+ bool "Socionext SynQuacer SoC Family"
+
config ARCH_RENESAS
bool "Renesas SoC Platforms"
select ARCH_SHMOBILE
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 939b310913cf..b35788c909f1 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -14,8 +14,12 @@ LDFLAGS_vmlinux :=-p --no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS :=-9
-ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux += -pie -shared -Bsymbolic
+ifeq ($(CONFIG_RELOCATABLE), y)
+# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
+# for relative relocs, since this leads to better Image compression
+# with the relocation offsets always being zero.
+LDFLAGS_vmlinux += -pie -shared -Bsymbolic \
+ $(call ld-option, --no-apply-dynamic-relocs)
endif
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
@@ -53,6 +57,8 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
+KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
+
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
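The Makefile hunk above wraps --no-apply-dynamic-relocs in $(call ld-option, ...) so the flag is only passed to linkers that accept it, and defines CONFIG_ARCH_SUPPORTS_INT128 when the compiler is GCC 5 or newer. A minimal sketch, assuming that define, of the kind of code it enables elsewhere; the typedef and function below are hypothetical, not taken from this patch:

/*
 * Illustrative sketch only: with CONFIG_ARCH_SUPPORTS_INT128 defined by
 * the Makefile hunk above, C code can use the compiler's native 128-bit
 * type for a single widening 64x64->128 multiply.
 */
#include <linux/types.h>

#ifdef CONFIG_ARCH_SUPPORTS_INT128
typedef unsigned __int128 example_u128;

static inline example_u128 example_mul_u64(u64 a, u64 b)
{
	return (example_u128)a * b;	/* one widening multiply */
}
#endif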
diff --git a/arch/arm64/boot/dts/.gitignore b/arch/arm64/boot/dts/.gitignore
deleted file mode 100644
index b60ed208c779..000000000000
--- a/arch/arm64/boot/dts/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.dtb
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 8e1951273fd7..d7c22d51bc50 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,33 +1,26 @@
-dts-dirs += actions
-dts-dirs += al
-dts-dirs += allwinner
-dts-dirs += altera
-dts-dirs += amd
-dts-dirs += amlogic
-dts-dirs += apm
-dts-dirs += arm
-dts-dirs += broadcom
-dts-dirs += cavium
-dts-dirs += exynos
-dts-dirs += freescale
-dts-dirs += hisilicon
-dts-dirs += marvell
-dts-dirs += mediatek
-dts-dirs += nvidia
-dts-dirs += qcom
-dts-dirs += realtek
-dts-dirs += renesas
-dts-dirs += rockchip
-dts-dirs += socionext
-dts-dirs += sprd
-dts-dirs += xilinx
-dts-dirs += lg
-dts-dirs += zte
-
-subdir-y := $(dts-dirs)
-
-dtstree := $(srctree)/$(src)
-
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(foreach d,$(dts-dirs), $(wildcard $(dtstree)/$(d)/*.dts)))
-
-always := $(dtb-y)
+# SPDX-License-Identifier: GPL-2.0
+subdir-y += actions
+subdir-y += al
+subdir-y += allwinner
+subdir-y += altera
+subdir-y += amd
+subdir-y += amlogic
+subdir-y += apm
+subdir-y += arm
+subdir-y += broadcom
+subdir-y += cavium
+subdir-y += exynos
+subdir-y += freescale
+subdir-y += hisilicon
+subdir-y += marvell
+subdir-y += mediatek
+subdir-y += nvidia
+subdir-y += qcom
+subdir-y += realtek
+subdir-y += renesas
+subdir-y += rockchip
+subdir-y += socionext
+subdir-y += sprd
+subdir-y += xilinx
+subdir-y += lg
+subdir-y += zte
diff --git a/arch/arm64/boot/dts/actions/Makefile b/arch/arm64/boot/dts/actions/Makefile
index 62922d688ce3..cc4661256356 100644
--- a/arch/arm64/boot/dts/actions/Makefile
+++ b/arch/arm64/boot/dts/actions/Makefile
@@ -1,5 +1 @@
dtb-$(CONFIG_ARCH_ACTIONS) += s900-bubblegum-96.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/al/Makefile b/arch/arm64/boot/dts/al/Makefile
index 8a6cde4f9b23..036e387112ed 100644
--- a/arch/arm64/boot/dts/al/Makefile
+++ b/arch/arm64/boot/dts/al/Makefile
@@ -1,5 +1 @@
dtb-$(CONFIG_ARCH_ALPINE) += alpine-v2-evp.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index 19c3fbd75eda..7d3acb355ff3 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-bananapi-m64.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-nanopi-a64.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-olinuxino.dtb
@@ -8,7 +9,3 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-pc2.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-prime.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-zero-plus2.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-nanopi-neo2.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index caf8b6fbe5e3..d06e34b5d192 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -61,13 +61,6 @@
chosen {
stdout-path = "serial0:115200n8";
};
-
- reg_vcc3v3: vcc3v3 {
- compatible = "regulator-fixed";
- regulator-name = "vcc3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
};
&ehci0 {
@@ -91,7 +84,7 @@
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
- vmmc-supply = <&reg_vcc3v3>;
+ vmmc-supply = <&reg_dcdc1>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
cd-inverted;
disable-wp;
diff --git a/arch/arm64/boot/dts/altera/Makefile b/arch/arm64/boot/dts/altera/Makefile
index d7a641698d77..68ba0882a8bb 100644
--- a/arch/arm64/boot/dts/altera/Makefile
+++ b/arch/arm64/boot/dts/altera/Makefile
@@ -1,5 +1 @@
dtb-$(CONFIG_ARCH_STRATIX10) += socfpga_stratix10_socdk.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/amd/Makefile b/arch/arm64/boot/dts/amd/Makefile
index ba84770f789f..6a6093064a32 100644
--- a/arch/arm64/boot/dts/amd/Makefile
+++ b/arch/arm64/boot/dts/amd/Makefile
@@ -1,7 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive.dtb \
amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb \
husky.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
index 8e3074a4947d..8e341be9a399 100644
--- a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
+++ b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DTS file for AMD Seattle Overdrive Development Board
* Note: For Seattle Rev.B0
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
index ed5e043f37aa..92cef05c6b74 100644
--- a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
+++ b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DTS file for AMD Seattle Overdrive Development Board
* Note: For Seattle Rev.B1
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive.dts b/arch/arm64/boot/dts/amd/amd-overdrive.dts
index 128fa942f09e..41b3a6c0993d 100644
--- a/arch/arm64/boot/dts/amd/amd-overdrive.dts
+++ b/arch/arm64/boot/dts/amd/amd-overdrive.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DTS file for AMD Seattle Overdrive Development Board
*
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-clks.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-clks.dtsi
index f623c46525a6..2dd2c28171ee 100644
--- a/arch/arm64/boot/dts/amd/amd-seattle-clks.dtsi
+++ b/arch/arm64/boot/dts/amd/amd-seattle-clks.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DTS file for AMD Seattle Clocks
*
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
index bd3adeac374f..125f4deb52fe 100644
--- a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
+++ b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DTS file for AMD Seattle SoC
*
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
index 8e8631952497..d97498361ce3 100644
--- a/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
+++ b/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DTS file for AMD Seattle XGBE (RevB)
*
diff --git a/arch/arm64/boot/dts/amd/husky.dts b/arch/arm64/boot/dts/amd/husky.dts
index 1381d4b2bf1b..7acde34772cb 100644
--- a/arch/arm64/boot/dts/amd/husky.dts
+++ b/arch/arm64/boot/dts/amd/husky.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DTS file for AMD/Linaro 96Boards Enterprise Edition Server (Husky) Board
* Note: Based-on AMD Seattle Rev.B0
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 7a9f48c27b1f..f84b83bb9809 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nanopi-k2.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nexbox-a95x.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-odroidc2.dtb
@@ -19,7 +20,3 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxm-nexbox-a1.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q200.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q201.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-rbox-pro.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index c89010e56488..4157987f4a3d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -168,7 +168,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -194,7 +195,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -212,10 +214,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
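The hunks above, and the similar ones in the board files that follow, add a second pinctrl state named "clk-gate" alongside "default". A hedged sketch of how a driver can select that state through the generic pinctrl consumer API; the function is illustrative and not the actual meson-gx-mmc code:

/*
 * Illustrative sketch only: switching to the "clk-gate" pinctrl state
 * declared in the device tree hunks above, via the generic consumer API.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int example_enter_clk_gate(struct device *dev)
{
	struct pinctrl *p = devm_pinctrl_get(dev);
	struct pinctrl_state *s;

	if (IS_ERR(p))
		return PTR_ERR(p);

	/* "clk-gate" matches the second entry of pinctrl-names above */
	s = pinctrl_lookup_state(p, "clk-gate");
	if (IS_ERR(s))
		return PTR_ERR(s);

	return pinctrl_select_state(p, s);
}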
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
index 9697a7a79464..4b17a76959b2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
@@ -107,6 +107,9 @@
states = <3300000 0>,
<1800000 1>;
+
+ regulator-settling-time-up-us = <100>;
+ regulator-settling-time-down-us = <5000>;
};
wifi_32k: wifi-32k {
@@ -250,7 +253,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>, <&sdio_irq_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -276,11 +280,16 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
- max-frequency = <100000000>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ max-frequency = <200000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
@@ -294,10 +303,10 @@
&sd_emmc_c {
status = "disabled";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
max-frequency = <200000000>;
non-removable;
disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 9c59c3c6d1b6..38dfdde5c147 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -51,7 +51,7 @@
/ {
compatible = "nexbox,a95x", "amlogic,meson-gxbb";
model = "NEXBOX A95X";
-
+
aliases {
serial0 = &uart_AO;
};
@@ -232,7 +232,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -253,7 +254,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -271,10 +273,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index d147c853ab05..1ffa1c238a72 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -50,7 +50,7 @@
/ {
compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb";
model = "Hardkernel ODROID-C2";
-
+
aliases {
serial0 = &uart_AO;
};
@@ -253,7 +253,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -271,10 +272,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
max-frequency = <200000000>;
non-removable;
disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index 81ffc689a5bf..23c08c3afd0a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -194,7 +194,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -220,10 +221,14 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
max-frequency = <100000000>;
disable-wp;
@@ -238,10 +243,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 346753fb6324..f2bc6dea1fc6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -155,7 +155,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins &sdio_irq_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -181,7 +182,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -198,10 +200,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 52f1687e7a09..af834cdbba79 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -392,6 +392,17 @@
};
};
+ emmc_clk_gate_pins: emmc_clk_gate {
+ mux {
+ groups = "BOOT_8";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "BOOT_8";
+ bias-pull-down;
+ };
+ };
+
nor_pins: nor {
mux {
groups = "nor_d",
@@ -430,6 +441,17 @@
};
};
+ sdcard_clk_gate_pins: sdcard_clk_gate {
+ mux {
+ groups = "CARD_2";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "CARD_2";
+ bias-pull-down;
+ };
+ };
+
sdio_pins: sdio {
mux {
groups = "sdio_d0",
@@ -442,6 +464,17 @@
};
};
+ sdio_clk_gate_pins: sdio_clk_gate {
+ mux {
+ groups = "GPIOX_4";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "GPIOX_4";
+ bias-pull-down;
+ };
+ };
+
sdio_irq_pins: sdio_irq {
mux {
groups = "sdio_irq";
@@ -661,21 +694,21 @@
&sd_emmc_a {
clocks = <&clkc CLKID_SD_EMMC_A>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_A_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
&sd_emmc_b {
clocks = <&clkc CLKID_SD_EMMC_B>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_B_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
&sd_emmc_c {
clocks = <&clkc CLKID_SD_EMMC_C>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_C_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
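The final hunk above moves each controller's "clkin0" input from the fixed xtal to the clock controller's CLKID_SD_EMMC_*_CLK0 outputs, so the input rate can be managed through the common clock framework. A hedged sketch of a consumer looking up that input by the name given in clock-names and requesting a rate; the function name and usage are illustrative, not the actual driver code:

/*
 * Illustrative sketch only: request the "clkin0" clock named in the
 * clock-names property above and set a rate via the common clock framework.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_set_clkin0(struct device *dev, unsigned long rate_hz)
{
	struct clk *clkin0 = devm_clk_get(dev, "clkin0");

	if (IS_ERR(clkin0))
		return PTR_ERR(clkin0);

	return clk_set_rate(clkin0, rate_hz);
}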
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
index 2a5804ce7f4b..977b4240f3c1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
@@ -123,7 +123,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -141,10 +142,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <100000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index 69ca14ac10fa..64c54c92e214 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -91,6 +91,9 @@
states = <3300000 0>,
<1800000 1>;
+
+ regulator-settling-time-up-us = <200>;
+ regulator-settling-time-down-us = <50000>;
};
vddio_boot: regulator-vddio_boot {
@@ -197,10 +200,14 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
max-frequency = <100000000>;
disable-wp;
@@ -215,10 +222,12 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
cap-mmc-highspeed;
+ mmc-ddr-3_3v;
max-frequency = <50000000>;
non-removable;
disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index 4c2ac7650fcd..1b8f32867aa1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -189,7 +189,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -210,7 +211,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -228,10 +230,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index f3eea8e89d12..129af9068814 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -95,7 +95,8 @@
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
@@ -116,7 +117,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -134,10 +136,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index d6876e64979e..d8dd3298b15c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -281,6 +281,17 @@
};
};
+ emmc_clk_gate_pins: emmc_clk_gate {
+ mux {
+ groups = "BOOT_8";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "BOOT_8";
+ bias-pull-down;
+ };
+ };
+
nor_pins: nor {
mux {
groups = "nor_d",
@@ -319,6 +330,17 @@
};
};
+ sdcard_clk_gate_pins: sdcard_clk_gate {
+ mux {
+ groups = "CARD_2";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "CARD_2";
+ bias-pull-down;
+ };
+ };
+
sdio_pins: sdio {
mux {
groups = "sdio_d0",
@@ -331,6 +353,17 @@
};
};
+ sdio_clk_gate_pins: sdio_clk_gate {
+ mux {
+ groups = "GPIOX_4";
+ function = "gpio_periphs";
+ };
+ cfg-pull-down {
+ pins = "GPIOX_4";
+ bias-pull-down;
+ };
+ };
+
sdio_irq_pins: sdio_irq {
mux {
groups = "sdio_irq";
@@ -603,21 +636,21 @@
&sd_emmc_a {
clocks = <&clkc CLKID_SD_EMMC_A>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_A_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
&sd_emmc_b {
clocks = <&clkc CLKID_SD_EMMC_B>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_B_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
&sd_emmc_c {
clocks = <&clkc CLKID_SD_EMMC_C>,
- <&xtal>,
+ <&clkc CLKID_SD_EMMC_C_CLK0>,
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index 9b10c5f4f8c0..22c697732f66 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -175,7 +175,8 @@
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
@@ -193,10 +194,10 @@
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_pins>;
- pinctrl-names = "default";
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
index 08f1dd69b679..470f72bb863c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
@@ -220,7 +220,6 @@
pinctrl-names = "default";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <200000000>;
non-removable;
diff --git a/arch/arm64/boot/dts/apm/Makefile b/arch/arm64/boot/dts/apm/Makefile
index c75f17a49471..55b5cdca13b8 100644
--- a/arch/arm64/boot/dts/apm/Makefile
+++ b/arch/arm64/boot/dts/apm/Makefile
@@ -1,6 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
dtb-$(CONFIG_ARCH_XGENE) += apm-merlin.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/arm/Makefile b/arch/arm64/boot/dts/arm/Makefile
index 75cc2aa10101..4256bae99925 100644
--- a/arch/arm64/boot/dts/arm/Makefile
+++ b/arch/arm64/boot/dts/arm/Makefile
@@ -1,8 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_VEXPRESS) += foundation-v8.dtb foundation-v8-gicv3.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb juno-r2.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dts b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dts
index 35588dfa095c..4825cdbdcf46 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dts
+++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ARM Ltd.
*
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dts b/arch/arm64/boot/dts/arm/foundation-v8.dts
index 71168077312d..8a9136f4ab74 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dts
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ARM Ltd.
*
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
index 8ecdd4331980..f0b67e439f58 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ARM Ltd.
*
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index fbafe62d6b22..f165f04db0c9 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "juno-clocks.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi b/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi
index aa03050dd7df..0c43fb3525eb 100644
--- a/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/ {
funnel@20130000 { /* cssys1 */
compatible = "arm,coresight-funnel", "arm,primecell";
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index a83ed2c6bbf7..7810632d3438 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ARM Ltd. Fast Models
*
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
index 528875c75598..e18fe006cc2a 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ARM Ltd. Fast Models
*
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
index e3a171162bb4..2cb604957808 100644
--- a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
+++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ARM Ltd. Versatile Express
*
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
index 3eaef3895d66..2a2591ef1fee 100644
--- a/arch/arm64/boot/dts/broadcom/Makefile
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -1,7 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_BCM2835) += bcm2837-rpi-3-b.dtb
-dts-dirs += northstar2
-dts-dirs += stingray
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
+subdir-y += northstar2
+subdir-y += stingray
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
index 699d340a3437..89b78d6c19bf 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
+++ b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
@@ -1 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
#include "arm/bcm2837-rpi-3-b.dts"
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/Makefile b/arch/arm64/boot/dts/broadcom/northstar2/Makefile
index e01a1485b813..83736004336d 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/Makefile
+++ b/arch/arm64/boot/dts/broadcom/northstar2/Makefile
@@ -1,6 +1,2 @@
dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-svk.dtb
dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-xmc.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/broadcom/stingray/Makefile b/arch/arm64/boot/dts/broadcom/stingray/Makefile
index f70028edad63..c4d06cffcb11 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/Makefile
+++ b/arch/arm64/boot/dts/broadcom/stingray/Makefile
@@ -1,6 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_BCM_IPROC) += bcm958742k.dtb
dtb-$(CONFIG_ARCH_BCM_IPROC) += bcm958742t.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/cavium/Makefile b/arch/arm64/boot/dts/cavium/Makefile
index 581b2c1c400a..c178f7e06e18 100644
--- a/arch/arm64/boot/dts/cavium/Makefile
+++ b/arch/arm64/boot/dts/cavium/Makefile
@@ -1,6 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_THUNDER) += thunder-88xx.dtb
dtb-$(CONFIG_ARCH_THUNDER2) += thunder2-99xx.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/exynos/Makefile b/arch/arm64/boot/dts/exynos/Makefile
index 7ddea53769a7..e0a2facde6a2 100644
--- a/arch/arm64/boot/dts/exynos/Makefile
+++ b/arch/arm64/boot/dts/exynos/Makefile
@@ -1,8 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_EXYNOS) += \
exynos5433-tm2.dtb \
exynos5433-tm2e.dtb \
exynos7-espresso.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 72c4b525726f..86e18adb695a 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb
@@ -12,7 +13,3 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/hisilicon/Makefile b/arch/arm64/boot/dts/hisilicon/Makefile
index 8960ecafd37d..03d93f8ef8a9 100644
--- a/arch/arm64/boot/dts/hisilicon/Makefile
+++ b/arch/arm64/boot/dts/hisilicon/Makefile
@@ -1,10 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_HISI) += hi3660-hikey960.dtb
dtb-$(CONFIG_ARCH_HISI) += hi3798cv200-poplar.dtb
dtb-$(CONFIG_ARCH_HISI) += hi6220-hikey.dtb
dtb-$(CONFIG_ARCH_HISI) += hip05-d02.dtb
dtb-$(CONFIG_ARCH_HISI) += hip06-d03.dtb
dtb-$(CONFIG_ARCH_HISI) += hip07-d05.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
index fd4705c451e2..e9f87cb61ade 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dts file for Hisilicon HiKey960 Development Board
*
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
index b7a90d632959..13ae69f5a327 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dts file for Hisilicon Hi3660 SoC
*
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 2b526304ed27..3aee6123d161 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dts file for Hisilicon HiKey Development Board
*
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 02a3aa4b2165..ff1dc89f599e 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dts file for Hisilicon Hi6220 SoC
*
diff --git a/arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi b/arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi
index 0916e8459d6b..e7d22619a4c0 100644
--- a/arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* pinctrl dts fils for Hislicon HiKey development board
*
diff --git a/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi b/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
index 7e542d28dadb..d11efc81958c 100644
--- a/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* pinctrl dts fils for Hislicon HiKey960 development board
*
diff --git a/arch/arm64/boot/dts/lg/Makefile b/arch/arm64/boot/dts/lg/Makefile
index 5c7b54c12adc..4c3959e24e1b 100644
--- a/arch/arm64/boot/dts/lg/Makefile
+++ b/arch/arm64/boot/dts/lg/Makefile
@@ -1,6 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_LG1K) += lg1312-ref.dtb
dtb-$(CONFIG_ARCH_LG1K) += lg1313-ref.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/lg/lg1312-ref.dts b/arch/arm64/boot/dts/lg/lg1312-ref.dts
index 6d78d6bc7f9c..260a2c5b19e5 100644
--- a/arch/arm64/boot/dts/lg/lg1312-ref.dts
+++ b/arch/arm64/boot/dts/lg/lg1312-ref.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dts file for lg1312 Reference Board.
*
diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
index fbafa24cd533..860c8fb10795 100644
--- a/arch/arm64/boot/dts/lg/lg1312.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dts file for lg1312 SoC
*
diff --git a/arch/arm64/boot/dts/lg/lg1313-ref.dts b/arch/arm64/boot/dts/lg/lg1313-ref.dts
index df0ece43cfbf..e89ae853788a 100644
--- a/arch/arm64/boot/dts/lg/lg1313-ref.dts
+++ b/arch/arm64/boot/dts/lg/lg1313-ref.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dts file for lg1313 Reference Board.
*
diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi
index e703e1149c75..1887af654a7d 100644
--- a/arch/arm64/boot/dts/lg/lg1313.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1313.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dts file for lg1313 SoC
*
diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile
index 6cff81eeaae2..cb454beede55 100644
--- a/arch/arm64/boot/dts/marvell/Makefile
+++ b/arch/arm64/boot/dts/marvell/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Berlin SoC Family
dtb-$(CONFIG_ARCH_BERLIN) += berlin4ct-dmp.dtb
dtb-$(CONFIG_ARCH_BERLIN) += berlin4ct-stb.dtb
@@ -9,7 +10,3 @@ dtb-$(CONFIG_ARCH_MVEBU) += armada-7040-db.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-db.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-mcbin.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-8080-db.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 4d360713ed12..30d48ecf46e0 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -254,7 +254,7 @@
ap_syscon: system-controller@6f4000 {
compatible = "syscon", "simple-mfd";
- reg = <0x6f4000 0x1000>;
+ reg = <0x6f4000 0x2000>;
ap_clk: clock {
compatible = "marvell,ap806-clock";
@@ -265,7 +265,7 @@
compatible = "marvell,ap806-pinctrl";
};
- ap_gpio: gpio {
+ ap_gpio: gpio@1040 {
compatible = "marvell,armada-8k-gpio";
offset = <0x1040>;
ngpios = <20>;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index 8263a8a504a8..f2aa2a81de4d 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -336,7 +336,7 @@
/* non-prefetchable memory */
0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
clocks = <&cpm_clk 1 13>;
@@ -362,7 +362,7 @@
/* non-prefetchable memory */
0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
@@ -389,7 +389,7 @@
/* non-prefetchable memory */
0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index b71ee6c83668..4fe70323abb3 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -335,7 +335,7 @@
/* non-prefetchable memory */
0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
clocks = <&cps_clk 1 13>;
@@ -361,7 +361,7 @@
/* non-prefetchable memory */
0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
@@ -388,7 +388,7 @@
/* non-prefetchable memory */
0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 151723b5c733..ac17f60f998c 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -1,10 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_MEDIATEK) += mt2712-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt6755-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt6795-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt6797-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7622-rfb1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index b99a27372965..26396ef53bde 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -682,8 +682,7 @@
};
mmc0: mmc@11230000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11230000 0 0x1000>;
interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_0>,
@@ -693,8 +692,7 @@
};
mmc1: mmc@11240000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11240000 0 0x1000>;
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_1>,
@@ -704,8 +702,7 @@
};
mmc2: mmc@11250000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11250000 0 0x1000>;
interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_2>,
@@ -715,8 +712,7 @@
};
mmc3: mmc@11260000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11260000 0 0x1000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_3>,
diff --git a/arch/arm64/boot/dts/nvidia/Makefile b/arch/arm64/boot/dts/nvidia/Makefile
index 18941458cb4d..676aa2f238d1 100644
--- a/arch/arm64/boot/dts/nvidia/Makefile
+++ b/arch/arm64/boot/dts/nvidia/Makefile
@@ -1,9 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra132-norrin.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-0000.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-2180.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2571.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-smaug.dtb
dtb-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186-p2771-0000.dtb
-
-always := $(dtb-y)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
index 759af96a6b49..a0385a386a3f 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
#include <dt-bindings/input/input.h>
diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
index c2f0f2743578..fa5a7c4bc807 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/clock/tegra124-car.h>
#include <dt-bindings/gpio/tegra-gpio.h>
#include <dt-bindings/memory/tegra124-mc.h>
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index 8daadadec63a..c71d762bf697 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
#include <dt-bindings/input/linux-event-codes.h>
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
index cf84d7046ad5..54f418d05e15 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "tegra186.dtsi"
#include <dt-bindings/mfd/max77620.h>
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 0b0552c9f7dd..a9c3eef6c4e0 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/clock/tegra186-clock.h>
#include <dt-bindings/gpio/tegra186-gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index 906fb836d241..d10d4430537a 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/mfd/max77620.h>
#include "tegra210.dtsi"
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-0000.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-0000.dts
index 1ddd8512e100..21c6d3749bc6 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-0000.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
#include "tegra210-p2530.dtsi"
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
index 7cb95e042117..37e3c46e753f 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
#include "tegra210-p2180.dtsi"
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2530.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2530.dtsi
index 0ec92578cacb..be6066ff97c9 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2530.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2530.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "tegra210.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2571.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2571.dts
index 576957a55801..e2a347e57215 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2571.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2571.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
#include <dt-bindings/input/input.h>
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2595.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2595.dtsi
index e008e3364d2a..6ae292da7294 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2595.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2595.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/ {
model = "NVIDIA Tegra210 P2595 I/O board";
compatible = "nvidia,p2595", "nvidia,tegra210";
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index e5fc67bf46c2..d67ef4319f3b 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/input/input.h>
/ {
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
index 7703227f5d1a..43cae4798870 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
#include <dt-bindings/input/input.h>
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 29f471e0f22a..9bdf19f2cca7 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/clock/tegra210-car.h>
#include <dt-bindings/gpio/tegra-gpio.h>
#include <dt-bindings/memory/tegra210-mc.h>
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index ff81d7e5805e..55ec5ee7f7e8 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8096-db820c.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq8074-hk01.dtb
@@ -5,7 +6,3 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8992-bullhead-rev-101.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8994-angler-rev-101.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
index 790b7775b901..ec2f0de67993 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
index 185388de914c..21d0822f1ca6 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/gpio/gpio.h>
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi
index 8e379782597a..59b29ddfb6e9 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
&pm8994_gpios {
diff --git a/arch/arm64/boot/dts/qcom/pm8004.dtsi b/arch/arm64/boot/dts/qcom/pm8004.dtsi
index ef2207afa86b..297b57bfa87a 100644
--- a/arch/arm64/boot/dts/qcom/pm8004.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8004.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/spmi/spmi.h>
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index 53deebf9f515..0223e60d8b6a 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/iio/qcom,spmi-vadc.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/spmi/spmi.h>
diff --git a/arch/arm64/boot/dts/qcom/pm8994.dtsi b/arch/arm64/boot/dts/qcom/pm8994.dtsi
index b413e44fd09e..80024c0b1c7c 100644
--- a/arch/arm64/boot/dts/qcom/pm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8994.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/spmi/spmi.h>
diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
index 57673f92805d..dae1cdc23f54 100644
--- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/spmi/spmi.h>
diff --git a/arch/arm64/boot/dts/realtek/Makefile b/arch/arm64/boot/dts/realtek/Makefile
index 8521e921e59a..6e2ae59a3745 100644
--- a/arch/arm64/boot/dts/realtek/Makefile
+++ b/arch/arm64/boot/dts/realtek/Makefile
@@ -1,5 +1 @@
dtb-$(CONFIG_ARCH_REALTEK) += rtd1295-zidoo-x9s.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index 381928bc1358..6b282283f1bf 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -1,8 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-x.dtb r8a7795-h3ulcb.dtb
dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-xs.dtb
dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-es1-salvator-x.dtb r8a7795-es1-h3ulcb.dtb
dtb-$(CONFIG_ARCH_R8A7796) += r8a7796-salvator-x.dtb r8a7796-m3ulcb.dtb
dtb-$(CONFIG_ARCH_R8A77995) += r8a77995-draak.dtb
-
-always := $(dtb-y)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index 4786c67b5e65..d9d885006a8e 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -62,6 +62,7 @@
brightness-levels = <256 128 64 16 8 4 0>;
default-brightness-level = <6>;
+ power-supply = <&reg_12v>;
enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
};
@@ -83,6 +84,15 @@
regulator-always-on;
};
+ reg_12v: regulator2 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
rsnd_ak4613: sound {
compatible = "simple-audio-card";
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index f1c9b13cea5c..ce2701e37d00 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-rock64.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-evb-act8846.dtb
@@ -10,7 +11,3 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-firefly.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-kevin.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire-excavator.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 6d615cb6e64d..41d61840fb99 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -582,7 +582,7 @@
vop_mmu: iommu@ff373f00 {
compatible = "rockchip,iommu";
reg = <0x0 0xff373f00 0x0 0x100>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vop_mmu";
#iommu-cells = <0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index e0518b4bc6c2..1070c8264c13 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -113,8 +113,7 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x0>;
enable-method = "psci";
- clocks = <&cru ARMCLKL>;
- operating-points-v2 = <&cluster0_opp>;
+
#cooling-cells = <2>; /* min followed by max */
};
@@ -123,8 +122,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x1>;
enable-method = "psci";
- clocks = <&cru ARMCLKL>;
- operating-points-v2 = <&cluster0_opp>;
};
cpu_l2: cpu@2 {
@@ -132,8 +129,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x2>;
enable-method = "psci";
- clocks = <&cru ARMCLKL>;
- operating-points-v2 = <&cluster0_opp>;
};
cpu_l3: cpu@3 {
@@ -141,8 +136,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x3>;
enable-method = "psci";
- clocks = <&cru ARMCLKL>;
- operating-points-v2 = <&cluster0_opp>;
};
cpu_b0: cpu@100 {
@@ -150,8 +143,7 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x100>;
enable-method = "psci";
- clocks = <&cru ARMCLKB>;
- operating-points-v2 = <&cluster1_opp>;
+
#cooling-cells = <2>; /* min followed by max */
};
@@ -160,8 +152,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x101>;
enable-method = "psci";
- clocks = <&cru ARMCLKB>;
- operating-points-v2 = <&cluster1_opp>;
};
cpu_b2: cpu@102 {
@@ -169,8 +159,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x102>;
enable-method = "psci";
- clocks = <&cru ARMCLKB>;
- operating-points-v2 = <&cluster1_opp>;
};
cpu_b3: cpu@103 {
@@ -178,62 +166,6 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x103>;
enable-method = "psci";
- clocks = <&cru ARMCLKB>;
- operating-points-v2 = <&cluster1_opp>;
- };
- };
-
- cluster0_opp: opp-table0 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp00 {
- opp-hz = /bits/ 64 <312000000>;
- opp-microvolt = <950000>;
- clock-latency-ns = <40000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <408000000>;
- opp-microvolt = <950000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <950000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <816000000>;
- opp-microvolt = <1025000>;
- };
- opp04 {
- opp-hz = /bits/ 64 <1008000000>;
- opp-microvolt = <1125000>;
- };
- };
-
- cluster1_opp: opp-table1 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp00 {
- opp-hz = /bits/ 64 <312000000>;
- opp-microvolt = <950000>;
- clock-latency-ns = <40000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <408000000>;
- opp-microvolt = <950000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <950000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <816000000>;
- opp-microvolt = <975000>;
- };
- opp04 {
- opp-hz = /bits/ 64 <1008000000>;
- opp-microvolt = <1050000>;
};
};
@@ -808,7 +740,7 @@
iep_mmu: iommu@ff900800 {
compatible = "rockchip,iommu";
reg = <0x0 0xff900800 0x0 0x100>;
- interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "iep_mmu";
#iommu-cells = <0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
index 7fd4bfcaa38e..fef82274a39d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
@@ -371,10 +371,10 @@
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
+ regulator-max-microvolt = <3000000>;
regulator-state-mem {
regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
+ regulator-suspend-microvolt = <3000000>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 53ff3d191a1d..910628d18add 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -325,12 +325,12 @@
vcc_sd: LDO_REG4 {
regulator-name = "vcc_sd";
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
+ regulator-max-microvolt = <3000000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
+ regulator-suspend-microvolt = <3000000>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
index 6c30bb02210d..0f873c897d0d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
@@ -315,10 +315,10 @@
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
+ regulator-max-microvolt = <3000000>;
regulator-state-mem {
regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
+ regulator-suspend-microvolt = <3000000>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index d79e9b3265b9..ab7629c5b856 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -1629,9 +1629,9 @@
compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi";
reg = <0x0 0xff960000 0x0 0x8000>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_MIPIDPHY_REF>, <&cru PCLK_MIPI_DSI0>,
- <&cru SCLK_DPHY_TX0_CFG>;
- clock-names = "ref", "pclk", "phy_cfg";
+ clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI0>,
+ <&cru SCLK_DPHY_TX0_CFG>, <&cru PCLK_VIO_GRF>;
+ clock-names = "ref", "pclk", "phy_cfg", "grf";
power-domains = <&power RK3399_PD_VIO>;
rockchip,grf = <&grf>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/socionext/Makefile b/arch/arm64/boot/dts/socionext/Makefile
index 4bc091b365fd..d45441249cb5 100644
--- a/arch/arm64/boot/dts/socionext/Makefile
+++ b/arch/arm64/boot/dts/socionext/Makefile
@@ -1,9 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_UNIPHIER) += \
uniphier-ld11-global.dtb \
uniphier-ld11-ref.dtb \
uniphier-ld20-global.dtb \
uniphier-ld20-ref.dtb \
uniphier-pxs3-ref.dtb
-
-always := $(dtb-y)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
index ee4aff53a5f5..09c429cb6d61 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
@@ -299,7 +299,8 @@
interrupts = <0 243 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
- clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 8>,
+ <&mio_clk 12>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
<&mio_rst 12>;
};
@@ -311,7 +312,8 @@
interrupts = <0 244 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>;
- clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 9>,
+ <&mio_clk 13>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
<&mio_rst 13>;
};
@@ -323,7 +325,8 @@
interrupts = <0 245 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb2>;
- clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+ clocks = <&sys_clk 8>, <&mio_clk 7>, <&mio_clk 10>,
+ <&mio_clk 14>;
resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>,
<&mio_rst 14>;
};
diff --git a/arch/arm64/boot/dts/sprd/Makefile b/arch/arm64/boot/dts/sprd/Makefile
index f0535e6eaaaa..2bdc23804f40 100644
--- a/arch/arm64/boot/dts/sprd/Makefile
+++ b/arch/arm64/boot/dts/sprd/Makefile
@@ -1,6 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_SPRD) += sc9836-openphone.dtb \
sp9860g-1h10.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/xilinx/Makefile b/arch/arm64/boot/dts/xilinx/Makefile
index ae16427f6a4a..a2d67084a514 100644
--- a/arch/arm64/boot/dts/xilinx/Makefile
+++ b/arch/arm64/boot/dts/xilinx/Makefile
@@ -1,5 +1 @@
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-ep108.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/zte/Makefile b/arch/arm64/boot/dts/zte/Makefile
index d86c4def6bc9..14a1cdfc1559 100644
--- a/arch/arm64/boot/dts/zte/Makefile
+++ b/arch/arm64/boot/dts/zte/Makefile
@@ -1,6 +1,2 @@
dtb-$(CONFIG_ARCH_ZX) += zx296718-evb.dtb
dtb-$(CONFIG_ARCH_ZX) += zx296718-pcbox.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 7ca54a76f6b9..70c517aa4501 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig ARM64_CRYPTO
bool "ARM64 Accelerated Cryptographic Algorithms"
diff --git a/arch/arm64/crypto/aes-ce-setkey.h b/arch/arm64/crypto/aes-ce-setkey.h
index f08a6471d034..fd9ecf07d88c 100644
--- a/arch/arm64/crypto/aes-ce-setkey.h
+++ b/arch/arm64/crypto/aes-ce-setkey.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len);
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 2326e39d5892..e63d0a8312de 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msi.h
generic-y += preempt.h
+generic-y += qrwlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += serial.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 59cca1d6ec54..32f465a80e4e 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -126,18 +126,6 @@ static inline const char *acpi_get_enable_method(int cpu)
*/
#define acpi_disable_cmcff 1
pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
-
-/*
- * Despite its name, this function must still broadcast the TLB
- * invalidation in order to ensure other CPUs don't end up with junk
- * entries as a result of speculation. Unusually, its also called in
- * IRQ context (ghes_iounmap_irq) so if we ever need to use IPIs for
- * TLB broadcasting, then we're in trouble here.
- */
-static inline void arch_apei_flush_tlb_one(unsigned long addr)
-{
- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
-}
#endif /* CONFIG_ACPI_APEI */
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 6e1cb8c5af4d..4a85c6952a22 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index b7e3f74822da..9becba9ab392 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -87,6 +87,11 @@ static inline void gic_write_ctlr(u32 val)
isb();
}
+static inline u32 gic_read_ctlr(void)
+{
+ return read_sysreg_s(SYS_ICC_CTLR_EL1);
+}
+
static inline void gic_write_grpen1(u32 val)
{
write_sysreg_s(val, SYS_ICC_IGRPEN1_EL1);
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index a652ce0a5cb2..bdedd8f748d1 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -144,6 +144,7 @@ static inline u32 arch_timer_get_cntkctl(void)
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
write_sysreg(cntkctl, cntkctl_el1);
+ isb();
}
static inline u64 arch_counter_get_cntpct(void)
diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
index 636e755bcdca..b3552c4a405f 100644
--- a/arch/arm64/include/asm/asm-bug.h
+++ b/arch/arm64/include/asm/asm-bug.h
@@ -22,10 +22,10 @@
#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
#define __BUGVERBOSE_LOCATION(file, line) \
.pushsection .rodata.str,"aMS",@progbits,1; \
- 2: .string file; \
+ 14472: .string file; \
.popsection; \
\
- .long 2b - 0b; \
+ .long 14472b - 14470b; \
.short line;
#else
#define _BUGVERBOSE_LOCATION(file, line)
@@ -36,11 +36,11 @@
#define __BUG_ENTRY(flags) \
.pushsection __bug_table,"aw"; \
.align 2; \
- 0: .long 1f - 0b; \
+ 14470: .long 14471f - 14470b; \
_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
.short flags; \
.popsection; \
- 1:
+ 14471:
#else
#define __BUG_ENTRY(flags)
#endif
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index ecd9788cd298..b3da6c886835 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_UACCESS_H
#define __ASM_ASM_UACCESS_H
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index d58a6253c6ab..aef72d886677 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -25,12 +25,41 @@
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
+#include <asm/debug-monitors.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
+ .macro save_and_disable_daif, flags
+ mrs \flags, daif
+ msr daifset, #0xf
+ .endm
+
+ .macro disable_daif
+ msr daifset, #0xf
+ .endm
+
+ .macro enable_daif
+ msr daifclr, #0xf
+ .endm
+
+ .macro restore_daif, flags:req
+ msr daif, \flags
+ .endm
+
+ /* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */
+ .macro inherit_daif, pstate:req, tmp:req
+ and \tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+ msr daif, \tmp
+ .endm
+
+ /* IRQ is the lowest priority flag, unconditionally unmask the rest. */
+ .macro enable_da_f
+ msr daifclr, #(8 | 4 | 1)
+ .endm
+
/*
* Enable and disable interrupts.
*/
@@ -51,13 +80,6 @@
msr daif, \flags
.endm
-/*
- * Enable and disable debug exceptions.
- */
- .macro disable_dbg
- msr daifset, #8
- .endm
-
.macro enable_dbg
msr daifclr, #8
.endm
@@ -65,31 +87,22 @@
.macro disable_step_tsk, flgs, tmp
tbz \flgs, #TIF_SINGLESTEP, 9990f
mrs \tmp, mdscr_el1
- bic \tmp, \tmp, #1
+ bic \tmp, \tmp, #DBG_MDSCR_SS
msr mdscr_el1, \tmp
isb // Synchronise with enable_dbg
9990:
.endm
+ /* call with daif masked */
.macro enable_step_tsk, flgs, tmp
tbz \flgs, #TIF_SINGLESTEP, 9990f
- disable_dbg
mrs \tmp, mdscr_el1
- orr \tmp, \tmp, #1
+ orr \tmp, \tmp, #DBG_MDSCR_SS
msr mdscr_el1, \tmp
9990:
.endm
/*
- * Enable both debug exceptions and interrupts. This is likely to be
- * faster than two daifclr operations, since writes to this register
- * are self-synchronising.
- */
- .macro enable_dbg_and_irq
- msr daifclr, #(8 | 2)
- .endm
-
-/*
* SMP data memory barrier
*/
.macro smp_dmb, opt
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 0fe7e43b7fbc..77651c49ef44 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -31,6 +31,8 @@
#define dmb(opt) asm volatile("dmb " #opt : : : "memory")
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
+#define psb_csync() asm volatile("hint #17" : : : "memory")
+
#define mb() dsb(sy)
#define rmb() dsb(ld)
#define wmb() dsb(st)
diff --git a/arch/arm64/include/asm/bitrev.h b/arch/arm64/include/asm/bitrev.h
index a5a0c3660137..6faf9fba8c65 100644
--- a/arch/arm64/include/asm/bitrev.h
+++ b/arch/arm64/include/asm/bitrev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BITREV_H
#define __ASM_BITREV_H
static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
index ebf2481889c3..355e552a9175 100644
--- a/arch/arm64/include/asm/boot.h
+++ b/arch/arm64/include/asm/boot.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BOOT_H
#define __ASM_BOOT_H
diff --git a/arch/arm64/include/asm/clocksource.h b/arch/arm64/include/asm/clocksource.h
index 0b350a7e26f3..0ece64a26c8c 100644
--- a/arch/arm64/include/asm/clocksource.h
+++ b/arch/arm64/include/asm/clocksource.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_CLOCKSOURCE_H
#define _ASM_CLOCKSOURCE_H
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 889226b4c6e1..88392272250e 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -41,6 +41,7 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64mmfr2;
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
+ u64 reg_id_aa64zfr0;
u32 reg_id_dfr0;
u32 reg_id_isar0;
@@ -59,6 +60,9 @@ struct cpuinfo_arm64 {
u32 reg_mvfr0;
u32 reg_mvfr1;
u32 reg_mvfr2;
+
+ /* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
+ u64 reg_zcr;
};
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 8da621627d7c..2ff7c5e8efab 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -40,7 +40,8 @@
#define ARM64_WORKAROUND_858921 19
#define ARM64_WORKAROUND_CAVIUM_30115 20
#define ARM64_HAS_DCPOP 21
+#define ARM64_SVE 22
-#define ARM64_NCAPS 22
+#define ARM64_NCAPS 23
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 428ee1f2468c..ac67cfc2585a 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -10,7 +10,9 @@
#define __ASM_CPUFEATURE_H
#include <asm/cpucaps.h>
+#include <asm/fpsimd.h>
#include <asm/hwcap.h>
+#include <asm/sigcontext.h>
#include <asm/sysreg.h>
/*
@@ -223,6 +225,13 @@ static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}
+static inline bool id_aa64pfr0_sve(u64 pfr0)
+{
+ u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
+
+ return val > 0;
+}
+
void __init setup_cpu_features(void);
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
@@ -262,6 +271,39 @@ static inline bool system_uses_ttbr0_pan(void)
!cpus_have_const_cap(ARM64_HAS_PAN);
}
+static inline bool system_supports_sve(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_SVE) &&
+ cpus_have_const_cap(ARM64_SVE);
+}
+
+/*
+ * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
+ * vector length.
+ *
+ * Use only if SVE is present.
+ * This function clobbers the SVE vector length.
+ */
+static inline u64 read_zcr_features(void)
+{
+ u64 zcr;
+ unsigned int vq_max;
+
+ /*
+ * Set the maximum possible VL, and write zeroes to all other
+ * bits to see if they stick.
+ */
+ sve_kernel_enable(NULL);
+ write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
+
+ zcr = read_sysreg_s(SYS_ZCR_EL1);
+ zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
+ vq_max = sve_vq_from_vl(sve_get_vl());
+ zcr |= vq_max - 1; /* set LEN field to maximum effective value */
+
+ return zcr;
+}
+
#endif /* __ASSEMBLY__ */
#endif
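
[Editor's note] The LEN field carried by the pseudo-ZCR that read_zcr_features() returns encodes the maximum vector length as a count of 128-bit quadwords minus one. A minimal standalone sketch of that arithmetic follows; it is not part of the patch, and the 0x1ff mask is an assumption mirroring ZCR_ELx_LEN_MASK from this series:

#include <stdio.h>

#define ZCR_LEN_MASK 0x1ffUL            /* assumed to match ZCR_ELx_LEN_MASK */

/* Vector length in bytes for a given (pseudo-)ZCR value. */
static unsigned int vl_from_zcr(unsigned long zcr)
{
        unsigned int vq = (zcr & ZCR_LEN_MASK) + 1;     /* 128-bit quadwords */

        return vq * 16;                                 /* bytes */
}

int main(void)
{
        /* LEN = 3 -> four quadwords -> a 512-bit (64-byte) vector length */
        printf("VL = %u bytes\n", vl_from_zcr(3));
        return 0;
}

This only illustrates the encoding used by sve_vq_from_vl()/sve_vl_from_vq(); it does not touch hardware.
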
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 0f74f05d662a..3c5ddb429ea2 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CPUIDLE_H
#define __ASM_CPUIDLE_H
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
index f6580d4afb0e..54ceae0874c7 100644
--- a/arch/arm64/include/asm/current.h
+++ b/arch/arm64/include/asm/current.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CURRENT_H
#define __ASM_CURRENT_H
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
new file mode 100644
index 000000000000..22e4c83de5a5
--- /dev/null
+++ b/arch/arm64/include/asm/daifflags.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2017 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_DAIFFLAGS_H
+#define __ASM_DAIFFLAGS_H
+
+#include <linux/irqflags.h>
+
+#define DAIF_PROCCTX 0
+#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
+
+/* mask/save/unmask/restore all exceptions, including interrupts. */
+static inline void local_daif_mask(void)
+{
+ asm volatile(
+ "msr daifset, #0xf // local_daif_mask\n"
+ :
+ :
+ : "memory");
+ trace_hardirqs_off();
+}
+
+static inline unsigned long local_daif_save(void)
+{
+ unsigned long flags;
+
+ asm volatile(
+ "mrs %0, daif // local_daif_save\n"
+ : "=r" (flags)
+ :
+ : "memory");
+ local_daif_mask();
+
+ return flags;
+}
+
+static inline void local_daif_unmask(void)
+{
+ trace_hardirqs_on();
+ asm volatile(
+ "msr daifclr, #0xf // local_daif_unmask"
+ :
+ :
+ : "memory");
+}
+
+static inline void local_daif_restore(unsigned long flags)
+{
+ if (!arch_irqs_disabled_flags(flags))
+ trace_hardirqs_on();
+ asm volatile(
+ "msr daif, %0 // local_daif_restore"
+ :
+ : "r" (flags)
+ : "memory");
+ if (arch_irqs_disabled_flags(flags))
+ trace_hardirqs_off();
+}
+
+#endif
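
[Editor's note] A short usage sketch for the helpers added above; the critical-section function is hypothetical and only the local_daif_*() calls come from this header:

#include <asm/daifflags.h>

static void do_critical_work(void);     /* hypothetical work, for illustration */

static void run_with_exceptions_masked(void)
{
        unsigned long flags;

        flags = local_daif_save();      /* record DAIF, then mask D, A, I and F */
        do_critical_work();             /* runs with debug, SError, IRQ and FIQ masked */
        local_daif_restore(flags);      /* restore the exact previous mask state */
}

Note that local_daif_restore() keeps lockdep's view of IRQ state consistent via the trace_hardirqs_*() calls, which is why callers are expected to go through these helpers rather than writing DAIF directly.
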
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index b93904b16fc2..650344d01124 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_EFI_H
#define _ASM_EFI_H
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 33be513ef24c..fac1c4de7898 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -188,8 +188,8 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
#define compat_start_thread compat_start_thread
/*
- * Unlike the native SET_PERSONALITY macro, the compat version inherits
- * READ_IMPLIES_EXEC across a fork() since this is the behaviour on
+ * Unlike the native SET_PERSONALITY macro, the compat version maintains
+ * READ_IMPLIES_EXEC across an execve() since this is the behaviour on
* arch/arm/.
*/
#define COMPAT_SET_PERSONALITY(ex) \
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 66ed8b6b9976..014d7d8edcf9 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -43,7 +43,8 @@
#define ESR_ELx_EC_HVC64 (0x16)
#define ESR_ELx_EC_SMC64 (0x17)
#define ESR_ELx_EC_SYS64 (0x18)
-/* Unallocated EC: 0x19 - 0x1E */
+#define ESR_ELx_EC_SVE (0x19)
+/* Unallocated EC: 0x1A - 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1f)
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 42f50f15a44c..56a4f68b262e 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_EXTABLE_H
#define __ASM_EXTABLE_H
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index caf86be815ba..4052ec39e8db 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -51,6 +51,13 @@ enum fixed_addresses {
FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0,
+
+#ifdef CONFIG_ACPI_APEI_GHES
+ /* Used for GHES mapping from assorted contexts */
+ FIX_APEI_GHES_IRQ,
+ FIX_APEI_GHES_NMI,
+#endif /* CONFIG_ACPI_APEI_GHES */
+
__end_of_permanent_fixed_addresses,
/*
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 410c48163c6a..74f34392a531 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -17,9 +17,13 @@
#define __ASM_FP_H
#include <asm/ptrace.h>
+#include <asm/errno.h>
#ifndef __ASSEMBLY__
+#include <linux/cache.h>
+#include <linux/stddef.h>
+
/*
* FP/SIMD storage area has:
* - FPSR and FPCR
@@ -35,13 +39,16 @@ struct fpsimd_state {
__uint128_t vregs[32];
u32 fpsr;
u32 fpcr;
+ /*
+ * For ptrace compatibility, pad to next 128-bit
+ * boundary here if extending this struct.
+ */
};
};
/* the id of the last cpu to have restored this state */
unsigned int cpu;
};
-
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK 0xf800009f
@@ -61,11 +68,73 @@ extern void fpsimd_load_state(struct fpsimd_state *state);
extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);
+extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct fpsimd_state *state);
extern void fpsimd_flush_task_state(struct task_struct *target);
+extern void sve_flush_cpu_state(void);
+
+/* Maximum VL that SVE VL-agnostic software can transparently support */
+#define SVE_VL_ARCH_MAX 0x100
+
+extern void sve_save_state(void *state, u32 *pfpsr);
+extern void sve_load_state(void const *state, u32 const *pfpsr,
+ unsigned long vq_minus_1);
+extern unsigned int sve_get_vl(void);
+extern int sve_kernel_enable(void *);
+
+extern int __ro_after_init sve_max_vl;
+
+#ifdef CONFIG_ARM64_SVE
+
+extern size_t sve_state_size(struct task_struct const *task);
+
+extern void sve_alloc(struct task_struct *task);
+extern void fpsimd_release_task(struct task_struct *task);
+extern void fpsimd_sync_to_sve(struct task_struct *task);
+extern void sve_sync_to_fpsimd(struct task_struct *task);
+extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);
+
+extern int sve_set_vector_length(struct task_struct *task,
+ unsigned long vl, unsigned long flags);
+
+extern int sve_set_current_vl(unsigned long arg);
+extern int sve_get_current_vl(void);
+
+/*
+ * Probing and setup functions.
+ * Calls to these functions must be serialised with one another.
+ */
+extern void __init sve_init_vq_map(void);
+extern void sve_update_vq_map(void);
+extern int sve_verify_vq_map(void);
+extern void __init sve_setup(void);
+
+#else /* ! CONFIG_ARM64_SVE */
+
+static inline void sve_alloc(struct task_struct *task) { }
+static inline void fpsimd_release_task(struct task_struct *task) { }
+static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
+static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }
+
+static inline int sve_set_current_vl(unsigned long arg)
+{
+ return -EINVAL;
+}
+
+static inline int sve_get_current_vl(void)
+{
+ return -EINVAL;
+}
+
+static inline void sve_init_vq_map(void) { }
+static inline void sve_update_vq_map(void) { }
+static inline int sve_verify_vq_map(void) { return 0; }
+static inline void sve_setup(void) { }
+
+#endif /* ! CONFIG_ARM64_SVE */
/* For use by EFI runtime services calls only */
extern void __efi_fpsimd_begin(void);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 0f5fdd388b0d..e050d765ca9e 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -75,3 +75,151 @@
ldr w\tmpnr, [\state, #16 * 2 + 4]
fpsimd_restore_fpcr x\tmpnr, \state
.endm
+
+/* Sanity-check macros to help avoid encoding garbage instructions */
+
+.macro _check_general_reg nr
+ .if (\nr) < 0 || (\nr) > 30
+ .error "Bad register number \nr."
+ .endif
+.endm
+
+.macro _sve_check_zreg znr
+ .if (\znr) < 0 || (\znr) > 31
+ .error "Bad Scalable Vector Extension vector register number \znr."
+ .endif
+.endm
+
+.macro _sve_check_preg pnr
+ .if (\pnr) < 0 || (\pnr) > 15
+ .error "Bad Scalable Vector Extension predicate register number \pnr."
+ .endif
+.endm
+
+.macro _check_num n, min, max
+ .if (\n) < (\min) || (\n) > (\max)
+ .error "Number \n out of range [\min,\max]"
+ .endif
+.endm
+
+/* SVE instruction encodings for non-SVE-capable assemblers */
+
+/* STR (vector): STR Z\nz, [X\nxbase, #\offset, MUL VL] */
+.macro _sve_str_v nz, nxbase, offset=0
+ _sve_check_zreg \nz
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0xe5804000 \
+ | (\nz) \
+ | ((\nxbase) << 5) \
+ | (((\offset) & 7) << 10) \
+ | (((\offset) & 0x1f8) << 13)
+.endm
+
+/* LDR (vector): LDR Z\nz, [X\nxbase, #\offset, MUL VL] */
+.macro _sve_ldr_v nz, nxbase, offset=0
+ _sve_check_zreg \nz
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0x85804000 \
+ | (\nz) \
+ | ((\nxbase) << 5) \
+ | (((\offset) & 7) << 10) \
+ | (((\offset) & 0x1f8) << 13)
+.endm
+
+/* STR (predicate): STR P\np, [X\nxbase, #\offset, MUL VL] */
+.macro _sve_str_p np, nxbase, offset=0
+ _sve_check_preg \np
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0xe5800000 \
+ | (\np) \
+ | ((\nxbase) << 5) \
+ | (((\offset) & 7) << 10) \
+ | (((\offset) & 0x1f8) << 13)
+.endm
+
+/* LDR (predicate): LDR P\np, [X\nxbase, #\offset, MUL VL] */
+.macro _sve_ldr_p np, nxbase, offset=0
+ _sve_check_preg \np
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0x85800000 \
+ | (\np) \
+ | ((\nxbase) << 5) \
+ | (((\offset) & 7) << 10) \
+ | (((\offset) & 0x1f8) << 13)
+.endm
+
+/* RDVL X\nx, #\imm */
+.macro _sve_rdvl nx, imm
+ _check_general_reg \nx
+ _check_num (\imm), -0x20, 0x1f
+ .inst 0x04bf5000 \
+ | (\nx) \
+ | (((\imm) & 0x3f) << 5)
+.endm
+
+/* RDFFR (unpredicated): RDFFR P\np.B */
+.macro _sve_rdffr np
+ _sve_check_preg \np
+ .inst 0x2519f000 \
+ | (\np)
+.endm
+
+/* WRFFR P\np.B */
+.macro _sve_wrffr np
+ _sve_check_preg \np
+ .inst 0x25289000 \
+ | ((\np) << 5)
+.endm
+
+.macro __for from:req, to:req
+ .if (\from) == (\to)
+ _for__body \from
+ .else
+ __for \from, (\from) + ((\to) - (\from)) / 2
+ __for (\from) + ((\to) - (\from)) / 2 + 1, \to
+ .endif
+.endm
+
+.macro _for var:req, from:req, to:req, insn:vararg
+ .macro _for__body \var:req
+ \insn
+ .endm
+
+ __for \from, \to
+
+ .purgem _for__body
+.endm
+
+.macro sve_save nxbase, xpfpsr, nxtmp
+ _for n, 0, 31, _sve_str_v \n, \nxbase, \n - 34
+ _for n, 0, 15, _sve_str_p \n, \nxbase, \n - 16
+ _sve_rdffr 0
+ _sve_str_p 0, \nxbase
+ _sve_ldr_p 0, \nxbase, -16
+
+ mrs x\nxtmp, fpsr
+ str w\nxtmp, [\xpfpsr]
+ mrs x\nxtmp, fpcr
+ str w\nxtmp, [\xpfpsr, #4]
+.endm
+
+.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp
+ mrs_s x\nxtmp, SYS_ZCR_EL1
+ bic x\nxtmp, x\nxtmp, ZCR_ELx_LEN_MASK
+ orr x\nxtmp, x\nxtmp, \xvqminus1
+ msr_s SYS_ZCR_EL1, x\nxtmp // self-synchronising
+
+ _for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34
+ _sve_ldr_p 0, \nxbase
+ _sve_wrffr 0
+ _for n, 0, 15, _sve_ldr_p \n, \nxbase, \n - 16
+
+ ldr w\nxtmp, [\xpfpsr]
+ msr fpsr, x\nxtmp
+ ldr w\nxtmp, [\xpfpsr, #4]
+ msr fpcr, x\nxtmp
+.endm
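
[Editor's note] The hand-rolled .inst encodings above are easiest to sanity-check outside the assembler. Here is a standalone C sketch (not part of the patch) of the same bit-packing the _sve_rdvl macro performs:

#include <stdint.h>
#include <stdio.h>

/* Build the 32-bit encoding of RDVL X<xd>, #<imm>, as _sve_rdvl does. */
static uint32_t encode_rdvl(unsigned int xd, int imm)
{
        return 0x04bf5000u | xd | ((uint32_t)(imm & 0x3f) << 5);
}

int main(void)
{
        /* RDVL X0, #1 places the current vector length in bytes into X0 */
        printf("0x%08x\n", encode_rdvl(0, 1));  /* prints 0x04bf5020 */
        return 0;
}
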
diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h
index d2c79049ff11..f9cc1d021791 100644
--- a/arch/arm64/include/asm/hypervisor.h
+++ b/arch/arm64/include/asm/hypervisor.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM64_HYPERVISOR_H
#define _ASM_ARM64_HYPERVISOR_H
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 5e6f77239064..a0fee6985e6a 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_IRQ_H
#define __ASM_IRQ_H
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index 8e24ef3f7c82..8a1ef1907760 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 8c581281fa12..24692edf1a69 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -21,6 +21,19 @@
#include <asm/ptrace.h>
/*
+ * AArch64 has flags for masking: Debug, Asynchronous (SError), Interrupts and
+ * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
+ * order:
+ * Masking debug exceptions causes all other exceptions to be masked too.
+ * Masking SError masks irq, but not debug exceptions. Masking irqs has no
+ * side effects for other flags. Keeping to this order makes it easier for
+ * entry.S to know which exceptions should be unmasked.
+ *
+ * FIQ is never expected, but we mask it when we disable debug exceptions, and
+ * unmask it at all other times.
+ */
+
+/*
* CPU interrupt mask handling.
*/
static inline unsigned long arch_local_irq_save(void)
@@ -53,12 +66,6 @@ static inline void arch_local_irq_disable(void)
: "memory");
}
-#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
-#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
-
-#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
-#define local_async_disable() asm("msr daifset, #4" : : : "memory")
-
/*
* Save the current interrupt enable state.
*/
@@ -89,26 +96,5 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return flags & PSR_I_BIT;
}
-
-/*
- * save and restore debug state
- */
-#define local_dbg_save(flags) \
- do { \
- typecheck(unsigned long, flags); \
- asm volatile( \
- "mrs %0, daif // local_dbg_save\n" \
- "msr daifset, #8" \
- : "=r" (flags) : : "memory"); \
- } while (0)
-
-#define local_dbg_restore(flags) \
- do { \
- typecheck(unsigned long, flags); \
- asm volatile( \
- "msr daif, %0 // local_dbg_restore\n" \
- : : "r" (flags) : "memory"); \
- } while (0)
-
#endif
#endif
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 71ad0f93eb71..e266f80e45b7 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 61d694c2eae5..7f069ff37f06 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -185,7 +185,9 @@
#define CPTR_EL2_TCPAC (1 << 31)
#define CPTR_EL2_TTA (1 << 20)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
-#define CPTR_EL2_DEFAULT 0x000033ff
+#define CPTR_EL2_TZ (1 << 8)
+#define CPTR_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 */
+#define CPTR_EL2_DEFAULT CPTR_EL2_RES1
/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_TPMS (1 << 14)
@@ -236,5 +238,6 @@
#define CPACR_EL1_FPEN (3 << 20)
#define CPACR_EL1_TTA (1 << 28)
+#define CPACR_EL1_DEFAULT (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN)
#endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e923b58606e2..674912d7a571 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cpufeature.h>
+#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@@ -384,4 +385,14 @@ static inline void __cpu_init_stage2(void)
"PARange is %d bits, unsupported configuration!", parange);
}
+/*
+ * All host FP/SIMD state is restored on guest exit, so nothing needs
+ * doing here except in the SVE case:
+ */
+static inline void kvm_fpsimd_flush_cpu_state(void)
+{
+ if (system_supports_sve())
+ sve_flush_cpu_state();
+}
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 606b20910a5c..eec95768eaad 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_LSE_H
#define __ASM_LSE_H
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3585a5e26151..d4bae7d6e0d8 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -61,8 +61,6 @@
* KIMAGE_VADDR - the virtual address of the start of the kernel image
* VA_BITS - the maximum number of bits for virtual addresses.
* VA_START - the first kernel virtual address.
- * TASK_SIZE - the maximum size of a user space task.
- * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
#define VA_START (UL(0xffffffffffffffff) - \
@@ -77,34 +75,24 @@
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
-#define TASK_SIZE_64 (UL(1) << VA_BITS)
-
-#ifdef CONFIG_COMPAT
-#define TASK_SIZE_32 UL(0x100000000)
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
- TASK_SIZE_32 : TASK_SIZE_64)
-#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
- TASK_SIZE_32 : TASK_SIZE_64)
-#else
-#define TASK_SIZE TASK_SIZE_64
-#endif /* CONFIG_COMPAT */
-
-#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
#define KERNEL_START _text
#define KERNEL_END _end
/*
- * The size of the KASAN shadow region. This should be 1/8th of the
- * size of the entire kernel virtual address space.
+ * KASAN requires 1/8th of the kernel virtual address space for the shadow
+ * region. KASAN can bloat the stack significantly, so double the (minimum)
+ * stack size when KASAN is in use.
*/
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3))
+#define KASAN_THREAD_SHIFT 1
#else
#define KASAN_SHADOW_SIZE (0)
+#define KASAN_THREAD_SHIFT 0
#endif
-#define MIN_THREAD_SHIFT 14
+#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
/*
* VMAP'd stacks are allocated at page granularity, so we must ensure that such
diff --git a/arch/arm64/include/asm/mmzone.h b/arch/arm64/include/asm/mmzone.h
index a0de9e6ba73f..fa17e01d9ab2 100644
--- a/arch/arm64/include/asm/mmzone.h
+++ b/arch/arm64/include/asm/mmzone.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMZONE_H
#define __ASM_MMZONE_H
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index ef7b23863a7c..01bc46d5b43a 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_NUMA_H
#define __ASM_NUMA_H
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index fd5f42886251..bb5dcea42003 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM64_PARAVIRT_H
#define _ASM_ARM64_PARAVIRT_H
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index 1fc19744ffe9..8747f7c5e0e7 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PCI_H
#define __ASM_PCI_H
#ifdef __KERNEL__
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b46e54c2399b..c9530b5b5ca8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -98,6 +98,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+#define pte_valid_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
/*
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
@@ -107,6 +109,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_accessible(mm, pte) \
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
+/*
+ * p??_access_permitted() is true for valid user mappings (subject to the
+ * write permission check) other than user execute-only, which do not have the
+ * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ */
+#define pte_access_permitted(pte, write) \
+ (pte_valid_user(pte) && (!(write) || pte_write(pte)))
+#define pmd_access_permitted(pmd, write) \
+ (pte_access_permitted(pmd_pte(pmd), (write)))
+#define pud_access_permitted(pud, write) \
+ (pte_access_permitted(pud_pte(pud), (write)))
+
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
pte_val(pte) &= ~pgprot_val(prot);
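
[Editor's note] To illustrate the intent of pte_access_permitted(), here is a hedged sketch of how a hypothetical fast user-page walker might use it; only pte_access_permitted() itself is defined by this hunk, and the helper below is illustrative rather than part of the patch:

/* Hypothetical helper, for illustration only. */
static struct page *try_get_user_page(pte_t pte, bool write)
{
        /*
         * Rejects exec-only and PROT_NONE mappings, and read-only PTEs
         * when write access is requested.
         */
        if (!pte_access_permitted(pte, write))
                return NULL;

        return pte_page(pte);
}
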
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 29adab8138c3..023cacb946c3 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -19,6 +19,10 @@
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H
+#define TASK_SIZE_64 (UL(1) << VA_BITS)
+
+#ifndef __ASSEMBLY__
+
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
@@ -37,6 +41,22 @@
#include <asm/ptrace.h>
#include <asm/types.h>
+/*
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
+ */
+#ifdef CONFIG_COMPAT
+#define TASK_SIZE_32 UL(0x100000000)
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#else
+#define TASK_SIZE TASK_SIZE_64
+#endif /* CONFIG_COMPAT */
+
+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
+
#define STACK_TOP_MAX TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE 0xffff0000
@@ -85,6 +105,9 @@ struct thread_struct {
unsigned long tp2_value;
#endif
struct fpsimd_state fpsimd_state;
+ void *sve_state; /* SVE registers, if any */
+ unsigned int sve_vl; /* SVE vector length */
+ unsigned int sve_vl_onexec; /* SVE vl after next exec */
unsigned long fault_address; /* fault info */
unsigned long fault_code; /* ESR_EL1 value */
struct debug_info debug; /* debugging */
@@ -194,4 +217,9 @@ static inline void spin_lock_prefetch(const void *ptr)
int cpu_enable_pan(void *__unused);
int cpu_enable_cache_maint_trap(void *__unused);
+/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
+#define SVE_SET_VL(arg) sve_set_current_vl(arg)
+#define SVE_GET_VL() sve_get_current_vl()
+
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
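
[Editor's note] SVE_SET_VL()/SVE_GET_VL() back a prctl() interface. A userspace sketch of how that interface is expected to be used follows; the PR_SVE_* values are assumptions matching the constants proposed by this series, not something defined in this header:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SVE_SET_VL
#define PR_SVE_SET_VL           50      /* assumed values from the SVE series */
#define PR_SVE_GET_VL           51
#define PR_SVE_VL_LEN_MASK      0xffff
#endif

int main(void)
{
        if (prctl(PR_SVE_SET_VL, 32) == -1)     /* ask for a 32-byte (256-bit) VL */
                perror("PR_SVE_SET_VL");

        int vl = prctl(PR_SVE_GET_VL);
        if (vl == -1)
                perror("PR_SVE_GET_VL");
        else
                printf("vector length: %d bytes\n", vl & PR_SVE_VL_LEN_MASK);

        return 0;
}

The kernel is free to clamp the request to a supported vector length, so the value read back may differ from the one asked for.
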
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 95ad7102b63c..fdb827c7832f 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -27,8 +27,6 @@
* instructions.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp;
@@ -139,176 +137,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
}
#define arch_spin_is_contended arch_spin_is_contended
-/*
- * Write lock implementation.
- *
- * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is
- * exclusively held.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- unsigned int tmp;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- " sevl\n"
- "1: wfe\n"
- "2: ldaxr %w0, %1\n"
- " cbnz %w0, 1b\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 2b\n"
- __nops(1),
- /* LSE atomics */
- "1: mov %w0, wzr\n"
- "2: casa %w0, %w2, %1\n"
- " cbz %w0, 3f\n"
- " ldxr %w0, %1\n"
- " cbz %w0, 2b\n"
- " wfe\n"
- " b 1b\n"
- "3:")
- : "=&r" (tmp), "+Q" (rw->lock)
- : "r" (0x80000000)
- : "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- unsigned int tmp;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- "1: ldaxr %w0, %1\n"
- " cbnz %w0, 2f\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 1b\n"
- "2:",
- /* LSE atomics */
- " mov %w0, wzr\n"
- " casa %w0, %w2, %1\n"
- __nops(2))
- : "=&r" (tmp), "+Q" (rw->lock)
- : "r" (0x80000000)
- : "memory");
-
- return !tmp;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- " stlr wzr, %0",
- " swpl wzr, wzr, %0")
- : "=Q" (rw->lock) :: "memory");
-}
-
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
-/*
- * Read lock implementation.
- *
- * It exclusively loads the lock value, increments it and stores the new value
- * back if positive and the CPU still exclusively owns the location. If the
- * value is negative, the lock is already held.
- *
- * During unlocking there may be multiple active read locks but no write lock.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- *
- * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
- * and LSE implementations may exhibit different behaviour (although this
- * will have no effect on lockdep).
- */
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- unsigned int tmp, tmp2;
-
- asm volatile(
- " sevl\n"
- ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- "1: wfe\n"
- "2: ldaxr %w0, %2\n"
- " add %w0, %w0, #1\n"
- " tbnz %w0, #31, 1b\n"
- " stxr %w1, %w0, %2\n"
- " cbnz %w1, 2b\n"
- __nops(1),
- /* LSE atomics */
- "1: wfe\n"
- "2: ldxr %w0, %2\n"
- " adds %w1, %w0, #1\n"
- " tbnz %w1, #31, 1b\n"
- " casa %w0, %w1, %2\n"
- " sbc %w0, %w1, %w0\n"
- " cbnz %w0, 2b")
- : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
- :
- : "cc", "memory");
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- unsigned int tmp, tmp2;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- "1: ldxr %w0, %2\n"
- " sub %w0, %w0, #1\n"
- " stlxr %w1, %w0, %2\n"
- " cbnz %w1, 1b",
- /* LSE atomics */
- " movn %w0, #0\n"
- " staddl %w0, %2\n"
- __nops(2))
- : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
- :
- : "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- unsigned int tmp, tmp2;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- " mov %w1, #1\n"
- "1: ldaxr %w0, %2\n"
- " add %w0, %w0, #1\n"
- " tbnz %w0, #31, 2f\n"
- " stxr %w1, %w0, %2\n"
- " cbnz %w1, 1b\n"
- "2:",
- /* LSE atomics */
- " ldr %w0, %2\n"
- " adds %w1, %w0, #1\n"
- " tbnz %w1, #31, 1f\n"
- " casa %w0, %w1, %2\n"
- " sbc %w1, %w1, %w0\n"
- __nops(1)
- "1:")
- : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
- :
- : "cc", "memory");
-
- return !tmp2;
-}
-
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
+#include <asm/qrwlock.h>
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 55be59a35e3f..6b856012c51b 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -36,10 +36,6 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
-typedef struct {
- volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+#include <asm-generic/qrwlock_types.h>
#endif
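The two hunks above swap the arm64-private rwlock implementation for the generic queued rwlock (qrwlock). Nothing changes for callers, which only ever see the generic locking API; a sketch of that unchanged usage, with illustrative names:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);		/* illustrative */
static int example_value;

static int example_read(void)
{
	int v;

	read_lock(&example_lock);		/* many readers may enter */
	v = example_value;
	read_unlock(&example_lock);
	return v;
}

static void example_write(int v)
{
	write_lock(&example_lock);		/* exclusive; queued fairly by qrwlock */
	example_value = v;
	write_unlock(&example_lock);
}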
diff --git a/arch/arm64/include/asm/stack_pointer.h b/arch/arm64/include/asm/stack_pointer.h
index ffcdf742cddf..8e57c96ad45e 100644
--- a/arch/arm64/include/asm/stack_pointer.h
+++ b/arch/arm64/include/asm/stack_pointer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_STACK_POINTER_H
#define __ASM_STACK_POINTER_H
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
index b86a0865ddf1..58d15be11c4d 100644
--- a/arch/arm64/include/asm/stackprotector.h
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* GCC stack protector support.
*
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index de5600f40adf..8939c87c4dce 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
diff --git a/arch/arm64/include/asm/sync_bitops.h b/arch/arm64/include/asm/sync_bitops.h
index 8da0bf4f7659..eee31a9f72a5 100644
--- a/arch/arm64/include/asm/sync_bitops.h
+++ b/arch/arm64/include/asm/sync_bitops.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SYNC_BITOPS_H__
#define __ASM_SYNC_BITOPS_H__
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index f707fed5886f..08cc88574659 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -145,10 +145,14 @@
#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
+#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4)
#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
+#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4)
+#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5)
+
#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
@@ -160,6 +164,8 @@
#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2)
+#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
+
#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
@@ -172,6 +178,99 @@
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
+/*** Statistical Profiling Extension ***/
+/* ID registers */
+#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7)
+#define SYS_PMSIDR_EL1_FE_SHIFT 0
+#define SYS_PMSIDR_EL1_FT_SHIFT 1
+#define SYS_PMSIDR_EL1_FL_SHIFT 2
+#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3
+#define SYS_PMSIDR_EL1_LDS_SHIFT 4
+#define SYS_PMSIDR_EL1_ERND_SHIFT 5
+#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8
+#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL
+#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12
+#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL
+#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16
+#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL
+
+#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7)
+#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0
+#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU
+#define SYS_PMBIDR_EL1_P_SHIFT 4
+#define SYS_PMBIDR_EL1_F_SHIFT 5
+
+/* Sampling controls */
+#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0)
+#define SYS_PMSCR_EL1_E0SPE_SHIFT 0
+#define SYS_PMSCR_EL1_E1SPE_SHIFT 1
+#define SYS_PMSCR_EL1_CX_SHIFT 3
+#define SYS_PMSCR_EL1_PA_SHIFT 4
+#define SYS_PMSCR_EL1_TS_SHIFT 5
+#define SYS_PMSCR_EL1_PCT_SHIFT 6
+
+#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0)
+#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0
+#define SYS_PMSCR_EL2_E2SPE_SHIFT 1
+#define SYS_PMSCR_EL2_CX_SHIFT 3
+#define SYS_PMSCR_EL2_PA_SHIFT 4
+#define SYS_PMSCR_EL2_TS_SHIFT 5
+#define SYS_PMSCR_EL2_PCT_SHIFT 6
+
+#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2)
+
+#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3)
+#define SYS_PMSIRR_EL1_RND_SHIFT 0
+#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8
+#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL
+
+/* Filtering controls */
+#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4)
+#define SYS_PMSFCR_EL1_FE_SHIFT 0
+#define SYS_PMSFCR_EL1_FT_SHIFT 1
+#define SYS_PMSFCR_EL1_FL_SHIFT 2
+#define SYS_PMSFCR_EL1_B_SHIFT 16
+#define SYS_PMSFCR_EL1_LD_SHIFT 17
+#define SYS_PMSFCR_EL1_ST_SHIFT 18
+
+#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5)
+#define SYS_PMSEVFR_EL1_RES0 0x0000ffff00ff0f55UL
+
+#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6)
+#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0
+
+/* Buffer controls */
+#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0)
+#define SYS_PMBLIMITR_EL1_E_SHIFT 0
+#define SYS_PMBLIMITR_EL1_FM_SHIFT 1
+#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL
+#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT)
+
+#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1)
+
+/* Buffer error reporting */
+#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3)
+#define SYS_PMBSR_EL1_COLL_SHIFT 16
+#define SYS_PMBSR_EL1_S_SHIFT 17
+#define SYS_PMBSR_EL1_EA_SHIFT 18
+#define SYS_PMBSR_EL1_DL_SHIFT 19
+#define SYS_PMBSR_EL1_EC_SHIFT 26
+#define SYS_PMBSR_EL1_EC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT)
+#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT)
+#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT)
+
+#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0
+#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0
+#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT)
+
+/*** End of Statistical Profiling Extension ***/
+
#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
@@ -250,6 +349,8 @@
#define SYS_PMCCFILTR_EL0 sys_reg (3, 3, 14, 15, 7)
+#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
+
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
@@ -318,6 +419,10 @@
#define SCTLR_EL1_CP15BEN (1 << 5)
/* id_aa64isar0 */
+#define ID_AA64ISAR0_DP_SHIFT 44
+#define ID_AA64ISAR0_SM4_SHIFT 40
+#define ID_AA64ISAR0_SM3_SHIFT 36
+#define ID_AA64ISAR0_SHA3_SHIFT 32
#define ID_AA64ISAR0_RDM_SHIFT 28
#define ID_AA64ISAR0_ATOMICS_SHIFT 20
#define ID_AA64ISAR0_CRC32_SHIFT 16
@@ -332,6 +437,7 @@
#define ID_AA64ISAR1_DPB_SHIFT 0
/* id_aa64pfr0 */
+#define ID_AA64PFR0_SVE_SHIFT 32
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16
@@ -340,6 +446,7 @@
#define ID_AA64PFR0_EL1_SHIFT 4
#define ID_AA64PFR0_EL0_SHIFT 0
+#define ID_AA64PFR0_SVE 0x1
#define ID_AA64PFR0_FP_NI 0xf
#define ID_AA64PFR0_FP_SUPPORTED 0x0
#define ID_AA64PFR0_ASIMD_NI 0xf
@@ -441,6 +548,20 @@
#endif
+/*
+ * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which
+ * are reserved by the SVE architecture for future expansion of the LEN
+ * field, with compatible semantics.
+ */
+#define ZCR_ELx_LEN_SHIFT 0
+#define ZCR_ELx_LEN_SIZE 9
+#define ZCR_ELx_LEN_MASK 0x1ff
+
+#define CPACR_EL1_ZEN_EL1EN (1 << 16) /* enable EL1 access */
+#define CPACR_EL1_ZEN_EL0EN (1 << 17) /* enable EL0 access, if EL1EN set */
+#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+
+
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (1UL << 31)
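For reference, ZCR_ELx.LEN encodes the requested vector length as (LEN + 1) quadwords of 16 bytes, which is why the mask above spans nine bits even though the initial architecture only defines the low four. A small illustrative helper, not part of the patch:

/* Illustrative only: convert a ZCR_ELx value to the vector length it
 * requests, in bytes: (LEN + 1) quadwords of 16 bytes each. */
static inline unsigned int zcr_len_to_vl_bytes(u64 zcr)
{
	return ((zcr & ZCR_ELx_LEN_MASK) + 1) * 16;
}
/* LEN == 0  -> 16 bytes (128 bits)
 * LEN == 15 -> 256 bytes (2048 bits), the current architectural maximum */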
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index ddded6497a8a..eb431286bacd 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -63,6 +63,8 @@ struct thread_info {
void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
+void arch_release_task_struct(struct task_struct *tsk);
+
#endif
/*
@@ -92,6 +94,8 @@ void arch_setup_new_exec(void);
#define TIF_RESTORE_SIGMASK 20
#define TIF_SINGLESTEP 21
#define TIF_32BIT 22 /* 32bit process */
+#define TIF_SVE 23 /* Scalable Vector Extension in use */
+#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -105,6 +109,7 @@ void arch_setup_new_exec(void);
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_32BIT (1 << TIF_32BIT)
+#define _TIF_SVE (1 << TIF_SVE)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 8b57339823e9..c4f2d50491eb 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_TOPOLOGY_H
#define __ASM_TOPOLOGY_H
@@ -32,6 +33,14 @@ int pcibus_to_node(struct pci_bus *bus);
#endif /* CONFIG_NUMA */
+#include <linux/arch_topology.h>
+
+/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_scale_freq_capacity topology_get_freq_scale
+
+/* Replace task scheduler's default cpu-invariant accounting */
+#define arch_scale_cpu_capacity topology_get_cpu_scale
+
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index d131501c6222..1696f9de9359 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -34,9 +34,17 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
+void force_signal_inject(int signal, int code, struct pt_regs *regs,
+ unsigned long address);
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
+/*
+ * Move regs->pc to next instruction and do necessary setup before it
+ * is executed.
+ */
+void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size);
+
static inline int __in_irqentry_text(unsigned long ptr)
{
return ptr >= (unsigned long)&__irqentry_text_start &&
diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h
index 4318866d053c..4e22b7a8c038 100644
--- a/arch/arm64/include/asm/xen/events.h
+++ b/arch/arm64/include/asm/xen/events.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM64_XEN_EVENTS_H
#define _ASM_ARM64_XEN_EVENTS_H
diff --git a/arch/arm64/include/asm/xen/xen-ops.h b/arch/arm64/include/asm/xen/xen-ops.h
index ec154e719b11..e6e784051932 100644
--- a/arch/arm64/include/asm/xen/xen-ops.h
+++ b/arch/arm64/include/asm/xen/xen-ops.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_XEN_OPS_H
#define _ASM_XEN_OPS_H
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index fc28bd95c6d3..198afbf0688f 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h
index 4cf0c17787a8..ec0a86d484e1 100644
--- a/arch/arm64/include/uapi/asm/auxvec.h
+++ b/arch/arm64/include/uapi/asm/auxvec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/arch/arm64/include/uapi/asm/bitsperlong.h b/arch/arm64/include/uapi/asm/bitsperlong.h
index fce9c2924fa3..485d60bee26c 100644
--- a/arch/arm64/include/uapi/asm/bitsperlong.h
+++ b/arch/arm64/include/uapi/asm/bitsperlong.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/arch/arm64/include/uapi/asm/byteorder.h b/arch/arm64/include/uapi/asm/byteorder.h
index dc19e9537f0d..ca9cfdab33fc 100644
--- a/arch/arm64/include/uapi/asm/byteorder.h
+++ b/arch/arm64/include/uapi/asm/byteorder.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/arch/arm64/include/uapi/asm/fcntl.h b/arch/arm64/include/uapi/asm/fcntl.h
index cd2e630c235e..f8db34f2622d 100644
--- a/arch/arm64/include/uapi/asm/fcntl.h
+++ b/arch/arm64/include/uapi/asm/fcntl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 4b9344cba83a..cda76fa8b9b2 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
@@ -36,5 +37,11 @@
#define HWCAP_FCMA (1 << 14)
#define HWCAP_LRCPC (1 << 15)
#define HWCAP_DCPOP (1 << 16)
+#define HWCAP_SHA3 (1 << 17)
+#define HWCAP_SM3 (1 << 18)
+#define HWCAP_SM4 (1 << 19)
+#define HWCAP_ASIMDDP (1 << 20)
+#define HWCAP_SHA512 (1 << 21)
+#define HWCAP_SVE (1 << 22)
#endif /* _UAPI__ASM_HWCAP_H */
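Userspace discovers these new capabilities through the ELF auxiliary vector rather than by probing instructions. A short sketch using glibc's getauxval(); the HWCAP_SVE fallback simply mirrors the uapi definition above:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SVE
#define HWCAP_SVE	(1 << 22)	/* mirrors the uapi definition above */
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	printf("SVE: %s\n", (hwcaps & HWCAP_SVE) ? "present" : "absent");
	return 0;
}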
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 9f3ca24bbcc6..51149ec75fe4 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
diff --git a/arch/arm64/include/uapi/asm/param.h b/arch/arm64/include/uapi/asm/param.h
index 8e3a281d448a..b115c5557781 100644
--- a/arch/arm64/include/uapi/asm/param.h
+++ b/arch/arm64/include/uapi/asm/param.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/arch/arm64/include/uapi/asm/perf_regs.h b/arch/arm64/include/uapi/asm/perf_regs.h
index 172b8317ee49..d54daafa89e3 100644
--- a/arch/arm64/include/uapi/asm/perf_regs.h
+++ b/arch/arm64/include/uapi/asm/perf_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_ARM64_PERF_REGS_H
#define _ASM_ARM64_PERF_REGS_H
diff --git a/arch/arm64/include/uapi/asm/posix_types.h b/arch/arm64/include/uapi/asm/posix_types.h
index 7985ff60ca3f..b1c2e0df92dc 100644
--- a/arch/arm64/include/uapi/asm/posix_types.h
+++ b/arch/arm64/include/uapi/asm/posix_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_POSIX_TYPES_H
#define __ASM_POSIX_TYPES_H
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index d1ff83dfe5de..98c4ce55d9c3 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Based on arch/arm/include/asm/ptrace.h
*
@@ -22,6 +23,7 @@
#include <linux/types.h>
#include <asm/hwcap.h>
+#include <asm/sigcontext.h>
/*
@@ -46,7 +48,6 @@
#define PSR_D_BIT 0x00000200
#define PSR_PAN_BIT 0x00400000
#define PSR_UAO_BIT 0x00800000
-#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
@@ -63,6 +64,8 @@
#ifndef __ASSEMBLY__
+#include <linux/prctl.h>
+
/*
* User structures for general purpose, floating point and debug registers.
*/
@@ -90,6 +93,141 @@ struct user_hwdebug_state {
} dbg_regs[16];
};
+/* SVE/FP/SIMD state (NT_ARM_SVE) */
+
+struct user_sve_header {
+ __u32 size; /* total meaningful regset content in bytes */
+ __u32 max_size; /* maximum possible size for this thread */
+ __u16 vl; /* current vector length */
+ __u16 max_vl; /* maximum possible vector length */
+ __u16 flags;
+ __u16 __reserved;
+};
+
+/* Definitions for user_sve_header.flags: */
+#define SVE_PT_REGS_MASK (1 << 0)
+
+#define SVE_PT_REGS_FPSIMD 0
+#define SVE_PT_REGS_SVE SVE_PT_REGS_MASK
+
+/*
+ * Common SVE_PT_* flags:
+ * These must be kept in sync with the prctl() interface in <linux/prctl.h>
+ */
+#define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16)
+#define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16)
+
+
+/*
+ * The remainder of the SVE state follows struct user_sve_header. The
+ * total size of the SVE state (including header) depends on the
+ * metadata in the header: SVE_PT_SIZE(vq, flags) gives the total size
+ * of the state in bytes, including the header.
+ *
+ * Refer to <asm/sigcontext.h> for details of how to pass the correct
+ * "vq" argument to these macros.
+ */
+
+/* Offset from the start of struct user_sve_header to the register data */
+#define SVE_PT_REGS_OFFSET \
+ ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
+ / SVE_VQ_BYTES * SVE_VQ_BYTES)
+
+/*
+ * The register data content and layout depends on the value of the
+ * flags field.
+ */
+
+/*
+ * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD case:
+ *
+ * The payload starts at offset SVE_PT_FPSIMD_OFFSET, and is of type
+ * struct user_fpsimd_state. Additional data might be appended in the
+ * future: use SVE_PT_FPSIMD_SIZE(vq, flags) to compute the total size.
+ * SVE_PT_FPSIMD_SIZE(vq, flags) will never be less than
+ * sizeof(struct user_fpsimd_state).
+ */
+
+#define SVE_PT_FPSIMD_OFFSET SVE_PT_REGS_OFFSET
+
+#define SVE_PT_FPSIMD_SIZE(vq, flags) (sizeof(struct user_fpsimd_state))
+
+/*
+ * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE case:
+ *
+ * The payload starts at offset SVE_PT_SVE_OFFSET, and is of size
+ * SVE_PT_SVE_SIZE(vq, flags).
+ *
+ * Additional macros describe the contents and layout of the payload.
+ * For each, SVE_PT_SVE_x_OFFSET(args) is the start offset relative to
+ * the start of struct user_sve_header, and SVE_PT_SVE_x_SIZE(args) is
+ * the size in bytes:
+ *
+ * x type description
+ * - ---- -----------
+ * ZREGS \
+ * ZREG |
+ * PREGS | refer to <asm/sigcontext.h>
+ * PREG |
+ * FFR /
+ *
+ * FPSR uint32_t FPSR
+ * FPCR uint32_t FPCR
+ *
+ * Additional data might be appended in the future.
+ */
+
+#define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq)
+#define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
+#define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq)
+#define SVE_PT_SVE_FPSR_SIZE sizeof(__u32)
+#define SVE_PT_SVE_FPCR_SIZE sizeof(__u32)
+
+#define __SVE_SIG_TO_PT(offset) \
+ ((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)
+
+#define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET
+
+#define SVE_PT_SVE_ZREGS_OFFSET \
+ __SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)
+#define SVE_PT_SVE_ZREG_OFFSET(vq, n) \
+ __SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))
+#define SVE_PT_SVE_ZREGS_SIZE(vq) \
+ (SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
+
+#define SVE_PT_SVE_PREGS_OFFSET(vq) \
+ __SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))
+#define SVE_PT_SVE_PREG_OFFSET(vq, n) \
+ __SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))
+#define SVE_PT_SVE_PREGS_SIZE(vq) \
+ (SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \
+ SVE_PT_SVE_PREGS_OFFSET(vq))
+
+#define SVE_PT_SVE_FFR_OFFSET(vq) \
+ __SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))
+
+#define SVE_PT_SVE_FPSR_OFFSET(vq) \
+ ((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \
+ (SVE_VQ_BYTES - 1)) \
+ / SVE_VQ_BYTES * SVE_VQ_BYTES)
+#define SVE_PT_SVE_FPCR_OFFSET(vq) \
+ (SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)
+
+/*
+ * Any future extension appended after FPCR must be aligned to the next
+ * 128-bit boundary.
+ */
+
+#define SVE_PT_SVE_SIZE(vq, flags) \
+ ((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \
+ - SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \
+ / SVE_VQ_BYTES * SVE_VQ_BYTES)
+
+#define SVE_PT_SIZE(vq, flags) \
+ (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \
+ SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \
+ : SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags))
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI__ASM_PTRACE_H */
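A debugger consumes this regset with PTRACE_GETREGSET and NT_ARM_SVE (defined elsewhere in this series), reading the header first and then sizing the payload from it. A hedged sketch with error handling kept minimal; the NT_ARM_SVE fallback value is an assumption:

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <asm/ptrace.h>

#ifndef NT_ARM_SVE
#define NT_ARM_SVE	0x405		/* assumed value from this series */
#endif

static void dump_sve_header(pid_t pid)
{
	struct user_sve_header hdr;
	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov) != 0) {
		perror("PTRACE_GETREGSET");
		return;
	}

	printf("vl=%u max_vl=%u flags=%#x\n", hdr.vl, hdr.max_vl, hdr.flags);

	if ((hdr.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE)
		printf("full SVE payload, %u bytes including header\n",
		       (unsigned int)SVE_PT_SIZE(sve_vq_from_vl(hdr.vl),
						 hdr.flags));
	else
		printf("FPSIMD-only payload\n");
}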
diff --git a/arch/arm64/include/uapi/asm/setup.h b/arch/arm64/include/uapi/asm/setup.h
index 9cf2e46fbbdf..5d703888f351 100644
--- a/arch/arm64/include/uapi/asm/setup.h
+++ b/arch/arm64/include/uapi/asm/setup.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Based on arch/arm/include/asm/setup.h
*
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index f0a76b9fcd6e..dca8f8b5168b 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
@@ -16,6 +17,8 @@
#ifndef _UAPI__ASM_SIGCONTEXT_H
#define _UAPI__ASM_SIGCONTEXT_H
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
/*
@@ -41,10 +44,11 @@ struct sigcontext {
*
* 0x210 fpsimd_context
* 0x10 esr_context
+ * 0x8a0 sve_context (vl <= 64) (optional)
* 0x20 extra_context (optional)
* 0x10 terminator (null _aarch64_ctx)
*
- * 0xdb0 (reserved for future allocation)
+ * 0x510 (reserved for future allocation)
*
* New records that can exceed this space need to be opt-in for userspace, so
* that an expanded signal frame is not generated unexpectedly. The mechanism
@@ -116,4 +120,119 @@ struct extra_context {
__u32 __reserved[3];
};
+#define SVE_MAGIC 0x53564501
+
+struct sve_context {
+ struct _aarch64_ctx head;
+ __u16 vl;
+ __u16 __reserved[3];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * The SVE architecture leaves space for future expansion of the
+ * vector length beyond its initial architectural limit of 2048 bits
+ * (16 quadwords).
+ *
+ * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
+ * terminology.
+ */
+#define SVE_VQ_BYTES 16 /* number of bytes per quadword */
+
+#define SVE_VQ_MIN 1
+#define SVE_VQ_MAX 512
+
+#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES)
+#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES)
+
+#define SVE_NUM_ZREGS 32
+#define SVE_NUM_PREGS 16
+
+#define sve_vl_valid(vl) \
+ ((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
+#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES)
+#define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES)
+
+/*
+ * If the SVE registers are currently live for the thread at signal delivery,
+ * sve_context.head.size >=
+ * SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl))
+ * and the register data may be accessed using the SVE_SIG_*() macros.
+ *
+ * If sve_context.head.size <
+ * SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)),
+ * the SVE registers were not live for the thread and no register data
+ * is included: in this case, the SVE_SIG_*() macros should not be
+ * used except for this check.
+ *
+ * The same convention applies when returning from a signal: a caller
+ * will need to remove or resize the sve_context block if it wants to
+ * make the SVE registers live when they were previously non-live or
+ * vice-versa. This may require the caller to allocate fresh
+ * memory and/or move other context blocks in the signal frame.
+ *
+ * Changing the vector length during signal return is not permitted:
+ * sve_context.vl must equal the thread's current vector length when
+ * doing a sigreturn.
+ *
+ *
+ * Note: for all these macros, the "vq" argument denotes the SVE
+ * vector length in quadwords (i.e., units of 128 bits).
+ *
+ * The correct way to obtain vq is to use sve_vq_from_vl(vl). The
+ * result is valid if and only if sve_vl_valid(vl) is true. This is
+ * guaranteed for a struct sve_context written by the kernel.
+ *
+ *
+ * Additional macros describe the contents and layout of the payload.
+ * For each, SVE_SIG_x_OFFSET(args) is the start offset relative to
+ * the start of struct sve_context, and SVE_SIG_x_SIZE(args) is the
+ * size in bytes:
+ *
+ * x type description
+ * - ---- -----------
+ * REGS the entire SVE context
+ *
+ * ZREGS __uint128_t[SVE_NUM_ZREGS][vq] all Z-registers
+ * ZREG __uint128_t[vq] individual Z-register Zn
+ *
+ * PREGS uint16_t[SVE_NUM_PREGS][vq] all P-registers
+ * PREG uint16_t[vq] individual P-register Pn
+ *
+ * FFR uint16_t[vq] first-fault status register
+ *
+ * Additional data might be appended in the future.
+ */
+
+#define SVE_SIG_ZREG_SIZE(vq) ((__u32)(vq) * SVE_VQ_BYTES)
+#define SVE_SIG_PREG_SIZE(vq) ((__u32)(vq) * (SVE_VQ_BYTES / 8))
+#define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
+
+#define SVE_SIG_REGS_OFFSET \
+ ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
+ / SVE_VQ_BYTES * SVE_VQ_BYTES)
+
+#define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET
+#define SVE_SIG_ZREG_OFFSET(vq, n) \
+ (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
+#define SVE_SIG_ZREGS_SIZE(vq) \
+ (SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
+
+#define SVE_SIG_PREGS_OFFSET(vq) \
+ (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
+#define SVE_SIG_PREG_OFFSET(vq, n) \
+ (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
+#define SVE_SIG_PREGS_SIZE(vq) \
+ (SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
+
+#define SVE_SIG_FFR_OFFSET(vq) \
+ (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
+
+#define SVE_SIG_REGS_SIZE(vq) \
+ (SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
+
+#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
+
+
#endif /* _UAPI__ASM_SIGCONTEXT_H */
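Inside a signal handler, once a record with SVE_MAGIC has been located in the frame, these macros give the offsets of the individual registers. A hedged sketch of the lookup convention described above; the helper is illustrative and not part of the patch:

#include <asm/sigcontext.h>

/* Illustrative only: return a pointer to Z-register n's data within a
 * sve_context block, or NULL if the registers were not live. */
static void *sve_zreg_ptr(struct sve_context *sve, unsigned int n)
{
	unsigned int vq;

	if (!sve_vl_valid(sve->vl))
		return NULL;

	vq = sve_vq_from_vl(sve->vl);
	if (sve->head.size < SVE_SIG_CONTEXT_SIZE(vq))
		return NULL;	/* no register payload present */

	return (char *)sve + SVE_SIG_ZREG_OFFSET(vq, n);
}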
diff --git a/arch/arm64/include/uapi/asm/siginfo.h b/arch/arm64/include/uapi/asm/siginfo.h
index 5a74a0853db0..574d12f86039 100644
--- a/arch/arm64/include/uapi/asm/siginfo.h
+++ b/arch/arm64/include/uapi/asm/siginfo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/arch/arm64/include/uapi/asm/signal.h b/arch/arm64/include/uapi/asm/signal.h
index 991bf5db2ca1..bdf0cfb046c9 100644
--- a/arch/arm64/include/uapi/asm/signal.h
+++ b/arch/arm64/include/uapi/asm/signal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/arch/arm64/include/uapi/asm/stat.h b/arch/arm64/include/uapi/asm/stat.h
index eeb702e5074a..313325fa22fa 100644
--- a/arch/arm64/include/uapi/asm/stat.h
+++ b/arch/arm64/include/uapi/asm/stat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/arch/arm64/include/uapi/asm/statfs.h b/arch/arm64/include/uapi/asm/statfs.h
index 6f6219050978..615357b6ba89 100644
--- a/arch/arm64/include/uapi/asm/statfs.h
+++ b/arch/arm64/include/uapi/asm/statfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/arch/arm64/include/uapi/asm/ucontext.h b/arch/arm64/include/uapi/asm/ucontext.h
index 791de8e89e35..ee02721a704d 100644
--- a/arch/arm64/include/uapi/asm/ucontext.h
+++ b/arch/arm64/include/uapi/asm/ucontext.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h
index 043d17a21342..5072cbd15c82 100644
--- a/arch/arm64/include/uapi/asm/unistd.h
+++ b/arch/arm64/include/uapi/asm/unistd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index f2b4e816b6de..8265dd790895 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the linux kernel.
#
@@ -10,8 +11,6 @@ CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_insn.o = -pg
CFLAGS_REMOVE_return_address.o = -pg
-CFLAGS_setup.o = -DUTS_MACHINE='"$(UTS_MACHINE)"'
-
# Object file lists.
arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
index f01fab637dab..d190a7b231bf 100644
--- a/arch/arm64/kernel/acpi_numa.c
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ACPI 5.1 based NUMA setup for ARM64
* Lots of code was borrowed from arch/x86/mm/srat.c
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index f0e6d717885b..c33b5e4010ab 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -228,15 +228,7 @@ ret:
return ret;
}
-static struct ctl_table ctl_abi[] = {
- {
- .procname = "abi",
- .mode = 0555,
- },
- { }
-};
-
-static void __init register_insn_emulation_sysctl(struct ctl_table *table)
+static void __init register_insn_emulation_sysctl(void)
{
unsigned long flags;
int i = 0;
@@ -262,8 +254,7 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
}
raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
- table->child = insns_sysctl;
- register_sysctl_table(table);
+ register_sysctl("abi", insns_sysctl);
}
/*
@@ -431,7 +422,7 @@ ret:
pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
current->comm, (unsigned long)current->pid, regs->pc);
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, 4);
return 0;
fault:
@@ -512,7 +503,7 @@ ret:
pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
current->comm, (unsigned long)current->pid, regs->pc);
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, 4);
return 0;
}
@@ -586,14 +577,14 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
static int a32_setend_handler(struct pt_regs *regs, u32 instr)
{
int rc = compat_setend_handler(regs, (instr >> 9) & 1);
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, 4);
return rc;
}
static int t16_setend_handler(struct pt_regs *regs, u32 instr)
{
int rc = compat_setend_handler(regs, (instr >> 3) & 1);
- regs->pc += 2;
+ arm64_skip_faulting_instruction(regs, 2);
return rc;
}
@@ -644,9 +635,9 @@ static int __init armv8_deprecated_init(void)
cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
"arm64/isndep:starting",
run_all_insn_set_hw_mode, NULL);
- register_insn_emulation_sysctl(ctl_abi);
+ register_insn_emulation_sysctl();
return 0;
}
-late_initcall(armv8_deprecated_init);
+core_initcall(armv8_deprecated_init);
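The armv8_deprecated.c change above drops the static parent ctl_table and registers the emulation controls directly under "abi". A minimal sketch of that newer registration pattern, with illustrative names not taken from this patch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

static int example_knob;	/* illustrative */

static struct ctl_table example_sysctls[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __init example_sysctl_init(void)
{
	/* Creates /proc/sys/abi/example_knob; no parent table needed. */
	if (!register_sysctl("abi", example_sysctls))
		return -ENOMEM;
	return 0;
}
core_initcall(example_sysctl_init);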
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index cd52d365d1f0..c5ba0097887f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -27,6 +27,7 @@
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
+#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
@@ -51,6 +52,21 @@ unsigned int compat_elf_hwcap2 __read_mostly;
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
+/*
+ * Flag to indicate if we have computed the system wide
+ * capabilities based on the boot time active CPUs. This
+ * will be used to determine if a new booting CPU should
+ * go through the verification process to make sure that it
+ * supports the system capabilities, without using a hotplug
+ * notifier.
+ */
+static bool sys_caps_initialised;
+
+static inline void set_sys_caps_initialised(void)
+{
+ sys_caps_initialised = true;
+}
+
static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
{
/* file-wide pr_fmt adds "CPU features: " prefix */
@@ -107,7 +123,11 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
* sync with the documentation of the CPU feature register ABI.
*/
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
@@ -117,34 +137,35 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
/* Linux doesn't care about the EL3 */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
/* Linux shouldn't care about secure memory */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
/*
* Differing PARange is fine as long as all peripherals and memory are mapped
* within the minimum PARange of all CPUs
@@ -155,20 +176,20 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -193,14 +214,14 @@ struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
};
static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 28, 4, 0xf), /* InnerShr */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 24, 4, 0), /* FCSE */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf), /* InnerShr */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), /* FCSE */
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 16, 4, 0), /* TCM */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* ShareLvl */
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0xf), /* OuterShr */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), /* TCM */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* ShareLvl */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf), /* OuterShr */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* PMSA */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* VMSA */
ARM64_FTR_END,
};
@@ -221,8 +242,8 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
};
static const struct arm64_ftr_bits ftr_mvfr2[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* FPMisc */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* SIMDMisc */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */
ARM64_FTR_END,
};
@@ -234,25 +255,25 @@ static const struct arm64_ftr_bits ftr_dczid[] = {
static const struct arm64_ftr_bits ftr_id_isar5[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* ac2 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* ac2 */
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_pfr0[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* State3 */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0), /* State2 */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* State1 */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* State0 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* State1 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* State0 */
ARM64_FTR_END,
};
@@ -268,6 +289,12 @@ static const struct arm64_ftr_bits ftr_id_dfr0[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_zcr[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
+ ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */
+ ARM64_FTR_END,
+};
+
/*
* Common ftr bits for a 32bit register with all hidden, strict
* attributes, with 4bit feature fields and a default safe value of
@@ -334,6 +361,7 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 4 */
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
+ ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
/* Op1 = 0, CRn = 0, CRm = 5 */
ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
@@ -348,6 +376,9 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
+ /* Op1 = 0, CRn = 1, CRm = 2 */
+ ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
+
/* Op1 = 3, CRn = 0, CRm = 0 */
{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
@@ -485,6 +516,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
+ init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
@@ -505,6 +537,10 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}
+ if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
+ init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
+ sve_init_vq_map();
+ }
}
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
@@ -608,6 +644,9 @@ void update_cpu_features(int cpu,
taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
+ taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
+ info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
+
/*
* If we have AArch32, we care about 32-bit features for compat.
* If the system doesn't support AArch32, don't update them.
@@ -655,6 +694,16 @@ void update_cpu_features(int cpu,
info->reg_mvfr2, boot->reg_mvfr2);
}
+ if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
+ taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
+ info->reg_zcr, boot->reg_zcr);
+
+ /* Probe vector lengths, unless we already gave up on SVE */
+ if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
+ !sys_caps_initialised)
+ sve_update_vq_map();
+ }
+
/*
* Mismatched CPU features are a recipe for disaster. Don't even
* pretend to support them.
@@ -900,6 +949,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.min_field_value = 1,
},
#endif
+#ifdef CONFIG_ARM64_SVE
+ {
+ .desc = "Scalable Vector Extension",
+ .capability = ARM64_SVE,
+ .def_scope = SCOPE_SYSTEM,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_SVE_SHIFT,
+ .min_field_value = ID_AA64PFR0_SVE,
+ .matches = has_cpuid_feature,
+ .enable = sve_kernel_enable,
+ },
+#endif /* CONFIG_ARM64_SVE */
{},
};
@@ -921,9 +983,14 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
@@ -932,6 +999,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
+#ifdef CONFIG_ARM64_SVE
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
+#endif
{},
};
@@ -1041,21 +1111,6 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
}
/*
- * Flag to indicate if we have computed the system wide
- * capabilities based on the boot time active CPUs. This
- * will be used to determine if a new booting CPU should
- * go through the verification process to make sure that it
- * supports the system capabilities, without using a hotplug
- * notifier.
- */
-static bool sys_caps_initialised;
-
-static inline void set_sys_caps_initialised(void)
-{
- sys_caps_initialised = true;
-}
-
-/*
* Check for CPU features that are used in early boot
* based on the Boot CPU value.
*/
@@ -1097,6 +1152,23 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
}
}
+static void verify_sve_features(void)
+{
+ u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
+ u64 zcr = read_zcr_features();
+
+ unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
+ unsigned int len = zcr & ZCR_ELx_LEN_MASK;
+
+ if (len < safe_len || sve_verify_vq_map()) {
+ pr_crit("CPU%d: SVE: required vector length(s) missing\n",
+ smp_processor_id());
+ cpu_die_early();
+ }
+
+ /* Add checks on other ZCR bits here if necessary */
+}
+
/*
* Run through the enabled system capabilities and enable() it on this CPU.
* The capabilities were decided based on the available CPUs at the boot time.
@@ -1110,8 +1182,12 @@ static void verify_local_cpu_capabilities(void)
verify_local_cpu_errata_workarounds();
verify_local_cpu_features(arm64_features);
verify_local_elf_hwcaps(arm64_elf_hwcaps);
+
if (system_supports_32bit_el0())
verify_local_elf_hwcaps(compat_elf_hwcaps);
+
+ if (system_supports_sve())
+ verify_sve_features();
}
void check_local_cpu_capabilities(void)
@@ -1189,6 +1265,8 @@ void __init setup_cpu_features(void)
if (system_supports_32bit_el0())
setup_elf_hwcaps(compat_elf_hwcaps);
+ sve_setup();
+
/* Advertise that we have computed the system capabilities */
set_sys_caps_initialised();
@@ -1287,7 +1365,7 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
if (!rc) {
dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
pt_regs_write_reg(regs, dst, val);
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
return rc;
@@ -1307,4 +1385,4 @@ static int __init enable_mrs_emulation(void)
return 0;
}
-late_initcall(enable_mrs_emulation);
+core_initcall(enable_mrs_emulation);
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 311885962830..1e2554543506 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -19,6 +19,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/fpsimd.h>
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -69,6 +70,12 @@ static const char *const hwcap_str[] = {
"fcma",
"lrcpc",
"dcpop",
+ "sha3",
+ "sm3",
+ "sm4",
+ "asimddp",
+ "sha512",
+ "sve",
NULL
};
@@ -326,6 +333,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+ info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
/* Update the 32bit ID registers only if AArch32 is implemented */
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
@@ -348,6 +356,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
}
+ if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+ id_aa64pfr0_sve(info->reg_id_aa64pfr0))
+ info->reg_zcr = read_zcr_features();
+
cpuinfo_detect_icache_policy(info);
}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index c7ef99904934..a88b6ccebbb4 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -30,6 +30,7 @@
#include <asm/cpufeature.h>
#include <asm/cputype.h>
+#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
@@ -46,9 +47,9 @@ u8 debug_monitors_arch(void)
static void mdscr_write(u32 mdscr)
{
unsigned long flags;
- local_dbg_save(flags);
+ flags = local_daif_save();
write_sysreg(mdscr, mdscr_el1);
- local_dbg_restore(flags);
+ local_daif_restore(flags);
}
NOKPROBE_SYMBOL(mdscr_write);
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 6a27cd6dbfa6..73f17bffcd23 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -41,3 +41,20 @@ ENTRY(fpsimd_load_state)
fpsimd_restore x0, 8
ret
ENDPROC(fpsimd_load_state)
+
+#ifdef CONFIG_ARM64_SVE
+ENTRY(sve_save_state)
+ sve_save 0, x1, 2
+ ret
+ENDPROC(sve_save_state)
+
+ENTRY(sve_load_state)
+ sve_load 0, x1, x2, 3
+ ret
+ENDPROC(sve_load_state)
+
+ENTRY(sve_get_vl)
+ _sve_rdvl 0, 1
+ ret
+ENDPROC(sve_get_vl)
+#endif /* CONFIG_ARM64_SVE */
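For reference, the C prototypes matching these entry points are declared in <asm/fpsimd.h> elsewhere in this series; the signatures below are recalled from the series rather than quoted from it, so treat them as assumptions:

#include <linux/types.h>

/* x0 = buffer for the Z/P/FFR state, x1 points at FPSR (with FPCR
 * following), and for the load path x2 = vector length in quadwords
 * minus 1, matching the asm above. */
extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
			   unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);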
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index e1be42e11ff5..1175f5827ae1 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -108,13 +108,8 @@ ENTRY(_mcount)
mcount_get_lr x1 // function's lr (= parent's pc)
blr x2 // (*ftrace_trace_function)(pc, lr);
-#ifndef CONFIG_FUNCTION_GRAPH_TRACER
-skip_ftrace_call: // return;
- mcount_exit // }
-#else
- mcount_exit // return;
- // }
-skip_ftrace_call:
+skip_ftrace_call: // }
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ldr_l x2, ftrace_graph_return
cmp x0, x2 // if ((ftrace_graph_return
b.ne ftrace_graph_caller // != ftrace_stub)
@@ -123,9 +118,8 @@ skip_ftrace_call:
adr_l x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
cmp x0, x2
b.ne ftrace_graph_caller // ftrace_graph_caller();
-
- mcount_exit
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+ mcount_exit
ENDPROC(_mcount)
#else /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e1c59d4008a8..6d14b8f29b5f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,7 +28,7 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
-#include <asm/memory.h>
+#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
@@ -221,6 +221,8 @@ alternative_else_nop_endif
.macro kernel_exit, el
.if \el != 0
+ disable_daif
+
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
@@ -373,18 +375,18 @@ ENTRY(vectors)
kernel_ventry el1_sync // Synchronous EL1h
kernel_ventry el1_irq // IRQ EL1h
kernel_ventry el1_fiq_invalid // FIQ EL1h
- kernel_ventry el1_error_invalid // Error EL1h
+ kernel_ventry el1_error // Error EL1h
kernel_ventry el0_sync // Synchronous 64-bit EL0
kernel_ventry el0_irq // IRQ 64-bit EL0
kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0
- kernel_ventry el0_error_invalid // Error 64-bit EL0
+ kernel_ventry el0_error // Error 64-bit EL0
#ifdef CONFIG_COMPAT
kernel_ventry el0_sync_compat // Synchronous 32-bit EL0
kernel_ventry el0_irq_compat // IRQ 32-bit EL0
kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
- kernel_ventry el0_error_invalid_compat // Error 32-bit EL0
+ kernel_ventry el0_error_compat // Error 32-bit EL0
#else
kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0
kernel_ventry el0_irq_invalid // IRQ 32-bit EL0
@@ -453,10 +455,6 @@ ENDPROC(el0_error_invalid)
el0_fiq_invalid_compat:
inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
-
-el0_error_invalid_compat:
- inv_entry 0, BAD_ERROR, 32
-ENDPROC(el0_error_invalid_compat)
#endif
el1_sync_invalid:
@@ -508,24 +506,18 @@ el1_da:
* Data abort handling
*/
mrs x3, far_el1
- enable_dbg
- // re-enable interrupts if they were enabled in the aborted context
- tbnz x23, #7, 1f // PSR_I_BIT
- enable_irq
-1:
+ inherit_daif pstate=x23, tmp=x2
clear_address_tag x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
- // disable interrupts before pulling preserved data off the stack
- disable_irq
kernel_exit 1
el1_sp_pc:
/*
* Stack or PC alignment exception handling
*/
mrs x0, far_el1
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x2, sp
bl do_sp_pc_abort
ASM_BUG()
@@ -533,7 +525,7 @@ el1_undef:
/*
* Undefined instruction
*/
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x0, sp
bl do_undefinstr
ASM_BUG()
@@ -550,7 +542,7 @@ el1_dbg:
kernel_exit 1
el1_inv:
// TODO: add support for undefined instructions in kernel mode
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x0, sp
mov x2, x1
mov x1, #BAD_SYNC
@@ -561,7 +553,7 @@ ENDPROC(el1_sync)
.align 6
el1_irq:
kernel_entry 1
- enable_dbg
+ enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
@@ -607,6 +599,8 @@ el0_sync:
b.eq el0_ia
cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
+ cmp x24, #ESR_ELx_EC_SVE // SVE access
+ b.eq el0_sve_acc
cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
@@ -658,6 +652,7 @@ el0_svc_compat:
/*
* AArch32 syscall handling
*/
+ ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
adrp stbl, compat_sys_call_table // load compat syscall table pointer
mov wscno, w7 // syscall number in w7 (r7)
mov wsc_nr, #__NR_compat_syscalls
@@ -667,6 +662,10 @@ el0_svc_compat:
el0_irq_compat:
kernel_entry 0, 32
b el0_irq_naked
+
+el0_error_compat:
+ kernel_entry 0, 32
+ b el0_error_naked
#endif
el0_da:
@@ -674,8 +673,7 @@ el0_da:
* Data abort handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
clear_address_tag x0, x26
mov x1, x25
@@ -687,8 +685,7 @@ el0_ia:
* Instruction abort handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x26
mov x1, x25
@@ -699,17 +696,27 @@ el0_fpsimd_acc:
/*
* Floating Point or Advanced SIMD access
*/
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
bl do_fpsimd_acc
b ret_to_user
+el0_sve_acc:
+ /*
+ * Scalable Vector Extension access
+ */
+ enable_daif
+ ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_sve_acc
+ b ret_to_user
el0_fpsimd_exc:
/*
- * Floating Point or Advanced SIMD exception
+ * Floating Point, Advanced SIMD or SVE exception
*/
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
@@ -720,8 +727,7 @@ el0_sp_pc:
* Stack or PC alignment exception handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x26
mov x1, x25
@@ -732,8 +738,7 @@ el0_undef:
/*
* Undefined instruction
*/
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, sp
bl do_undefinstr
@@ -742,7 +747,7 @@ el0_sys:
/*
* System instructions, for trapped cache maintenance instructions
*/
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
@@ -757,11 +762,11 @@ el0_dbg:
mov x1, x25
mov x2, sp
bl do_debug_exception
- enable_dbg
+ enable_daif
ct_user_exit
b ret_to_user
el0_inv:
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, sp
mov x1, #BAD_SYNC
@@ -774,7 +779,7 @@ ENDPROC(el0_sync)
el0_irq:
kernel_entry 0
el0_irq_naked:
- enable_dbg
+ enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
@@ -788,12 +793,34 @@ el0_irq_naked:
b ret_to_user
ENDPROC(el0_irq)
+el1_error:
+ kernel_entry 1
+ mrs x1, esr_el1
+ enable_dbg
+ mov x0, sp
+ bl do_serror
+ kernel_exit 1
+ENDPROC(el1_error)
+
+el0_error:
+ kernel_entry 0
+el0_error_naked:
+ mrs x1, esr_el1
+ enable_dbg
+ mov x0, sp
+ bl do_serror
+ enable_daif
+ ct_user_exit
+ b ret_to_user
+ENDPROC(el0_error)
+
+
/*
* This is the fast syscall return path. We do as little as possible here,
* and this includes saving x0 back into the kernel stack.
*/
ret_fast_syscall:
- disable_irq // disable interrupts
+ disable_daif
str x0, [sp, #S_X0] // returned x0
ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
and x2, x1, #_TIF_SYSCALL_WORK
@@ -803,7 +830,7 @@ ret_fast_syscall:
enable_step_tsk x1, x2
kernel_exit 0
ret_fast_syscall_trace:
- enable_irq // enable interrupts
+ enable_daif
b __sys_trace_return_skipped // we already saved x0
/*
@@ -821,7 +848,7 @@ work_pending:
* "slow" syscall return path.
*/
ret_to_user:
- disable_irq // disable interrupts
+ disable_daif
ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
@@ -835,16 +862,37 @@ ENDPROC(ret_to_user)
*/
.align 6
el0_svc:
+ ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
adrp stbl, sys_call_table // load syscall table pointer
mov wscno, w8 // syscall number in w8
mov wsc_nr, #__NR_syscalls
+
+#ifdef CONFIG_ARM64_SVE
+alternative_if_not ARM64_SVE
+ b el0_svc_naked
+alternative_else_nop_endif
+ tbz x16, #TIF_SVE, el0_svc_naked // Skip unless TIF_SVE set:
+ bic x16, x16, #_TIF_SVE // discard SVE state
+ str x16, [tsk, #TSK_TI_FLAGS]
+
+ /*
+ * task_fpsimd_load() won't be called to update CPACR_EL1 in
+ * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
+ * happens if a context switch or kernel_neon_begin() or context
+ * modification (sigreturn, ptrace) intervenes.
+ * So, ensure that CPACR_EL1 is already correct for the fast-path case:
+ */
+ mrs x9, cpacr_el1
+ bic x9, x9, #CPACR_EL1_ZEN_EL0EN // disable SVE for el0
+ msr cpacr_el1, x9 // synchronised by eret to el0
+#endif
+
el0_svc_naked: // compat entry point
stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
- enable_dbg_and_irq
+ enable_daif
ct_user_exit 1
- ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks
- tst x16, #_TIF_SYSCALL_WORK
+ tst x16, #_TIF_SYSCALL_WORK // check for syscall hooks
b.ne __sys_trace
cmp wscno, wsc_nr // check upper syscall limit
b.hs ni_sys
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index f444f374bd7b..143b3e72c25e 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,19 +17,33 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bitmap.h>
#include <linux/bottom_half.h>
+#include <linux/bug.h>
+#include <linux/cache.h>
+#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
+#include <linux/prctl.h>
#include <linux/preempt.h>
+#include <linux/ptrace.h>
#include <linux/sched/signal.h>
+#include <linux/sched/task_stack.h>
#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
#include <asm/fpsimd.h>
#include <asm/cputype.h>
#include <asm/simd.h>
+#include <asm/sigcontext.h>
+#include <asm/sysreg.h>
+#include <asm/traps.h>
#define FPEXC_IOF (1 << 0)
#define FPEXC_DZF (1 << 1)
@@ -39,6 +54,8 @@
#define FPEXC_IDF (1 << 7)
/*
+ * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
+ *
* In order to reduce the number of times the FPSIMD state is needlessly saved
* and restored, we need to keep track of two things:
* (a) for each task, we need to remember which CPU was the last one to have
@@ -99,10 +116,741 @@
*/
static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
+/* Default VL for tasks that don't set it explicitly: */
+static int sve_default_vl = -1;
+
+#ifdef CONFIG_ARM64_SVE
+
+/* Maximum supported vector length across all CPUs (initially poisoned) */
+int __ro_after_init sve_max_vl = -1;
+/* Set of available vector lengths, as vq_to_bit(vq): */
+static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+static void __percpu *efi_sve_state;
+
+#else /* ! CONFIG_ARM64_SVE */
+
+/* Dummy declaration for code that will be optimised out: */
+extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+extern void __percpu *efi_sve_state;
+
+#endif /* ! CONFIG_ARM64_SVE */
+
+/*
+ * Call __sve_free() directly only if you know task can't be scheduled
+ * or preempted.
+ */
+static void __sve_free(struct task_struct *task)
+{
+ kfree(task->thread.sve_state);
+ task->thread.sve_state = NULL;
+}
+
+static void sve_free(struct task_struct *task)
+{
+ WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
+
+ __sve_free(task);
+}
+
+
+/* Offset of FFR in the SVE register dump */
+static size_t sve_ffr_offset(int vl)
+{
+ return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
+}
+
+static void *sve_pffr(struct task_struct *task)
+{
+ return (char *)task->thread.sve_state +
+ sve_ffr_offset(task->thread.sve_vl);
+}
+
+static void change_cpacr(u64 val, u64 mask)
+{
+ u64 cpacr = read_sysreg(CPACR_EL1);
+ u64 new = (cpacr & ~mask) | val;
+
+ if (new != cpacr)
+ write_sysreg(new, CPACR_EL1);
+}
+
+static void sve_user_disable(void)
+{
+ change_cpacr(0, CPACR_EL1_ZEN_EL0EN);
+}
+
+static void sve_user_enable(void)
+{
+ change_cpacr(CPACR_EL1_ZEN_EL0EN, CPACR_EL1_ZEN_EL0EN);
+}
+
+/*
+ * TIF_SVE controls whether a task can use SVE without trapping while
+ * in userspace, and also the way a task's FPSIMD/SVE state is stored
+ * in thread_struct.
+ *
+ * The kernel uses this flag to track whether a user task is actively
+ * using SVE, and therefore whether full SVE register state needs to
+ * be tracked. If not, the cheaper FPSIMD context handling code can
+ * be used instead of the more costly SVE equivalents.
+ *
+ * * TIF_SVE set:
+ *
+ * The task can execute SVE instructions while in userspace without
+ * trapping to the kernel.
+ *
+ * When stored, Z0-Z31 (incorporating Vn in bits[127:0] of the
+ * corresponding Zn), P0-P15 and FFR are encoded in
+ * task->thread.sve_state, formatted appropriately for vector
+ * length task->thread.sve_vl.
+ *
+ * task->thread.sve_state must point to a valid buffer at least
+ * sve_state_size(task) bytes in size.
+ *
+ * During any syscall, the kernel may optionally clear TIF_SVE and
+ * discard the vector state except for the FPSIMD subset.
+ *
+ * * TIF_SVE clear:
+ *
+ * An attempt by the user task to execute an SVE instruction causes
+ * do_sve_acc() to be called, which does some preparation and then
+ * sets TIF_SVE.
+ *
+ * When stored, FPSIMD registers V0-V31 are encoded in
+ * task->thread.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
+ * logically zero but not stored anywhere; P0-P15 and FFR are not
+ * stored and have unspecified values from userspace's point of
+ * view. For hygiene purposes, the kernel zeroes them on next use,
+ * but userspace is discouraged from relying on this.
+ *
+ * task->thread.sve_state does not need to be non-NULL, valid or any
+ * particular size: it must not be dereferenced.
+ *
+ * * FPSR and FPCR are always stored in task->thread.fpsimd_state irrespective of
+ * whether TIF_SVE is clear or set, since these are not vector length
+ * dependent.
+ */
+
+/*
+ * Update current's FPSIMD/SVE registers from thread_struct.
+ *
+ * This function should be called only when the FPSIMD/SVE state in
+ * thread_struct is known to be up to date, when preparing to enter
+ * userspace.
+ *
+ * Softirqs (and preemption) must be disabled.
+ */
+static void task_fpsimd_load(void)
+{
+ WARN_ON(!in_softirq() && !irqs_disabled());
+
+ if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ sve_load_state(sve_pffr(current),
+ &current->thread.fpsimd_state.fpsr,
+ sve_vq_from_vl(current->thread.sve_vl) - 1);
+ else
+ fpsimd_load_state(&current->thread.fpsimd_state);
+
+ if (system_supports_sve()) {
+ /* Toggle SVE trapping for userspace if needed */
+ if (test_thread_flag(TIF_SVE))
+ sve_user_enable();
+ else
+ sve_user_disable();
+
+ /* Serialised by exception return to user */
+ }
+}
+
+/*
+ * Ensure current's FPSIMD/SVE storage in thread_struct is up to date
+ * with respect to the CPU registers.
+ *
+ * Softirqs (and preemption) must be disabled.
+ */
+static void task_fpsimd_save(void)
+{
+ WARN_ON(!in_softirq() && !irqs_disabled());
+
+ if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
+ if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
+ /*
+ * Can't save the user regs, so current would
+ * re-enter user with corrupt state.
+ * There's no way to recover, so kill it:
+ */
+ force_signal_inject(
+ SIGKILL, 0, current_pt_regs(), 0);
+ return;
+ }
+
+ sve_save_state(sve_pffr(current),
+ &current->thread.fpsimd_state.fpsr);
+ } else
+ fpsimd_save_state(&current->thread.fpsimd_state);
+ }
+}
+
+/*
+ * Helpers to translate bit indices in sve_vq_map to VQ values (and
+ * vice versa). This allows find_next_bit() to be used to find the
+ * _maximum_ VQ not exceeding a certain value.
+ */
+
+static unsigned int vq_to_bit(unsigned int vq)
+{
+ return SVE_VQ_MAX - vq;
+}
+
+static unsigned int bit_to_vq(unsigned int bit)
+{
+ if (WARN_ON(bit >= SVE_VQ_MAX))
+ bit = SVE_VQ_MAX - 1;
+
+ return SVE_VQ_MAX - bit;
+}
+
+/*
+ * All vector length selection from userspace comes through here.
+ * We're on a slow path, so some sanity-checks are included.
+ * If things go wrong there's a bug somewhere, but try to fall back to a
+ * safe choice.
+ */
+static unsigned int find_supported_vector_length(unsigned int vl)
+{
+ int bit;
+ int max_vl = sve_max_vl;
+
+ if (WARN_ON(!sve_vl_valid(vl)))
+ vl = SVE_VL_MIN;
+
+ if (WARN_ON(!sve_vl_valid(max_vl)))
+ max_vl = SVE_VL_MIN;
+
+ if (vl > max_vl)
+ vl = max_vl;
+
+ bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
+ vq_to_bit(sve_vq_from_vl(vl)));
+ return sve_vl_from_vq(bit_to_vq(bit));
+}
+
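
The reversed bit index used by vq_to_bit()/bit_to_vq() above is what lets find_supported_vector_length() pick, with a single find_next_bit(), the largest supported vector length not exceeding the request. The plain C sketch below reproduces the same idea outside the kernel; it is an illustration only, and the names VQ_MIN, VQ_MAX, supported[] and best_vq() are invented here (SVE_VQ_MIN and SVE_VQ_MAX are assumed to be 1 and 512, as in the uapi headers).

/* Illustration only: the reversed-bit VQ lookup in stand-alone C. */
#include <stdbool.h>
#include <stdio.h>

#define VQ_MIN	1	/* assumed SVE_VQ_MIN */
#define VQ_MAX	512	/* assumed SVE_VQ_MAX */

static bool supported[VQ_MAX + 1];	/* supported[vq]: is this VQ usable? */

static unsigned int vq_to_bit(unsigned int vq)	{ return VQ_MAX - vq; }
static unsigned int bit_to_vq(unsigned int bit)	{ return VQ_MAX - bit; }

/* Largest supported VQ not exceeding vq, like find_supported_vector_length() */
static unsigned int best_vq(unsigned int vq)
{
	unsigned int bit;

	/* Higher bit index == smaller VQ, so the first hit is the maximum. */
	for (bit = vq_to_bit(vq); bit <= vq_to_bit(VQ_MIN); bit++)
		if (supported[bit_to_vq(bit)])
			return bit_to_vq(bit);

	return VQ_MIN;	/* the architecture guarantees 128-bit vectors */
}

int main(void)
{
	supported[1] = supported[2] = supported[4] = true; /* 128/256/512 bit */

	printf("%u\n", best_vq(3));	/* prints 2: biggest supported VQ <= 3 */
	return 0;
}

In the kernel, the loop above collapses to the find_next_bit(sve_vq_map, SVE_VQ_MAX, vq_to_bit(vq)) call shown in find_supported_vector_length().
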
+#ifdef CONFIG_SYSCTL
+
+static int sve_proc_do_default_vl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ int vl = sve_default_vl;
+ struct ctl_table tmp_table = {
+ .data = &vl,
+ .maxlen = sizeof(vl),
+ };
+
+ ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
+ if (ret || !write)
+ return ret;
+
+ /* Writing -1 has the special meaning "set to max": */
+ if (vl == -1) {
+ /* Fail safe if sve_max_vl wasn't initialised */
+ if (WARN_ON(!sve_vl_valid(sve_max_vl)))
+ vl = SVE_VL_MIN;
+ else
+ vl = sve_max_vl;
+
+ goto chosen;
+ }
+
+ if (!sve_vl_valid(vl))
+ return -EINVAL;
+
+ vl = find_supported_vector_length(vl);
+chosen:
+ sve_default_vl = vl;
+ return 0;
+}
+
+static struct ctl_table sve_default_vl_table[] = {
+ {
+ .procname = "sve_default_vector_length",
+ .mode = 0644,
+ .proc_handler = sve_proc_do_default_vl,
+ },
+ { }
+};
+
+static int __init sve_sysctl_init(void)
+{
+ if (system_supports_sve())
+ if (!register_sysctl("abi", sve_default_vl_table))
+ return -EINVAL;
+
+ return 0;
+}
+
+#else /* ! CONFIG_SYSCTL */
+static int __init sve_sysctl_init(void) { return 0; }
+#endif /* ! CONFIG_SYSCTL */
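
Because the table above is registered with register_sysctl("abi", ...), the handler ends up behind /proc/sys/abi/sve_default_vector_length. Below is a minimal sketch of driving it from userspace (root and an SVE-enabled kernel assumed); per sve_proc_do_default_vl() above, the value is a vector length in bytes, and writing -1 selects the maximum supported length.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/abi/sve_default_vector_length", "w");

	if (!f) {
		perror("sve_default_vector_length");
		return 1;
	}

	fprintf(f, "%d\n", -1);	/* -1: "set to max", as handled above */
	return fclose(f) ? 1 : 0;
}
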
+
+#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
+ (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
+
+/*
+ * Transfer the FPSIMD state in task->thread.fpsimd_state to
+ * task->thread.sve_state.
+ *
+ * Task can be a non-runnable task, or current. In the latter case,
+ * softirqs (and preemption) must be disabled.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ * task->thread.fpsimd_state must be up to date before calling this function.
+ */
+static void fpsimd_to_sve(struct task_struct *task)
+{
+ unsigned int vq;
+ void *sst = task->thread.sve_state;
+ struct fpsimd_state const *fst = &task->thread.fpsimd_state;
+ unsigned int i;
+
+ if (!system_supports_sve())
+ return;
+
+ vq = sve_vq_from_vl(task->thread.sve_vl);
+ for (i = 0; i < 32; ++i)
+ memcpy(ZREG(sst, vq, i), &fst->vregs[i],
+ sizeof(fst->vregs[i]));
+}
+
+/*
+ * Transfer the SVE state in task->thread.sve_state to
+ * task->thread.fpsimd_state.
+ *
+ * Task can be a non-runnable task, or current. In the latter case,
+ * softirqs (and preemption) must be disabled.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ * task->thread.sve_state must be up to date before calling this function.
+ */
+static void sve_to_fpsimd(struct task_struct *task)
+{
+ unsigned int vq;
+ void const *sst = task->thread.sve_state;
+ struct fpsimd_state *fst = &task->thread.fpsimd_state;
+ unsigned int i;
+
+ if (!system_supports_sve())
+ return;
+
+ vq = sve_vq_from_vl(task->thread.sve_vl);
+ for (i = 0; i < 32; ++i)
+ memcpy(&fst->vregs[i], ZREG(sst, vq, i),
+ sizeof(fst->vregs[i]));
+}
+
+#ifdef CONFIG_ARM64_SVE
+
+/*
+ * Return how many bytes of memory are required to store the full SVE
+ * state for task, given task's currently configured vector length.
+ */
+size_t sve_state_size(struct task_struct const *task)
+{
+ return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
+}
+
+/*
+ * Ensure that task->thread.sve_state is allocated and sufficiently large.
+ *
+ * This function should be used only in preparation for replacing
+ * task->thread.sve_state with new data. The memory is always zeroed
+ * here to prevent stale data from showing through: this is done in
+ * the interest of testability and predictability: except in the
+ * do_sve_acc() case, there is no ABI requirement to hide stale data
+ * written previously by the task.
+ */
+void sve_alloc(struct task_struct *task)
+{
+ if (task->thread.sve_state) {
+ memset(task->thread.sve_state, 0, sve_state_size(task));
+ return;
+ }
+
+ /* This is a small allocation (maximum ~8KB) and Should Not Fail. */
+ task->thread.sve_state =
+ kzalloc(sve_state_size(task), GFP_KERNEL);
+
+ /*
+ * If future SVE revisions can have larger vectors though,
+ * this may cease to be true:
+ */
+ BUG_ON(!task->thread.sve_state);
+}
+
+
+/*
+ * Ensure that task->thread.sve_state is up to date with respect to
+ * the user task, irrespective of whether SVE is in use or not.
+ *
+ * This should only be called by ptrace. task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ */
+void fpsimd_sync_to_sve(struct task_struct *task)
+{
+ if (!test_tsk_thread_flag(task, TIF_SVE))
+ fpsimd_to_sve(task);
+}
+
+/*
+ * Ensure that task->thread.fpsimd_state is up to date with respect to
+ * the user task, irrespective of whether SVE is in use or not.
+ *
+ * This should only be called by ptrace. task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ */
+void sve_sync_to_fpsimd(struct task_struct *task)
+{
+ if (test_tsk_thread_flag(task, TIF_SVE))
+ sve_to_fpsimd(task);
+}
+
+/*
+ * Ensure that task->thread.sve_state is up to date with respect to
+ * the task->thread.fpsimd_state.
+ *
+ * This should only be called by ptrace to merge new FPSIMD register
+ * values into a task for which SVE is currently active.
+ * task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ * task->thread.fpsimd_state must already have been initialised with
+ * the new FPSIMD register values to be merged in.
+ */
+void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
+{
+ unsigned int vq;
+ void *sst = task->thread.sve_state;
+ struct fpsimd_state const *fst = &task->thread.fpsimd_state;
+ unsigned int i;
+
+ if (!test_tsk_thread_flag(task, TIF_SVE))
+ return;
+
+ vq = sve_vq_from_vl(task->thread.sve_vl);
+
+ memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
+
+ for (i = 0; i < 32; ++i)
+ memcpy(ZREG(sst, vq, i), &fst->vregs[i],
+ sizeof(fst->vregs[i]));
+}
+
+int sve_set_vector_length(struct task_struct *task,
+ unsigned long vl, unsigned long flags)
+{
+ if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
+ PR_SVE_SET_VL_ONEXEC))
+ return -EINVAL;
+
+ if (!sve_vl_valid(vl))
+ return -EINVAL;
+
+ /*
+ * Clamp to the maximum vector length that VL-agnostic SVE code can
+ * work with. A flag may be assigned in the future to allow setting
+ * of larger vector lengths without confusing older software.
+ */
+ if (vl > SVE_VL_ARCH_MAX)
+ vl = SVE_VL_ARCH_MAX;
+
+ vl = find_supported_vector_length(vl);
+
+ if (flags & (PR_SVE_VL_INHERIT |
+ PR_SVE_SET_VL_ONEXEC))
+ task->thread.sve_vl_onexec = vl;
+ else
+ /* Reset VL to system default on next exec: */
+ task->thread.sve_vl_onexec = 0;
+
+ /* Only actually set the VL if not deferred: */
+ if (flags & PR_SVE_SET_VL_ONEXEC)
+ goto out;
+
+ if (vl == task->thread.sve_vl)
+ goto out;
+
+ /*
+ * To ensure the FPSIMD bits of the SVE vector registers are preserved,
+ * write any live register state back to task_struct, and convert to a
+ * non-SVE thread.
+ */
+ if (task == current) {
+ local_bh_disable();
+
+ task_fpsimd_save();
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+ }
+
+ fpsimd_flush_task_state(task);
+ if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
+ sve_to_fpsimd(task);
+
+ if (task == current)
+ local_bh_enable();
+
+ /*
+ * Force reallocation of task SVE state to the correct size
+ * on next use:
+ */
+ sve_free(task);
+
+ task->thread.sve_vl = vl;
+
+out:
+ if (flags & PR_SVE_VL_INHERIT)
+ set_tsk_thread_flag(task, TIF_SVE_VL_INHERIT);
+ else
+ clear_tsk_thread_flag(task, TIF_SVE_VL_INHERIT);
+
+ return 0;
+}
+
+/*
+ * Encode the current vector length and flags for return.
+ * This is only required for prctl(): ptrace has separate fields
+ *
+ * flags are as for sve_set_vector_length().
+ */
+static int sve_prctl_status(unsigned long flags)
+{
+ int ret;
+
+ if (flags & PR_SVE_SET_VL_ONEXEC)
+ ret = current->thread.sve_vl_onexec;
+ else
+ ret = current->thread.sve_vl;
+
+ if (test_thread_flag(TIF_SVE_VL_INHERIT))
+ ret |= PR_SVE_VL_INHERIT;
+
+ return ret;
+}
+
+/* PR_SVE_SET_VL */
+int sve_set_current_vl(unsigned long arg)
+{
+ unsigned long vl, flags;
+ int ret;
+
+ vl = arg & PR_SVE_VL_LEN_MASK;
+ flags = arg & ~vl;
+
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ ret = sve_set_vector_length(current, vl, flags);
+ if (ret)
+ return ret;
+
+ return sve_prctl_status(flags);
+}
+
+/* PR_SVE_GET_VL */
+int sve_get_current_vl(void)
+{
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ return sve_prctl_status(0);
+}
+
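
sve_set_current_vl() and sve_get_current_vl() above back the PR_SVE_SET_VL and PR_SVE_GET_VL prctls added by this series. A hedged userspace sketch follows; the numeric constants are defined locally for older libc headers and are assumed to match the series' uapi prctl.h additions, so check them against that hunk.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SVE_SET_VL			/* assumed values from this series */
#define PR_SVE_SET_VL		50
#define PR_SVE_GET_VL		51
#define PR_SVE_VL_LEN_MASK	0xffff
#define PR_SVE_VL_INHERIT	(1 << 17)
#endif

int main(void)
{
	/* Ask for 256-bit (32-byte) vectors; the kernel clamps and rounds
	 * via find_supported_vector_length(), as in the code above. */
	if (prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT) < 0) {
		perror("PR_SVE_SET_VL");
		return 1;
	}

	/* The return value mirrors sve_prctl_status(): VL in the low bits. */
	printf("VL is now %d bytes\n",
	       prctl(PR_SVE_GET_VL) & PR_SVE_VL_LEN_MASK);
	return 0;
}
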
+/*
+ * Bitmap for temporary storage of the per-CPU set of supported vector lengths
+ * during secondary boot.
+ */
+static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);
+
+static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
+{
+ unsigned int vq, vl;
+ unsigned long zcr;
+
+ bitmap_zero(map, SVE_VQ_MAX);
+
+ zcr = ZCR_ELx_LEN_MASK;
+ zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;
+
+ for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
+ write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
+ vl = sve_get_vl();
+ vq = sve_vq_from_vl(vl); /* skip intervening lengths */
+ set_bit(vq_to_bit(vq), map);
+ }
+}
+
+void __init sve_init_vq_map(void)
+{
+ sve_probe_vqs(sve_vq_map);
+}
+
+/*
+ * If we haven't committed to the set of supported VQs yet, filter out
+ * those not supported by the current CPU.
+ */
+void sve_update_vq_map(void)
+{
+ sve_probe_vqs(sve_secondary_vq_map);
+ bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
+}
+
+/* Check whether the current CPU supports all VQs in the committed set */
+int sve_verify_vq_map(void)
+{
+ int ret = 0;
+
+ sve_probe_vqs(sve_secondary_vq_map);
+ bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
+ SVE_VQ_MAX);
+ if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
+ pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
+ smp_processor_id());
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void __init sve_efi_setup(void)
+{
+ if (!IS_ENABLED(CONFIG_EFI))
+ return;
+
+ /*
+ * alloc_percpu() warns and prints a backtrace if this goes wrong.
+ * This is evidence of a crippled system and we are returning void,
+ * so no attempt is made to handle this situation here.
+ */
+ if (!sve_vl_valid(sve_max_vl))
+ goto fail;
+
+ efi_sve_state = __alloc_percpu(
+ SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
+ if (!efi_sve_state)
+ goto fail;
+
+ return;
+
+fail:
+ panic("Cannot allocate percpu memory for EFI SVE save/restore");
+}
+
+/*
+ * Enable SVE for EL1.
+ * Intended for use by the cpufeatures code during CPU boot.
+ */
+int sve_kernel_enable(void *__always_unused p)
+{
+ write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
+ isb();
+
+ return 0;
+}
+
+void __init sve_setup(void)
+{
+ u64 zcr;
+
+ if (!system_supports_sve())
+ return;
+
+ /*
+ * The SVE architecture mandates support for 128-bit vectors,
+ * so sve_vq_map must have at least SVE_VQ_MIN set.
+ * If something went wrong, at least try to patch it up:
+ */
+ if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+ set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+
+ zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
+ sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
+
+ /*
+ * Sanity-check that the max VL we determined through CPU features
+ * corresponds properly to sve_vq_map. If not, do our best:
+ */
+ if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
+ sve_max_vl = find_supported_vector_length(sve_max_vl);
+
+ /*
+ * For the default VL, pick the maximum supported value <= 64.
+ * VL == 64 is guaranteed not to grow the signal frame.
+ */
+ sve_default_vl = find_supported_vector_length(64);
+
+ pr_info("SVE: maximum available vector length %u bytes per vector\n",
+ sve_max_vl);
+ pr_info("SVE: default vector length %u bytes per vector\n",
+ sve_default_vl);
+
+ sve_efi_setup();
+}
+
+/*
+ * Called from the put_task_struct() path, which cannot get here
+ * unless dead_task is really dead and not schedulable.
+ */
+void fpsimd_release_task(struct task_struct *dead_task)
+{
+ __sve_free(dead_task);
+}
+
+#endif /* CONFIG_ARM64_SVE */
+
+/*
+ * Trapped SVE access
+ *
+ * Storage is allocated for the full SVE state, the current FPSIMD
+ * register contents are migrated across, and TIF_SVE is set so that
+ * the SVE access trap will be disabled the next time this task
+ * reaches ret_to_user.
+ *
+ * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
+ * would have disabled the SVE access trap for userspace during
+ * ret_to_user, making an SVE access trap impossible in that case.
+ */
+asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
+{
+ /* Even if we chose not to use SVE, the hardware could still trap: */
+ if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ return;
+ }
+
+ sve_alloc(current);
+
+ local_bh_disable();
+
+ task_fpsimd_save();
+ fpsimd_to_sve(current);
+
+ /* Force ret_to_user to reload the registers: */
+ fpsimd_flush_task_state(current);
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+
+ if (test_and_set_thread_flag(TIF_SVE))
+ WARN_ON(1); /* SVE access shouldn't have trapped */
+
+ local_bh_enable();
+}
+
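
For context, do_sve_acc() above fires the first time a task touches SVE while CPACR_EL1 has EL0 access disabled; after that, task_fpsimd_load() keeps the trap off until the state is discarded again (for example across a syscall, per the el0_svc hunk earlier). A small sketch of userspace code that takes exactly one such trap, assuming an SVE-capable toolchain; the -march flag and file name are illustrative.

/* Build with something like: gcc -march=armv8-a+sve -o rdvl rdvl.c */
#include <stdio.h>

int main(void)
{
	long vl;

	/* RDVL Xd, #1 reads the current vector length in bytes. Executing
	 * any SVE instruction with EL0 access disabled is what enters
	 * do_sve_acc(); afterwards the task runs SVE untrapped. */
	asm volatile("rdvl %0, #1" : "=r"(vl));

	printf("SVE vector length: %ld bytes\n", vl);
	return 0;
}
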
/*
* Trapped FP/ASIMD access.
*/
-void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
+asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
/* TODO: implement lazy context saving/restoring */
WARN_ON(1);
@@ -111,7 +859,7 @@ void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
/*
* Raise a SIGFPE for the current process.
*/
-void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
+asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
siginfo_t info;
unsigned int si_code = 0;
@@ -144,8 +892,8 @@ void fpsimd_thread_switch(struct task_struct *next)
* the registers is in fact the most recent userland FPSIMD state of
* 'current'.
*/
- if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
- fpsimd_save_state(&current->thread.fpsimd_state);
+ if (current->mm)
+ task_fpsimd_save();
if (next->mm) {
/*
@@ -159,16 +907,16 @@ void fpsimd_thread_switch(struct task_struct *next)
if (__this_cpu_read(fpsimd_last_state) == st
&& st->cpu == smp_processor_id())
- clear_ti_thread_flag(task_thread_info(next),
- TIF_FOREIGN_FPSTATE);
+ clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
else
- set_ti_thread_flag(task_thread_info(next),
- TIF_FOREIGN_FPSTATE);
+ set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
}
}
void fpsimd_flush_thread(void)
{
+ int vl, supported_vl;
+
if (!system_supports_fpsimd())
return;
@@ -176,6 +924,42 @@ void fpsimd_flush_thread(void)
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
+
+ if (system_supports_sve()) {
+ clear_thread_flag(TIF_SVE);
+ sve_free(current);
+
+ /*
+ * Reset the task vector length as required.
+ * This is where we ensure that all user tasks have a valid
+ * vector length configured: no kernel task can become a user
+ * task without an exec and hence a call to this function.
+ * By the time the first call to this function is made, all
+ * early hardware probing is complete, so sve_default_vl
+ * should be valid.
+ * If a bug causes this to go wrong, we make some noise and
+ * try to fudge thread.sve_vl to a safe value here.
+ */
+ vl = current->thread.sve_vl_onexec ?
+ current->thread.sve_vl_onexec : sve_default_vl;
+
+ if (WARN_ON(!sve_vl_valid(vl)))
+ vl = SVE_VL_MIN;
+
+ supported_vl = find_supported_vector_length(vl);
+ if (WARN_ON(supported_vl != vl))
+ vl = supported_vl;
+
+ current->thread.sve_vl = vl;
+
+ /*
+ * If the task is not set to inherit, ensure that the vector
+ * length will be reset by a subsequent exec:
+ */
+ if (!test_thread_flag(TIF_SVE_VL_INHERIT))
+ current->thread.sve_vl_onexec = 0;
+ }
+
set_thread_flag(TIF_FOREIGN_FPSTATE);
local_bh_enable();
@@ -191,14 +975,23 @@ void fpsimd_preserve_current_state(void)
return;
local_bh_disable();
-
- if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
- fpsimd_save_state(&current->thread.fpsimd_state);
-
+ task_fpsimd_save();
local_bh_enable();
}
/*
+ * Like fpsimd_preserve_current_state(), but ensure that
+ * current->thread.fpsimd_state is updated so that it can be copied to
+ * the signal frame.
+ */
+void fpsimd_signal_preserve_current_state(void)
+{
+ fpsimd_preserve_current_state();
+ if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ sve_to_fpsimd(current);
+}
+
+/*
* Load the userland FPSIMD state of 'current' from memory, but only if the
* FPSIMD state already held in the registers is /not/ the most recent FPSIMD
* state of 'current'
@@ -213,7 +1006,7 @@ void fpsimd_restore_current_state(void)
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
struct fpsimd_state *st = &current->thread.fpsimd_state;
- fpsimd_load_state(st);
+ task_fpsimd_load();
__this_cpu_write(fpsimd_last_state, st);
st->cpu = smp_processor_id();
}
@@ -233,7 +1026,12 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
local_bh_disable();
- fpsimd_load_state(state);
+ if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
+ current->thread.fpsimd_state = *state;
+ fpsimd_to_sve(current);
+ }
+ task_fpsimd_load();
+
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
struct fpsimd_state *st = &current->thread.fpsimd_state;
@@ -252,6 +1050,33 @@ void fpsimd_flush_task_state(struct task_struct *t)
t->thread.fpsimd_state.cpu = NR_CPUS;
}
+static inline void fpsimd_flush_cpu_state(void)
+{
+ __this_cpu_write(fpsimd_last_state, NULL);
+}
+
+/*
+ * Invalidate any task SVE state currently held in this CPU's regs.
+ *
+ * This is used to prevent the kernel from trying to reuse SVE register data
+ * that is destroyed by KVM guest enter/exit. This function should go away when
+ * KVM SVE support is implemented. Don't use it for anything else.
+ */
+#ifdef CONFIG_ARM64_SVE
+void sve_flush_cpu_state(void)
+{
+ struct fpsimd_state *const fpstate = __this_cpu_read(fpsimd_last_state);
+ struct task_struct *tsk;
+
+ if (!fpstate)
+ return;
+
+ tsk = container_of(fpstate, struct task_struct, thread.fpsimd_state);
+ if (test_tsk_thread_flag(tsk, TIF_SVE))
+ fpsimd_flush_cpu_state();
+}
+#endif /* CONFIG_ARM64_SVE */
+
#ifdef CONFIG_KERNEL_MODE_NEON
DEFINE_PER_CPU(bool, kernel_neon_busy);
@@ -286,11 +1111,13 @@ void kernel_neon_begin(void)
__this_cpu_write(kernel_neon_busy, true);
/* Save unsaved task fpsimd state, if any: */
- if (current->mm && !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
- fpsimd_save_state(&current->thread.fpsimd_state);
+ if (current->mm) {
+ task_fpsimd_save();
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+ }
/* Invalidate any task state remaining in the fpsimd regs: */
- __this_cpu_write(fpsimd_last_state, NULL);
+ fpsimd_flush_cpu_state();
preempt_disable();
@@ -325,6 +1152,7 @@ EXPORT_SYMBOL(kernel_neon_end);
static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
+static DEFINE_PER_CPU(bool, efi_sve_state_used);
/*
* EFI runtime services support functions
@@ -350,10 +1178,24 @@ void __efi_fpsimd_begin(void)
WARN_ON(preemptible());
- if (may_use_simd())
+ if (may_use_simd()) {
kernel_neon_begin();
- else {
- fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
+ } else {
+ /*
+ * If !efi_sve_state, SVE can't be in use yet and doesn't need
+ * preserving:
+ */
+ if (system_supports_sve() && likely(efi_sve_state)) {
+ char *sve_state = this_cpu_ptr(efi_sve_state);
+
+ __this_cpu_write(efi_sve_state_used, true);
+
+ sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
+ &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
+ } else {
+ fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
+ }
+
__this_cpu_write(efi_fpsimd_state_used, true);
}
}
@@ -366,10 +1208,22 @@ void __efi_fpsimd_end(void)
if (!system_supports_fpsimd())
return;
- if (__this_cpu_xchg(efi_fpsimd_state_used, false))
- fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
- else
+ if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
kernel_neon_end();
+ } else {
+ if (system_supports_sve() &&
+ likely(__this_cpu_read(efi_sve_state_used))) {
+ char const *sve_state = this_cpu_ptr(efi_sve_state);
+
+ sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
+ &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
+ sve_vq_from_vl(sve_get_vl()) - 1);
+
+ __this_cpu_write(efi_sve_state_used, false);
+ } else {
+ fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
+ }
+ }
}
#endif /* CONFIG_EFI */
@@ -382,9 +1236,9 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
{
switch (cmd) {
case CPU_PM_ENTER:
- if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
- fpsimd_save_state(&current->thread.fpsimd_state);
- this_cpu_write(fpsimd_last_state, NULL);
+ if (current->mm)
+ task_fpsimd_save();
+ fpsimd_flush_cpu_state();
break;
case CPU_PM_EXIT:
if (current->mm)
@@ -442,6 +1296,6 @@ static int __init fpsimd_init(void)
if (!(elf_hwcap & HWCAP_ASIMD))
pr_notice("Advanced SIMD is not implemented\n");
- return 0;
+ return sve_sysctl_init();
}
-late_initcall(fpsimd_init);
+core_initcall(fpsimd_init);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0b243ecaf7ac..67e86a0f57ac 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -480,14 +480,21 @@ set_hcr:
/* Statistical profiling */
ubfx x0, x1, #32, #4 // Check ID_AA64DFR0_EL1 PMSVer
- cbz x0, 6f // Skip if SPE not present
- cbnz x2, 5f // VHE?
+ cbz x0, 7f // Skip if SPE not present
+ cbnz x2, 6f // VHE?
+ mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2,
+ and x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
+ cbnz x4, 5f // then permit sampling of physical
+ mov x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
+ 1 << SYS_PMSCR_EL2_PA_SHIFT)
+ msr_s SYS_PMSCR_EL2, x4 // addresses and physical counter
+5:
mov x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
orr x3, x3, x1 // If we don't have VHE, then
- b 6f // use EL1&0 translation.
-5: // For VHE, use EL2 translation
+ b 7f // use EL1&0 translation.
+6: // For VHE, use EL2 translation
orr x3, x3, #MDCR_EL2_TPMS // and disable access from EL1
-6:
+7:
msr mdcr_el2, x3 // Configure debug traps
/* Stage-2 translation */
@@ -517,8 +524,19 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
mov x0, #0x33ff
msr cptr_el2, x0 // Disable copro. traps to EL2
+ /* SVE register access */
+ mrs x1, id_aa64pfr0_el1
+ ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
+ cbz x1, 7f
+
+ bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps
+ msr cptr_el2, x0 // Disable copro. traps to EL2
+ isb
+ mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
+ msr_s SYS_ZCR_EL2, x1 // length for EL1.
+
/* Hypervisor stub */
- adr_l x0, __hyp_stub_vectors
+7: adr_l x0, __hyp_stub_vectors
msr vbar_el2, x0
/* spsr */
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 095d3c170f5d..3009b8b80f08 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -27,6 +27,7 @@
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
@@ -285,7 +286,7 @@ int swsusp_arch_suspend(void)
return -EBUSY;
}
- local_dbg_save(flags);
+ flags = local_daif_save();
if (__cpu_suspend_enter(&state)) {
/* make the crash dump kernel image visible/saveable */
@@ -315,7 +316,7 @@ int swsusp_arch_suspend(void)
__cpu_suspend_exit();
}
- local_dbg_restore(flags);
+ local_daif_restore(flags);
return ret;
}
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c
index 354be2a872ae..79b17384effa 100644
--- a/arch/arm64/kernel/io.c
+++ b/arch/arm64/kernel/io.c
@@ -25,8 +25,7 @@
*/
void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
- while (count && (!IS_ALIGNED((unsigned long)from, 8) ||
- !IS_ALIGNED((unsigned long)to, 8))) {
+ while (count && !IS_ALIGNED((unsigned long)from, 8)) {
*(u8 *)to = __raw_readb(from);
from++;
to++;
@@ -54,23 +53,22 @@ EXPORT_SYMBOL(__memcpy_fromio);
*/
void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
- while (count && (!IS_ALIGNED((unsigned long)to, 8) ||
- !IS_ALIGNED((unsigned long)from, 8))) {
- __raw_writeb(*(volatile u8 *)from, to);
+ while (count && !IS_ALIGNED((unsigned long)to, 8)) {
+ __raw_writeb(*(u8 *)from, to);
from++;
to++;
count--;
}
while (count >= 8) {
- __raw_writeq(*(volatile u64 *)from, to);
+ __raw_writeq(*(u64 *)from, to);
from += 8;
to += 8;
count -= 8;
}
while (count) {
- __raw_writeb(*(volatile u8 *)from, to);
+ __raw_writeb(*(u8 *)from, to);
from++;
to++;
count--;
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 11121f608eb5..f76ea92dff91 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -18,6 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
+#include <asm/daifflags.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -195,8 +196,7 @@ void machine_kexec(struct kimage *kimage)
pr_info("Bye!\n");
- /* Disable all DAIF exceptions. */
- asm volatile ("msr daifset, #0xf" : : : "memory");
+ local_daif_mask();
/*
* cpu_soft_restart will shutdown the MMU, disable data caches, then
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index bd1b74c2436f..1d091d048d04 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
index 89b6df613dde..8e4be92e25b1 100644
--- a/arch/arm64/kernel/probes/Makefile
+++ b/arch/arm64/kernel/probes/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \
kprobes_trampoline.o \
simulate-insn.o
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
index 5d6e7f14638c..45dce03aaeaf 100644
--- a/arch/arm64/kernel/probes/kprobes_trampoline.S
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* trampoline entry and return code for kretprobes.
*/
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 2dc0f8482210..b2adcce7bc18 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -49,6 +49,7 @@
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
+#include <linux/thread_info.h>
#include <asm/alternative.h>
#include <asm/compat.h>
@@ -170,6 +171,39 @@ void machine_restart(char *cmd)
while (1);
}
+static void print_pstate(struct pt_regs *regs)
+{
+ u64 pstate = regs->pstate;
+
+ if (compat_user_mode(regs)) {
+ printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
+ pstate,
+ pstate & COMPAT_PSR_N_BIT ? 'N' : 'n',
+ pstate & COMPAT_PSR_Z_BIT ? 'Z' : 'z',
+ pstate & COMPAT_PSR_C_BIT ? 'C' : 'c',
+ pstate & COMPAT_PSR_V_BIT ? 'V' : 'v',
+ pstate & COMPAT_PSR_Q_BIT ? 'Q' : 'q',
+ pstate & COMPAT_PSR_T_BIT ? "T32" : "A32",
+ pstate & COMPAT_PSR_E_BIT ? "BE" : "LE",
+ pstate & COMPAT_PSR_A_BIT ? 'A' : 'a',
+ pstate & COMPAT_PSR_I_BIT ? 'I' : 'i',
+ pstate & COMPAT_PSR_F_BIT ? 'F' : 'f');
+ } else {
+ printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
+ pstate,
+ pstate & PSR_N_BIT ? 'N' : 'n',
+ pstate & PSR_Z_BIT ? 'Z' : 'z',
+ pstate & PSR_C_BIT ? 'C' : 'c',
+ pstate & PSR_V_BIT ? 'V' : 'v',
+ pstate & PSR_D_BIT ? 'D' : 'd',
+ pstate & PSR_A_BIT ? 'A' : 'a',
+ pstate & PSR_I_BIT ? 'I' : 'i',
+ pstate & PSR_F_BIT ? 'F' : 'f',
+ pstate & PSR_PAN_BIT ? '+' : '-',
+ pstate & PSR_UAO_BIT ? '+' : '-');
+ }
+}
+
void __show_regs(struct pt_regs *regs)
{
int i, top_reg;
@@ -186,10 +220,9 @@ void __show_regs(struct pt_regs *regs)
}
show_regs_print_info(KERN_DEFAULT);
- print_symbol("PC is at %s\n", instruction_pointer(regs));
- print_symbol("LR is at %s\n", lr);
- printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
- regs->pc, lr, regs->pstate);
+ print_pstate(regs);
+ print_symbol("pc : %s\n", regs->pc);
+ print_symbol("lr : %s\n", lr);
printk("sp : %016llx\n", sp);
i = top_reg;
@@ -241,11 +274,27 @@ void release_thread(struct task_struct *dead_task)
{
}
+void arch_release_task_struct(struct task_struct *tsk)
+{
+ fpsimd_release_task(tsk);
+}
+
+/*
+ * src and dst may temporarily have aliased sve_state after task_struct
+ * is copied. We cannot fix this properly here, because src may have
+ * live SVE state and dst's thread_info may not exist yet, so tweaking
+ * either src's or dst's TIF_SVE is not safe.
+ *
+ * The unaliasing is done in copy_thread() instead. This works because
+ * dst is not schedulable or traceable until both of these functions
+ * have been called.
+ */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
if (current->mm)
fpsimd_preserve_current_state();
*dst = *src;
+
return 0;
}
@@ -258,6 +307,13 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
+ /*
+ * Unalias p->thread.sve_state (if any) from the parent task
+ * and disable SVE for p, dropping the aliased sve_state pointer:
+ */
+ clear_tsk_thread_flag(p, TIF_SVE);
+ p->thread.sve_state = NULL;
+
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 9cbb6123208f..7c44658b316d 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -32,6 +32,7 @@
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
+#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
@@ -40,6 +41,7 @@
#include <linux/elf.h>
#include <asm/compat.h>
+#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/stacktrace.h>
@@ -618,17 +620,56 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
/*
* TODO: update fp accessors for lazy context switching (sync/flush hwstate)
*/
-static int fpr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+static int __fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf, unsigned int start_pos)
{
struct user_fpsimd_state *uregs;
+
+ sve_sync_to_fpsimd(target);
+
uregs = &target->thread.fpsimd_state.user_fpsimd;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
+ start_pos, start_pos + sizeof(*uregs));
+}
+
+static int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
if (target == current)
fpsimd_preserve_current_state();
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
+ return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
+}
+
+static int __fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf,
+ unsigned int start_pos)
+{
+ int ret;
+ struct user_fpsimd_state newstate;
+
+ /*
+ * Ensure target->thread.fpsimd_state is up to date, so that a
+ * short copyin can't resurrect stale data.
+ */
+ sve_sync_to_fpsimd(target);
+
+ newstate = target->thread.fpsimd_state.user_fpsimd;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
+ start_pos, start_pos + sizeof(newstate));
+ if (ret)
+ return ret;
+
+ target->thread.fpsimd_state.user_fpsimd = newstate;
+
+ return ret;
}
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
@@ -636,15 +677,14 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct user_fpsimd_state newstate =
- target->thread.fpsimd_state.user_fpsimd;
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
+ ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
if (ret)
return ret;
- target->thread.fpsimd_state.user_fpsimd = newstate;
+ sve_sync_from_fpsimd_zeropad(target);
fpsimd_flush_task_state(target);
+
return ret;
}
@@ -702,6 +742,215 @@ static int system_call_set(struct task_struct *target,
return ret;
}
+#ifdef CONFIG_ARM64_SVE
+
+static void sve_init_header_from_task(struct user_sve_header *header,
+ struct task_struct *target)
+{
+ unsigned int vq;
+
+ memset(header, 0, sizeof(*header));
+
+ header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
+ SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
+ if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
+ header->flags |= SVE_PT_VL_INHERIT;
+
+ header->vl = target->thread.sve_vl;
+ vq = sve_vq_from_vl(header->vl);
+
+ header->max_vl = sve_max_vl;
+ if (WARN_ON(!sve_vl_valid(sve_max_vl)))
+ header->max_vl = header->vl;
+
+ header->size = SVE_PT_SIZE(vq, header->flags);
+ header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
+ SVE_PT_REGS_SVE);
+}
+
+static unsigned int sve_size_from_header(struct user_sve_header const *header)
+{
+ return ALIGN(header->size, SVE_VQ_BYTES);
+}
+
+static unsigned int sve_get_size(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ struct user_sve_header header;
+
+ if (!system_supports_sve())
+ return 0;
+
+ sve_init_header_from_task(&header, target);
+ return sve_size_from_header(&header);
+}
+
+static int sve_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+ struct user_sve_header header;
+ unsigned int vq;
+ unsigned long start, end;
+
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ /* Header */
+ sve_init_header_from_task(&header, target);
+ vq = sve_vq_from_vl(header.vl);
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
+ 0, sizeof(header));
+ if (ret)
+ return ret;
+
+ if (target == current)
+ fpsimd_preserve_current_state();
+
+ /* Registers: FPSIMD-only case */
+
+ BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
+ if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
+ return __fpr_get(target, regset, pos, count, kbuf, ubuf,
+ SVE_PT_FPSIMD_OFFSET);
+
+ /* Otherwise: full SVE case */
+
+ BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+ start = SVE_PT_SVE_OFFSET;
+ end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ target->thread.sve_state,
+ start, end);
+ if (ret)
+ return ret;
+
+ start = end;
+ end = SVE_PT_SVE_FPSR_OFFSET(vq);
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ start, end);
+ if (ret)
+ return ret;
+
+ /*
+ * Copy fpsr, and fpcr which must follow contiguously in
+ * struct fpsimd_state:
+ */
+ start = end;
+ end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpsimd_state.fpsr,
+ start, end);
+ if (ret)
+ return ret;
+
+ start = end;
+ end = sve_size_from_header(&header);
+ return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ start, end);
+}
+
+static int sve_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct user_sve_header header;
+ unsigned int vq;
+ unsigned long start, end;
+
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ /* Header */
+ if (count < sizeof(header))
+ return -EINVAL;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
+ 0, sizeof(header));
+ if (ret)
+ goto out;
+
+ /*
+ * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
+ * sve_set_vector_length(), which will also validate them for us:
+ */
+ ret = sve_set_vector_length(target, header.vl,
+ ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
+ if (ret)
+ goto out;
+
+ /* Actual VL set may be less than the user asked for: */
+ vq = sve_vq_from_vl(target->thread.sve_vl);
+
+ /* Registers: FPSIMD-only case */
+
+ BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
+ if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
+ ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
+ SVE_PT_FPSIMD_OFFSET);
+ clear_tsk_thread_flag(target, TIF_SVE);
+ goto out;
+ }
+
+ /* Otherwise: full SVE case */
+
+ /*
+ * If setting a different VL from the requested VL and there is
+ * register data, the data layout will be wrong: don't even
+ * try to set the registers in this case.
+ */
+ if (count && vq != sve_vq_from_vl(header.vl)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ sve_alloc(target);
+
+ /*
+ * Ensure target->thread.sve_state is up to date with target's
+ * FPSIMD regs, so that a short copyin leaves trailing registers
+ * unmodified.
+ */
+ fpsimd_sync_to_sve(target);
+ set_tsk_thread_flag(target, TIF_SVE);
+
+ BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+ start = SVE_PT_SVE_OFFSET;
+ end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ target->thread.sve_state,
+ start, end);
+ if (ret)
+ goto out;
+
+ start = end;
+ end = SVE_PT_SVE_FPSR_OFFSET(vq);
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ start, end);
+ if (ret)
+ goto out;
+
+ /*
+ * Copy fpsr, and fpcr which must follow contiguously in
+ * struct fpsimd_state:
+ */
+ start = end;
+ end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpsimd_state.fpsr,
+ start, end);
+
+out:
+ fpsimd_flush_task_state(target);
+ return ret;
+}
+
+#endif /* CONFIG_ARM64_SVE */
+
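
The NT_ARM_SVE regset above always starts with a user_sve_header, so a debugger can fetch just that header with PTRACE_GETREGSET to learn the tracee's vector length and whether SVE_PT_REGS_SVE or SVE_PT_REGS_FPSIMD data follows. A hedged sketch is below; the NT_ARM_SVE value and the header layout are local mirrors of this series' uapi additions (assumptions to verify against those hunks), and the tracee is assumed to be already attached and stopped.

#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_ARM_SVE
#define NT_ARM_SVE 0x405		/* assumed value from this series */
#endif

/* Local mirror of struct user_sve_header (assumed layout). */
struct sve_header {
	uint32_t size;			/* bytes of regset data for this thread */
	uint32_t max_size;
	uint16_t vl;			/* current vector length, in bytes */
	uint16_t max_vl;
	uint16_t flags;			/* SVE_PT_REGS_* | SVE_PT_VL_INHERIT */
	uint16_t reserved;
};

/* sve_get() above copies out only as much as iov_len asks for. */
static long read_sve_header(pid_t pid, struct sve_header *h)
{
	struct iovec iov = { .iov_base = h, .iov_len = sizeof(*h) };

	return ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);
}

If the returned flags select SVE_PT_REGS_SVE, a second, larger read of header.size bytes retrieves the Z/P/FFR payload that sve_get() lays out above.
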
enum aarch64_regset {
REGSET_GPR,
REGSET_FPR,
@@ -711,6 +960,9 @@ enum aarch64_regset {
REGSET_HW_WATCH,
#endif
REGSET_SYSTEM_CALL,
+#ifdef CONFIG_ARM64_SVE
+ REGSET_SVE,
+#endif
};
static const struct user_regset aarch64_regsets[] = {
@@ -768,6 +1020,18 @@ static const struct user_regset aarch64_regsets[] = {
.get = system_call_get,
.set = system_call_set,
},
+#ifdef CONFIG_ARM64_SVE
+ [REGSET_SVE] = { /* Scalable Vector Extension */
+ .core_note_type = NT_ARM_SVE,
+ .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
+ SVE_VQ_BYTES),
+ .size = SVE_VQ_BYTES,
+ .align = SVE_VQ_BYTES,
+ .get = sve_get,
+ .set = sve_set,
+ .get_size = sve_get_size,
+ },
+#endif
};
static const struct user_regset_view user_aarch64_view = {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index d4b740538ad5..30ad2f085d1f 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -23,7 +23,6 @@
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
-#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
@@ -48,6 +47,7 @@
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
+#include <asm/daifflags.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -103,7 +103,8 @@ void __init smp_setup_processor_id(void)
* access percpu variable inside lock_release
*/
set_my_cpu_offset(0);
- pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
+ pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
+ (unsigned long)mpidr, read_cpuid_id());
}
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
@@ -244,9 +245,6 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
{
- pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id());
-
- sprintf(init_utsname()->machine, UTS_MACHINE);
init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
@@ -262,10 +260,11 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
/*
- * Unmask asynchronous aborts after bringing up possible earlycon.
- * (Report possible System Errors once we can report this occurred)
+ * Unmask asynchronous aborts and fiq after bringing up possible
+ * earlycon. (Report possible System Errors once we can report this
+ * occurred).
*/
- local_async_enable();
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 0bdc96c61bc0..b120111a46be 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -31,6 +31,7 @@
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
+#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
@@ -63,6 +64,7 @@ struct rt_sigframe_user_layout {
unsigned long fpsimd_offset;
unsigned long esr_offset;
+ unsigned long sve_offset;
unsigned long extra_offset;
unsigned long end_offset;
};
@@ -179,9 +181,6 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
int err;
- /* dump the hardware registers to the fpsimd_state structure */
- fpsimd_preserve_current_state();
-
/* copy the FP and status/control registers */
err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
@@ -214,6 +213,8 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
+ clear_thread_flag(TIF_SVE);
+
/* load the hardware registers from the fpsimd_state structure */
if (!err)
fpsimd_update_current_state(&fpsimd);
@@ -221,10 +222,118 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
return err ? -EFAULT : 0;
}
+
struct user_ctxs {
struct fpsimd_context __user *fpsimd;
+ struct sve_context __user *sve;
};
+#ifdef CONFIG_ARM64_SVE
+
+static int preserve_sve_context(struct sve_context __user *ctx)
+{
+ int err = 0;
+ u16 reserved[ARRAY_SIZE(ctx->__reserved)];
+ unsigned int vl = current->thread.sve_vl;
+ unsigned int vq = 0;
+
+ if (test_thread_flag(TIF_SVE))
+ vq = sve_vq_from_vl(vl);
+
+ memset(reserved, 0, sizeof(reserved));
+
+ __put_user_error(SVE_MAGIC, &ctx->head.magic, err);
+ __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
+ &ctx->head.size, err);
+ __put_user_error(vl, &ctx->vl, err);
+ BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
+ err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
+
+ if (vq) {
+ /*
+ * This assumes that the SVE state has already been saved to
+ * the task struct by calling fpsimd_signal_preserve_current_state().
+ */
+ err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
+ current->thread.sve_state,
+ SVE_SIG_REGS_SIZE(vq));
+ }
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_sve_fpsimd_context(struct user_ctxs *user)
+{
+ int err;
+ unsigned int vq;
+ struct fpsimd_state fpsimd;
+ struct sve_context sve;
+
+ if (__copy_from_user(&sve, user->sve, sizeof(sve)))
+ return -EFAULT;
+
+ if (sve.vl != current->thread.sve_vl)
+ return -EINVAL;
+
+ if (sve.head.size <= sizeof(*user->sve)) {
+ clear_thread_flag(TIF_SVE);
+ goto fpsimd_only;
+ }
+
+ vq = sve_vq_from_vl(sve.vl);
+
+ if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
+ return -EINVAL;
+
+ /*
+ * Careful: we are about to __copy_from_user() directly into
+ * thread.sve_state with preemption enabled, so protection is
+ * needed to prevent a racing context switch from writing stale
+ * registers back over the new data.
+ */
+
+ fpsimd_flush_task_state(current);
+ barrier();
+ /* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */
+
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+ barrier();
+ /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
+
+ sve_alloc(current);
+ err = __copy_from_user(current->thread.sve_state,
+ (char __user const *)user->sve +
+ SVE_SIG_REGS_OFFSET,
+ SVE_SIG_REGS_SIZE(vq));
+ if (err)
+ return -EFAULT;
+
+ set_thread_flag(TIF_SVE);
+
+fpsimd_only:
+ /* copy the FP and status/control registers */
+ /* restore_sigframe() already checked that user->fpsimd != NULL. */
+ err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
+ sizeof(fpsimd.vregs));
+ __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
+ __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);
+
+ /* load the hardware registers from the fpsimd_state structure */
+ if (!err)
+ fpsimd_update_current_state(&fpsimd);
+
+ return err ? -EFAULT : 0;
+}
+
+#else /* ! CONFIG_ARM64_SVE */
+
+/* Turn any non-optimised-out attempts to use these into a link error: */
+extern int preserve_sve_context(void __user *ctx);
+extern int restore_sve_fpsimd_context(struct user_ctxs *user);
+
+#endif /* ! CONFIG_ARM64_SVE */
+
+
static int parse_user_sigframe(struct user_ctxs *user,
struct rt_sigframe __user *sf)
{
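/*
 * Illustrative userspace sketch (not part of the patch): how a signal
 * handler can locate the SVE record that the kernel code above lays out.
 * Records in uc_mcontext.__reserved[] form a chain of _aarch64_ctx
 * headers terminated by a zero magic.  Assumes uapi headers new enough
 * to define SVE_MAGIC and struct sve_context; a complete walker would
 * also follow an EXTRA_MAGIC record into its out-of-line area.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <asm/sigcontext.h>

static void sigusr1_handler(int sig, siginfo_t *info, void *uc_)
{
	ucontext_t *uc = uc_;
	struct _aarch64_ctx *head =
		(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

	for (; head->magic; head = (void *)((char *)head + head->size)) {
		if (head->magic == SVE_MAGIC) {
			struct sve_context *sve = (struct sve_context *)head;

			/* printf() is not async-signal-safe; fine for a demo */
			printf("SVE record: vl=%u, record size=%u\n",
			       sve->vl, sve->head.size);
		}
	}
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigusr1_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}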
@@ -237,6 +346,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
char const __user *const sfp = (char const __user *)sf;
user->fpsimd = NULL;
+ user->sve = NULL;
if (!IS_ALIGNED((unsigned long)base, 16))
goto invalid;
@@ -287,6 +397,19 @@ static int parse_user_sigframe(struct user_ctxs *user,
/* ignore */
break;
+ case SVE_MAGIC:
+ if (!system_supports_sve())
+ goto invalid;
+
+ if (user->sve)
+ goto invalid;
+
+ if (size < sizeof(*user->sve))
+ goto invalid;
+
+ user->sve = (struct sve_context __user *)head;
+ break;
+
case EXTRA_MAGIC:
if (have_extra_context)
goto invalid;
@@ -343,6 +466,10 @@ static int parse_user_sigframe(struct user_ctxs *user,
*/
offset = 0;
limit = extra_size;
+
+ if (!access_ok(VERIFY_READ, base, limit))
+ goto invalid;
+
continue;
default:
@@ -359,9 +486,6 @@ static int parse_user_sigframe(struct user_ctxs *user,
}
done:
- if (!user->fpsimd)
- goto invalid;
-
return 0;
invalid:
@@ -395,8 +519,19 @@ static int restore_sigframe(struct pt_regs *regs,
if (err == 0)
err = parse_user_sigframe(&user, sf);
- if (err == 0)
- err = restore_fpsimd_context(user.fpsimd);
+ if (err == 0) {
+ if (!user.fpsimd)
+ return -EINVAL;
+
+ if (user.sve) {
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ err = restore_sve_fpsimd_context(&user);
+ } else {
+ err = restore_fpsimd_context(user.fpsimd);
+ }
+ }
return err;
}
@@ -455,6 +590,18 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user)
return err;
}
+ if (system_supports_sve()) {
+ unsigned int vq = 0;
+
+ if (test_thread_flag(TIF_SVE))
+ vq = sve_vq_from_vl(current->thread.sve_vl);
+
+ err = sigframe_alloc(user, &user->sve_offset,
+ SVE_SIG_CONTEXT_SIZE(vq));
+ if (err)
+ return err;
+ }
+
return sigframe_alloc_end(user);
}
@@ -496,6 +643,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
}
+ /* Scalable Vector Extension state, if present */
+ if (system_supports_sve() && err == 0 && user->sve_offset) {
+ struct sve_context __user *sve_ctx =
+ apply_user_offset(user, user->sve_offset);
+ err |= preserve_sve_context(sve_ctx);
+ }
+
if (err == 0 && user->extra_offset) {
char __user *sfp = (char __user *)user->sigframe;
char __user *userp =
@@ -595,6 +749,8 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct rt_sigframe __user *frame;
int err = 0;
+ fpsimd_signal_preserve_current_state();
+
if (get_sigframe(&user, ksig, regs))
return 1;
@@ -756,9 +912,12 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
addr_limit_user_check();
if (thread_flags & _TIF_NEED_RESCHED) {
+ /* Unmask Debug and SError for the next task */
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
schedule();
} else {
- local_irq_enable();
+ local_daif_restore(DAIF_PROCCTX);
if (thread_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
@@ -775,7 +934,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
fpsimd_restore_current_state();
}
- local_irq_disable();
+ local_daif_mask();
thread_flags = READ_ONCE(current_thread_info()->flags);
} while (thread_flags & _TIF_WORK_MASK);
}
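/*
 * Hedged sketch, not the kernel's <asm/daifflags.h> implementation:
 * PSTATE.DAIF packs the Debug, SError (A), IRQ and FIQ mask bits, which
 * is why the hunk above replaces the IRQ-only local_irq_*() helpers.
 * local_daif_mask() sets all four bits; local_daif_restore(DAIF_PROCCTX)
 * unmasks them for process context, and DAIF_PROCCTX_NOIRQ leaves only
 * IRQs masked.  The helpers below show the underlying system-register
 * accesses.
 */
static inline unsigned long daif_save_and_mask_all(void)
{
	unsigned long flags;

	asm volatile(
		"mrs	%0, daif\n"		/* snapshot current mask bits */
		"msr	daifset, #0xf"		/* set D, A, I and F */
		: "=r" (flags) : : "memory");
	return flags;
}

static inline void daif_restore(unsigned long flags)
{
	asm volatile("msr	daif, %0" : : "r" (flags) : "memory");
}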
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index e09bf5d15606..22711ee8e36c 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -239,7 +239,7 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
* Note that this also saves V16-31, which aren't visible
* in AArch32.
*/
- fpsimd_preserve_current_state();
+ fpsimd_signal_preserve_current_state();
/* Place structure header on the stack */
__put_user_error(magic, &frame->magic, err);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index df67652e46f0..10dd16d7902d 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 9f7195a5773e..551eb07c53b6 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -47,6 +47,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
+#include <asm/daifflags.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
@@ -216,6 +217,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
*/
asmlinkage void secondary_start_kernel(void)
{
+ u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
unsigned int cpu;
@@ -265,14 +267,14 @@ asmlinkage void secondary_start_kernel(void)
* the CPU migration code to notice that the CPU is online
* before we continue.
*/
- pr_info("CPU%u: Booted secondary processor [%08x]\n",
- cpu, read_cpuid_id());
+ pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
+ cpu, (unsigned long)mpidr,
+ read_cpuid_id());
update_cpu_boot_status(CPU_BOOT_SUCCESS);
set_cpu_online(cpu, true);
complete(&cpu_running);
- local_irq_enable();
- local_async_enable();
+ local_daif_restore(DAIF_PROCCTX);
/*
* OK, it's off to the idle thread for us
@@ -368,10 +370,6 @@ void __cpu_die(unsigned int cpu)
/*
* Called from the idle thread for the CPU which has been shutdown.
*
- * Note that we disable IRQs here, but do not re-enable them
- * before returning to the caller. This is also the behaviour
- * of the other hotplug-cpu capable cores, so presumably coming
- * out of idle fixes this.
*/
void cpu_die(void)
{
@@ -379,7 +377,7 @@ void cpu_die(void)
idle_task_exit();
- local_irq_disable();
+ local_daif_mask();
/* Tell __cpu_die() that this CPU is now safe to dispose of */
(void)cpu_report_death();
@@ -837,7 +835,7 @@ static void ipi_cpu_stop(unsigned int cpu)
{
set_cpu_online(cpu, false);
- local_irq_disable();
+ local_daif_mask();
while (1)
cpu_relax();
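/*
 * Hedged sketch of why the boot message above now prints a 10-digit hex
 * value: MPIDR_HWID_BITMASK keeps only the affinity fields, and Aff3
 * lives in bits [39:32], so the masked MPIDR can occupy up to 40 bits.
 * Kernel-context illustration only; u64/u8 come from <linux/types.h>.
 */
static void mpidr_affinity_levels(u64 mpidr, u8 aff[4])
{
	aff[0] = mpidr & 0xff;		/* Aff0, bits [7:0]   */
	aff[1] = (mpidr >> 8) & 0xff;	/* Aff1, bits [15:8]  */
	aff[2] = (mpidr >> 16) & 0xff;	/* Aff2, bits [23:16] */
	aff[3] = (mpidr >> 32) & 0xff;	/* Aff3, bits [39:32] */
}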
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 1e3be9064cfa..3fe5ad884418 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,9 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
+#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
#include <asm/pgtable.h>
@@ -11,7 +13,6 @@
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
-#include <asm/tlbflush.h>
/*
* This is allocated by cpu_suspend_init(), and used to store a pointer to
@@ -57,7 +58,7 @@ void notrace __cpu_suspend_exit(void)
/*
* Restore HW breakpoint registers to sane values
* before debug exceptions are possibly reenabled
- * through local_dbg_restore.
+ * by cpu_suspend()'s local_daif_restore() call.
*/
if (hw_breakpoint_restore)
hw_breakpoint_restore(cpu);
@@ -81,7 +82,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* updates to mdscr register (saved and restored along with
* general purpose registers) from kernel debuggers.
*/
- local_dbg_save(flags);
+ flags = local_daif_save();
/*
* Function graph tracer state gets inconsistent when the kernel
@@ -114,7 +115,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* restored, so from this point onwards, debugging is fully
* re-enabled if it was enabled when the core started shutdown.
*/
- local_dbg_restore(flags);
+ local_daif_restore(flags);
return ret;
}
diff --git a/arch/arm64/kernel/trace-events-emulation.h b/arch/arm64/kernel/trace-events-emulation.h
index ae1dd598ea65..6c40f58b844a 100644
--- a/arch/arm64/kernel/trace-events-emulation.h
+++ b/arch/arm64/kernel/trace-events-emulation.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM emulation
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5ea4b85aee0e..3d3588fcd1c7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
#include <asm/atomic.h>
#include <asm/bug.h>
+#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
@@ -58,55 +59,9 @@ static const char *handler[]= {
int show_unhandled_signals = 1;
-/*
- * Dump out the contents of some kernel memory nicely...
- */
-static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
- unsigned long top)
-{
- unsigned long first;
- mm_segment_t fs;
- int i;
-
- /*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
- printk("%s%s(0x%016lx to 0x%016lx)\n", lvl, str, bottom, top);
-
- for (first = bottom & ~31; first < top; first += 32) {
- unsigned long p;
- char str[sizeof(" 12345678") * 8 + 1];
-
- memset(str, ' ', sizeof(str));
- str[sizeof(str) - 1] = '\0';
-
- for (p = first, i = 0; i < (32 / 8)
- && p < top; i++, p += 8) {
- if (p >= bottom && p < top) {
- unsigned long val;
-
- if (__get_user(val, (unsigned long *)p) == 0)
- sprintf(str + i * 17, " %016lx", val);
- else
- sprintf(str + i * 17, " ????????????????");
- }
- }
- printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
- }
-
- set_fs(fs);
-}
-
static void dump_backtrace_entry(unsigned long where)
{
- /*
- * Note that 'where' can have a physical address, but it's not handled.
- */
- print_ip_sym(where);
+ printk(" %pS\n", (void *)where);
}
static void __dump_instr(const char *lvl, struct pt_regs *regs)
@@ -118,7 +73,7 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
for (i = -4; i < 1; i++) {
unsigned int val, bad;
- bad = __get_user(val, &((u32 *)addr)[i]);
+ bad = get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
@@ -171,10 +126,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
skip = !!regs;
printk("Call trace:\n");
- while (1) {
- unsigned long stack;
- int ret;
-
+ do {
/* skip until specified stack frame */
if (!skip) {
dump_backtrace_entry(frame.pc);
@@ -189,17 +141,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
*/
dump_backtrace_entry(regs->pc);
}
- ret = unwind_frame(tsk, &frame);
- if (ret < 0)
- break;
- if (in_entry_text(frame.pc)) {
- stack = frame.fp - offsetof(struct pt_regs, stackframe);
-
- if (on_accessible_stack(tsk, stack))
- dump_mem("", "Exception stack", stack,
- stack + sizeof(struct pt_regs));
- }
- }
+ } while (!unwind_frame(tsk, &frame));
put_task_stack(tsk);
}
@@ -293,6 +235,17 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
}
}
+void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
+{
+ regs->pc += size;
+
+ /*
+ * If we were single stepping, we want to get the step exception after
+ * we return from the trap.
+ */
+ user_fastforward_single_step(current);
+}
+
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);
@@ -358,8 +311,8 @@ exit:
return fn ? fn(regs, instr) : 1;
}
-static void force_signal_inject(int signal, int code, struct pt_regs *regs,
- unsigned long address)
+void force_signal_inject(int signal, int code, struct pt_regs *regs,
+ unsigned long address)
{
siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
@@ -373,7 +326,7 @@ static void force_signal_inject(int signal, int code, struct pt_regs *regs,
desc = "illegal memory access";
break;
default:
- desc = "bad mode";
+ desc = "unknown or unrecoverable error";
break;
}
@@ -480,7 +433,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
if (ret)
arm64_notify_segfault(regs, address);
else
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
@@ -490,7 +443,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
pt_regs_write_reg(regs, rt, val);
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
@@ -498,7 +451,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
@@ -506,7 +459,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
pt_regs_write_reg(regs, rt, arch_timer_get_rate());
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
struct sys64_hook {
@@ -603,6 +556,7 @@ static const char *esr_class_str[] = {
[ESR_ELx_EC_HVC64] = "HVC (AArch64)",
[ESR_ELx_EC_SMC64] = "SMC (AArch64)",
[ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
+ [ESR_ELx_EC_SVE] = "SVE",
[ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
[ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
[ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
@@ -642,7 +596,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
esr_get_class_string(esr));
die("Oops - bad mode", regs, 0);
- local_irq_disable();
+ local_daif_mask();
panic("bad mode");
}
@@ -708,6 +662,19 @@ asmlinkage void handle_bad_stack(struct pt_regs *regs)
}
#endif
+asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
+{
+ nmi_enter();
+
+ console_verbose();
+
+ pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
+ smp_processor_id(), esr, esr_get_class_string(esr));
+ __show_regs(regs);
+
+ panic("Asynchronous SError Interrupt");
+}
+
void __pte_error(const char *file, int line, unsigned long val)
{
pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
@@ -761,7 +728,7 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr)
}
/* If thread survives, skip over the BUG instruction and continue: */
- regs->pc += AARCH64_INSN_SIZE; /* skip BRK and resume */
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
return DBG_HOOK_HANDLED;
}
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 62c84f7cb01b..b215c712d897 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Building a vDSO image for AArch64.
#
diff --git a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
index 01924ff071ad..0664acaf61ff 100755
--- a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
+++ b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
#
# Match symbols in the DSO that look like VDSO_*; produce a header file
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index 76320e920965..c39872a7b03c 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -309,7 +309,7 @@ ENTRY(__kernel_clock_getres)
b.ne 4f
ldr x2, 6f
2:
- cbz w1, 3f
+ cbz x1, 3f
stp xzr, x2, [x1]
3: /* res == NULL. */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index fe56c268a7d9..7da3e5c366a0 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ld script to make ARM Linux kernel
* taken from the i386 version by Russell King
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 52cb7ad9b2fd..13f81f971390 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# KVM configuration
#
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 5d9810086c25..861acbbac385 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Kernel-based Virtual Machine module
#
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 7debb74843a0..b71247995469 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -147,6 +147,13 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /* Until SVE is supported for guests: */
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
@@ -160,6 +167,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_HVC64] = handle_hvc,
[ESR_ELx_EC_SMC64] = handle_smc,
[ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
+ [ESR_ELx_EC_SVE] = handle_sve,
[ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
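/*
 * Hedged sketch of the dispatch above: the exception class is the 6-bit
 * ESR_ELx.EC field in bits [31:26], which is what indexes
 * arm_exit_handlers[] (hence the [0 ... ESR_ELx_EC_MAX] default entry).
 */
static inline unsigned int esr_exception_class(unsigned long esr)
{
	return (esr >> 26) & 0x3f;	/* ESR_ELx_EC_SHIFT, 6-bit field */
}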
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 14c4e3b14bcb..f04400d494b7 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -1,8 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Kernel-based Virtual Machine module, HYP part
#
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
KVM=../../../../virt/kvm
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index f5154ed3da6c..321c9c05dd9e 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -65,16 +65,6 @@
default: write_debug(ptr[0], reg, 0); \
}
-#define PMSCR_EL1 sys_reg(3, 0, 9, 9, 0)
-
-#define PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0)
-#define PMBLIMITR_EL1_E BIT(0)
-
-#define PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7)
-#define PMBIDR_EL1_P BIT(4)
-
-#define psb_csync() asm volatile("hint #17")
-
static void __hyp_text __debug_save_spe_vhe(u64 *pmscr_el1)
{
/* The vcpu can run, but it can't hide. */
@@ -90,18 +80,18 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
return;
/* Yes; is it owned by EL3? */
- reg = read_sysreg_s(PMBIDR_EL1);
- if (reg & PMBIDR_EL1_P)
+ reg = read_sysreg_s(SYS_PMBIDR_EL1);
+ if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
return;
/* No; is the host actually using the thing? */
- reg = read_sysreg_s(PMBLIMITR_EL1);
- if (!(reg & PMBLIMITR_EL1_E))
+ reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
+ if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
return;
/* Yes; save the control register and disable data generation */
- *pmscr_el1 = read_sysreg_s(PMSCR_EL1);
- write_sysreg_s(0, PMSCR_EL1);
+ *pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
+ write_sysreg_s(0, SYS_PMSCR_EL1);
isb();
/* Now drain all buffered data to memory */
@@ -122,7 +112,7 @@ static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
isb();
/* Re-enable data generation */
- write_sysreg_s(pmscr_el1, PMSCR_EL1);
+ write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}
void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 945e79c641c4..951f3ebaff26 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -48,7 +48,7 @@ static void __hyp_text __activate_traps_vhe(void)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
- val &= ~CPACR_EL1_FPEN;
+ val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
write_sysreg(val, cpacr_el1);
write_sysreg(__kvm_hyp_vector, vbar_el1);
@@ -59,7 +59,7 @@ static void __hyp_text __activate_traps_nvhe(void)
u64 val;
val = CPTR_EL2_DEFAULT;
- val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+ val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ;
write_sysreg(val, cptr_el2);
}
@@ -81,11 +81,17 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
* it will cause an exception.
*/
val = vcpu->arch.hcr_el2;
+
if (!(val & HCR_RW) && system_supports_fpsimd()) {
write_sysreg(1 << 30, fpexc32_el2);
isb();
}
+
+ if (val & HCR_RW) /* for AArch64 only: */
+ val |= HCR_TID3; /* TID3: trap feature register accesses */
+
write_sysreg(val, hcr_el2);
+
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
/*
@@ -111,7 +117,7 @@ static void __hyp_text __deactivate_traps_vhe(void)
write_sysreg(mdcr_el2, mdcr_el2);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
- write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
+ write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);
}
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index da6a8cfa54a0..3556715a774e 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -33,12 +33,26 @@
#define LOWER_EL_AArch64_VECTOR 0x400
#define LOWER_EL_AArch32_VECTOR 0x600
+/*
+ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
+ */
+static const u8 return_offsets[8][2] = {
+ [0] = { 0, 0 }, /* Reset, unused */
+ [1] = { 4, 2 }, /* Undefined */
+ [2] = { 0, 0 }, /* SVC, unused */
+ [3] = { 4, 4 }, /* Prefetch abort */
+ [4] = { 8, 8 }, /* Data abort */
+ [5] = { 0, 0 }, /* HVC, unused */
+ [6] = { 4, 4 }, /* IRQ, unused */
+ [7] = { 4, 4 }, /* FIQ, unused */
+};
+
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
unsigned long cpsr;
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
- u32 return_offset = (is_thumb) ? 4 : 0;
+ u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
cpsr = mode | COMPAT_PSR_I_BIT;
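/*
 * Illustrative helper, not part of the patch: the AArch32 vector offset
 * selects the row of return_offsets[] and the Thumb bit selects the
 * column.  For example, a data abort vectors at offset 0x10, so
 * return_offsets[0x10 >> 2] yields { 8, 8 }: the return address is
 * 8 bytes past the aborting instruction in both ARM and Thumb state.
 */
static u32 aarch32_return_offset(u32 vect_offset, bool is_thumb)
{
	return return_offsets[vect_offset >> 2][is_thumb];
}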
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2e070d3baf9f..a0ee9b05e3d4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -23,6 +23,7 @@
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
+#include <linux/printk.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@@ -892,6 +893,146 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu,
return true;
}
+/* Read a sanitised cpufeature ID register by sys_reg_desc */
+static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
+{
+ u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
+ (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
+
+ if (id == SYS_ID_AA64PFR0_EL1) {
+ if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
+ pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n",
+ task_pid_nr(current));
+
+ val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
+ }
+
+ return val;
+}
+
+/* cpufeature ID register access trap handlers */
+
+static bool __access_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r,
+ bool raz)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = read_id_reg(r, raz);
+ return true;
+}
+
+static bool access_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ return __access_id_reg(vcpu, p, r, false);
+}
+
+static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ return __access_id_reg(vcpu, p, r, true);
+}
+
+static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
+static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
+static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
+
+/*
+ * cpufeature ID register user accessors
+ *
+ * For now, these registers are immutable for userspace, so no values
+ * are stored, and for set_id_reg() we don't allow the effective value
+ * to be changed.
+ */
+static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
+ bool raz)
+{
+ const u64 id = sys_reg_to_index(rd);
+ const u64 val = read_id_reg(rd, raz);
+
+ return reg_to_user(uaddr, &val, id);
+}
+
+static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
+ bool raz)
+{
+ const u64 id = sys_reg_to_index(rd);
+ int err;
+ u64 val;
+
+ err = reg_from_user(&val, uaddr, id);
+ if (err)
+ return err;
+
+ /* This is what we mean by invariant: you can't change it. */
+ if (val != read_id_reg(rd, raz))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __get_id_reg(rd, uaddr, false);
+}
+
+static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __set_id_reg(rd, uaddr, false);
+}
+
+static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __get_id_reg(rd, uaddr, true);
+}
+
+static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __set_id_reg(rd, uaddr, true);
+}
+
+/* sys_reg_desc initialiser for known cpufeature ID registers */
+#define ID_SANITISED(name) { \
+ SYS_DESC(SYS_##name), \
+ .access = access_id_reg, \
+ .get_user = get_id_reg, \
+ .set_user = set_id_reg, \
+}
+
+/*
+ * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
+ * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
+ * (1 <= crm < 8, 0 <= Op2 < 8).
+ */
+#define ID_UNALLOCATED(crm, op2) { \
+ Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
+ .access = access_raz_id_reg, \
+ .get_user = get_raz_id_reg, \
+ .set_user = set_raz_id_reg, \
+}
+
+/*
+ * sys_reg_desc initialiser for known ID registers that we hide from guests.
+ * For now, these are exposed just like unallocated ID regs: they appear
+ * RAZ for the guest.
+ */
+#define ID_HIDDEN(name) { \
+ SYS_DESC(SYS_##name), \
+ .access = access_raz_id_reg, \
+ .get_user = get_raz_id_reg, \
+ .set_user = set_raz_id_reg, \
+}
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
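/*
 * Hedged illustration of the masking read_id_reg() applies: the SVE
 * field of ID_AA64PFR0_EL1 occupies bits [35:32] (ID_AA64PFR0_SVE_SHIFT
 * is 32), so clearing that nibble makes the register advertise "SVE not
 * implemented" to the guest while leaving every other feature field
 * intact.
 */
static u64 hide_sve_from_guest(u64 id_aa64pfr0)
{
	return id_aa64pfr0 & ~(0xfUL << 32);	/* clear ID_AA64PFR0.SVE */
}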
@@ -944,6 +1085,84 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
+
+ /*
+ * ID regs: all ID_SANITISED() entries here must have corresponding
+ * entries in arm64_ftr_regs[].
+ */
+
+ /* AArch64 mappings of the AArch32 ID registers */
+ /* CRm=1 */
+ ID_SANITISED(ID_PFR0_EL1),
+ ID_SANITISED(ID_PFR1_EL1),
+ ID_SANITISED(ID_DFR0_EL1),
+ ID_HIDDEN(ID_AFR0_EL1),
+ ID_SANITISED(ID_MMFR0_EL1),
+ ID_SANITISED(ID_MMFR1_EL1),
+ ID_SANITISED(ID_MMFR2_EL1),
+ ID_SANITISED(ID_MMFR3_EL1),
+
+ /* CRm=2 */
+ ID_SANITISED(ID_ISAR0_EL1),
+ ID_SANITISED(ID_ISAR1_EL1),
+ ID_SANITISED(ID_ISAR2_EL1),
+ ID_SANITISED(ID_ISAR3_EL1),
+ ID_SANITISED(ID_ISAR4_EL1),
+ ID_SANITISED(ID_ISAR5_EL1),
+ ID_SANITISED(ID_MMFR4_EL1),
+ ID_UNALLOCATED(2,7),
+
+ /* CRm=3 */
+ ID_SANITISED(MVFR0_EL1),
+ ID_SANITISED(MVFR1_EL1),
+ ID_SANITISED(MVFR2_EL1),
+ ID_UNALLOCATED(3,3),
+ ID_UNALLOCATED(3,4),
+ ID_UNALLOCATED(3,5),
+ ID_UNALLOCATED(3,6),
+ ID_UNALLOCATED(3,7),
+
+ /* AArch64 ID registers */
+ /* CRm=4 */
+ ID_SANITISED(ID_AA64PFR0_EL1),
+ ID_SANITISED(ID_AA64PFR1_EL1),
+ ID_UNALLOCATED(4,2),
+ ID_UNALLOCATED(4,3),
+ ID_UNALLOCATED(4,4),
+ ID_UNALLOCATED(4,5),
+ ID_UNALLOCATED(4,6),
+ ID_UNALLOCATED(4,7),
+
+ /* CRm=5 */
+ ID_SANITISED(ID_AA64DFR0_EL1),
+ ID_SANITISED(ID_AA64DFR1_EL1),
+ ID_UNALLOCATED(5,2),
+ ID_UNALLOCATED(5,3),
+ ID_HIDDEN(ID_AA64AFR0_EL1),
+ ID_HIDDEN(ID_AA64AFR1_EL1),
+ ID_UNALLOCATED(5,6),
+ ID_UNALLOCATED(5,7),
+
+ /* CRm=6 */
+ ID_SANITISED(ID_AA64ISAR0_EL1),
+ ID_SANITISED(ID_AA64ISAR1_EL1),
+ ID_UNALLOCATED(6,2),
+ ID_UNALLOCATED(6,3),
+ ID_UNALLOCATED(6,4),
+ ID_UNALLOCATED(6,5),
+ ID_UNALLOCATED(6,6),
+ ID_UNALLOCATED(6,7),
+
+ /* CRm=7 */
+ ID_SANITISED(ID_AA64MMFR0_EL1),
+ ID_SANITISED(ID_AA64MMFR1_EL1),
+ ID_SANITISED(ID_AA64MMFR2_EL1),
+ ID_UNALLOCATED(7,3),
+ ID_UNALLOCATED(7,4),
+ ID_UNALLOCATED(7,5),
+ ID_UNALLOCATED(7,6),
+ ID_UNALLOCATED(7,7),
+
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
@@ -1790,8 +2009,8 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
if (!r)
r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
- /* Not saved in the sys_reg array? */
- if (r && !r->reg)
+ /* Not saved in the sys_reg array and not otherwise accessible? */
+ if (r && !(r->reg || r->get_user))
r = NULL;
return r;
@@ -1815,20 +2034,6 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
-FUNCTION_INVARIANT(id_pfr0_el1)
-FUNCTION_INVARIANT(id_pfr1_el1)
-FUNCTION_INVARIANT(id_dfr0_el1)
-FUNCTION_INVARIANT(id_afr0_el1)
-FUNCTION_INVARIANT(id_mmfr0_el1)
-FUNCTION_INVARIANT(id_mmfr1_el1)
-FUNCTION_INVARIANT(id_mmfr2_el1)
-FUNCTION_INVARIANT(id_mmfr3_el1)
-FUNCTION_INVARIANT(id_isar0_el1)
-FUNCTION_INVARIANT(id_isar1_el1)
-FUNCTION_INVARIANT(id_isar2_el1)
-FUNCTION_INVARIANT(id_isar3_el1)
-FUNCTION_INVARIANT(id_isar4_el1)
-FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
@@ -1836,20 +2041,6 @@ FUNCTION_INVARIANT(aidr_el1)
static struct sys_reg_desc invariant_sys_regs[] = {
{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
- { SYS_DESC(SYS_ID_PFR0_EL1), NULL, get_id_pfr0_el1 },
- { SYS_DESC(SYS_ID_PFR1_EL1), NULL, get_id_pfr1_el1 },
- { SYS_DESC(SYS_ID_DFR0_EL1), NULL, get_id_dfr0_el1 },
- { SYS_DESC(SYS_ID_AFR0_EL1), NULL, get_id_afr0_el1 },
- { SYS_DESC(SYS_ID_MMFR0_EL1), NULL, get_id_mmfr0_el1 },
- { SYS_DESC(SYS_ID_MMFR1_EL1), NULL, get_id_mmfr1_el1 },
- { SYS_DESC(SYS_ID_MMFR2_EL1), NULL, get_id_mmfr2_el1 },
- { SYS_DESC(SYS_ID_MMFR3_EL1), NULL, get_id_mmfr3_el1 },
- { SYS_DESC(SYS_ID_ISAR0_EL1), NULL, get_id_isar0_el1 },
- { SYS_DESC(SYS_ID_ISAR1_EL1), NULL, get_id_isar1_el1 },
- { SYS_DESC(SYS_ID_ISAR2_EL1), NULL, get_id_isar2_el1 },
- { SYS_DESC(SYS_ID_ISAR3_EL1), NULL, get_id_isar3_el1 },
- { SYS_DESC(SYS_ID_ISAR4_EL1), NULL, get_id_isar4_el1 },
- { SYS_DESC(SYS_ID_ISAR5_EL1), NULL, get_id_isar5_el1 },
{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
@@ -2079,12 +2270,31 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
return true;
}
+static int walk_one_sys_reg(const struct sys_reg_desc *rd,
+ u64 __user **uind,
+ unsigned int *total)
+{
+ /*
+ * Ignore registers we trap but don't save,
+ * and for which no custom user accessor is provided.
+ */
+ if (!(rd->reg || rd->get_user))
+ return 0;
+
+ if (!copy_reg_to_user(rd, uind))
+ return -EFAULT;
+
+ (*total)++;
+ return 0;
+}
+
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
const struct sys_reg_desc *i1, *i2, *end1, *end2;
unsigned int total = 0;
size_t num;
+ int err;
/* We check for duplicates here, to allow arch-specific overrides. */
i1 = get_target_table(vcpu->arch.target, true, &num);
@@ -2098,21 +2308,13 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
while (i1 || i2) {
int cmp = cmp_sys_reg(i1, i2);
/* target-specific overrides generic entry. */
- if (cmp <= 0) {
- /* Ignore registers we trap but don't save. */
- if (i1->reg) {
- if (!copy_reg_to_user(i1, &uind))
- return -EFAULT;
- total++;
- }
- } else {
- /* Ignore registers we trap but don't save. */
- if (i2->reg) {
- if (!copy_reg_to_user(i2, &uind))
- return -EFAULT;
- total++;
- }
- }
+ if (cmp <= 0)
+ err = walk_one_sys_reg(i1, &uind, &total);
+ else
+ err = walk_one_sys_reg(i2, &uind, &total);
+
+ if (err)
+ return err;
if (cmp <= 0 && ++i1 == end1)
i1 = NULL;
diff --git a/arch/arm64/kvm/trace.h b/arch/arm64/kvm/trace.h
index 5188c7007169..3b82fb1ddd09 100644
--- a/arch/arm64/kvm/trace.h
+++ b/arch/arm64/kvm/trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_ARM64_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ARM64_KVM_H
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index a0abc142c92b..4e696f96451f 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,8 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_in_user.o copy_page.o \
clear_page.o memchr.o memcpy.o memmove.o memset.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
- strchr.o strrchr.o
+ strchr.o strrchr.o tishift.o
# Tell the compiler to treat all general purpose registers (with the
# exception of the IP registers, which are already handled by the caller
diff --git a/arch/arm64/lib/delay.c b/arch/arm64/lib/delay.c
index dad4ec9bbfd1..e48ac402e7be 100644
--- a/arch/arm64/lib/delay.c
+++ b/arch/arm64/lib/delay.c
@@ -24,10 +24,28 @@
#include <linux/module.h>
#include <linux/timex.h>
+#include <clocksource/arm_arch_timer.h>
+
+#define USECS_TO_CYCLES(time_usecs) \
+ xloops_to_cycles((time_usecs) * 0x10C7UL)
+
+static inline unsigned long xloops_to_cycles(unsigned long xloops)
+{
+ return (xloops * loops_per_jiffy * HZ) >> 32;
+}
+
void __delay(unsigned long cycles)
{
cycles_t start = get_cycles();
+ if (arch_timer_evtstrm_available()) {
+ const cycles_t timer_evt_period =
+ USECS_TO_CYCLES(ARCH_TIMER_EVT_STREAM_PERIOD_US);
+
+ while ((get_cycles() - start + timer_evt_period) < cycles)
+ wfe();
+ }
+
while ((get_cycles() - start) < cycles)
cpu_relax();
}
@@ -35,10 +53,7 @@ EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops)
{
- unsigned long loops;
-
- loops = xloops * loops_per_jiffy * HZ;
- __delay(loops >> 32);
+ __delay(xloops_to_cycles(xloops));
}
EXPORT_SYMBOL(__const_udelay);
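/*
 * Hedged arithmetic sketch of the conversion used above: 0x10C7 is
 * roughly 2^32 / 10^6, so USECS_TO_CYCLES() computes
 * ((usecs * 2^32 / 10^6) * loops_per_jiffy * HZ) >> 32, which is
 * approximately usecs * loops_per_jiffy * HZ / 10^6 timer cycles -- the
 * same fixed-point trick __const_udelay() has always used, now shared
 * with the WFE-based wait.  Kernel-context sketch; loops_per_jiffy and
 * HZ are the usual kernel symbols.
 */
static unsigned long usecs_to_timer_cycles(unsigned long usecs)
{
	return ((usecs * 0x10C7UL) * loops_per_jiffy * HZ) >> 32;
}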
diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
new file mode 100644
index 000000000000..0179a43cc045
--- /dev/null
+++ b/arch/arm64/lib/tishift.S
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+ENTRY(__ashlti3)
+ cbz x2, 1f
+ mov x3, #64
+ sub x3, x3, x2
+ cmp x3, #0
+ b.le 2f
+ lsl x1, x1, x2
+ lsr x3, x0, x3
+ lsl x2, x0, x2
+ orr x1, x1, x3
+ mov x0, x2
+1:
+ ret
+2:
+ neg w1, w3
+ mov x2, #0
+ lsl x1, x0, x1
+ mov x0, x2
+ ret
+ENDPROC(__ashlti3)
+
+ENTRY(__ashrti3)
+ cbz x2, 3f
+ mov x3, #64
+ sub x3, x3, x2
+ cmp x3, #0
+ b.le 4f
+ lsr x0, x0, x2
+ lsl x3, x1, x3
+ asr x2, x1, x2
+ orr x0, x0, x3
+ mov x1, x2
+3:
+ ret
+4:
+ neg w0, w3
+ asr x2, x1, #63
+ asr x0, x1, x0
+ mov x1, x2
+ ret
+ENDPROC(__ashrti3)
+
+ENTRY(__lshrti3)
+ cbz x2, 1f
+ mov x3, #64
+ sub x3, x3, x2
+ cmp x3, #0
+ b.le 2f
+ lsr x0, x0, x2
+ lsl x3, x1, x3
+ lsr x2, x1, x2
+ orr x0, x0, x3
+ mov x1, x2
+1:
+ ret
+2:
+ neg w0, w3
+ mov x2, #0
+ lsr x0, x1, x0
+ mov x1, x2
+ ret
+ENDPROC(__lshrti3)
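/*
 * Hedged C reference model for __lshrti3 above (__ashlti3 and __ashrti3
 * are analogous): x0/x1 carry the low/high 64 bits of a 128-bit value
 * and x2 the shift count, mirroring the three paths the assembly takes
 * for n == 0, 0 < n < 64 and n >= 64.
 */
static unsigned __int128 ref_lshrti3(unsigned long lo, unsigned long hi,
				     unsigned int n)
{
	unsigned long new_lo, new_hi;

	if (n == 0) {
		new_lo = lo;
		new_hi = hi;
	} else if (n < 64) {
		new_lo = (lo >> n) | (hi << (64 - n));
		new_hi = hi >> n;
	} else {
		new_lo = hi >> (n - 64);
		new_hi = 0;
	}

	return ((unsigned __int128)new_hi << 64) | new_lo;
}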
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 9b0ba191e48e..849c1df3d214 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 614af886b7ef..b45c5bcaeccb 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -166,7 +166,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
/* create a coherent mapping */
page = virt_to_page(ptr);
coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
- prot, NULL);
+ prot, __builtin_return_address(0));
if (!coherent_ptr)
goto no_map;
@@ -303,8 +303,7 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
unsigned long pfn, size_t size)
{
int ret = -ENXIO;
- unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
- PAGE_SHIFT;
+ unsigned long nr_vma_pages = vma_pages(vma);
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index c9f118cd812b..81e694af5f8c 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Based on arch/arm/mm/extable.c
*/
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2069e9bc0fca..22168cd0dde7 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -97,7 +97,7 @@ static void data_abort_decode(unsigned int esr)
(esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
(esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
} else {
- pr_alert(" ISV = 0, ISS = 0x%08lu\n", esr & ESR_ELx_ISS_MASK);
+ pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
}
pr_alert(" CM = %lu, WnR = %lu\n",
@@ -105,13 +105,11 @@ static void data_abort_decode(unsigned int esr)
(esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
}
-/*
- * Decode mem abort information
- */
static void mem_abort_decode(unsigned int esr)
{
pr_alert("Mem abort info:\n");
+ pr_alert(" ESR = 0x%08x\n", esr);
pr_alert(" Exception class = %s, IL = %u bits\n",
esr_get_class_string(esr),
(esr & ESR_ELx_IL) ? 32 : 16);
@@ -249,9 +247,6 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
return false;
}
-/*
- * The kernel tried to access some page that wasn't present.
- */
static void __do_kernel_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -264,9 +259,6 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
return;
- /*
- * No handler, we'll have to terminate things with extreme prejudice.
- */
bust_spinlocks(1);
if (is_permission_fault(esr, regs, addr)) {
@@ -291,10 +283,6 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
do_exit(SIGKILL);
}
-/*
- * Something tried to access memory that isn't in our memory map. User mode
- * accesses just cause a SIGSEGV
- */
static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
unsigned int esr, unsigned int sig, int code,
struct pt_regs *regs, int fault)
@@ -559,23 +547,6 @@ no_context:
return 0;
}
-/*
- * First Level Translation Fault Handler
- *
- * We enter here because the first level page table doesn't contain a valid
- * entry for the address.
- *
- * If the address is in kernel space (>= TASK_SIZE), then we are probably
- * faulting in the vmalloc() area.
- *
- * If the init_task's first level page tables contains the relevant entry, we
- * copy the it to this task. If not, we send the process a signal, fixup the
- * exception, or oops the kernel.
- *
- * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
- * or a critical region, and should only copy the information from the master
- * page table, nothing more.
- */
static int __kprobes do_translation_fault(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
@@ -594,18 +565,11 @@ static int do_alignment_fault(unsigned long addr, unsigned int esr,
return 0;
}
-/*
- * This abort handler always returns "fault".
- */
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
- return 1;
+ return 1; /* "fault" */
}
-/*
- * This abort handler deals with Synchronous External Abort.
- * It calls notifiers, and then returns "fault".
- */
static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
struct siginfo info;
@@ -668,14 +632,14 @@ static const struct fault_info fault_info[] = {
{ do_sea, SIGBUS, 0, "level 1 (translation table walk)" },
{ do_sea, SIGBUS, 0, "level 2 (translation table walk)" },
{ do_sea, SIGBUS, 0, "level 3 (translation table walk)" },
- { do_sea, SIGBUS, 0, "synchronous parity or ECC error" },
+ { do_sea, SIGBUS, 0, "synchronous parity or ECC error" }, // Reserved when RAS is implemented
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
- { do_sea, SIGBUS, 0, "level 0 synchronous parity error (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 1 synchronous parity error (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 2 synchronous parity error (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 3 synchronous parity error (translation table walk)" },
+ { do_sea, SIGBUS, 0, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, 0, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, 0, "level 2 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, 0, "level 3 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
{ do_bad, SIGBUS, 0, "unknown 32" },
{ do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" },
{ do_bad, SIGBUS, 0, "unknown 34" },
@@ -693,7 +657,7 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGBUS, 0, "unknown 46" },
{ do_bad, SIGBUS, 0, "unknown 47" },
{ do_bad, SIGBUS, 0, "TLB conflict abort" },
- { do_bad, SIGBUS, 0, "unknown 49" },
+ { do_bad, SIGBUS, 0, "Unsupported atomic hardware update fault" },
{ do_bad, SIGBUS, 0, "unknown 50" },
{ do_bad, SIGBUS, 0, "unknown 51" },
{ do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
@@ -710,13 +674,6 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGBUS, 0, "unknown 63" },
};
-/*
- * Handle Synchronous External Aborts that occur in a guest kernel.
- *
- * The return value will be zero if the SEA was successfully handled
- * and non-zero if there was an error processing the error or there was
- * no error to process.
- */
int handle_guest_sea(phys_addr_t addr, unsigned int esr)
{
int ret = -ENOENT;
@@ -727,9 +684,6 @@ int handle_guest_sea(phys_addr_t addr, unsigned int esr)
return ret;
}
-/*
- * Dispatch a data abort to the relevant handler.
- */
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -739,11 +693,14 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
if (!inf->fn(addr, esr, regs))
return;
- pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
- inf->name, esr, addr);
+ pr_alert("Unhandled fault: %s at 0x%016lx\n",
+ inf->name, addr);
mem_abort_decode(esr);
+ if (!user_mode(regs))
+ show_pte(addr);
+
info.si_signo = inf->sig;
info.si_errno = 0;
info.si_code = inf->code;
@@ -751,9 +708,6 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
arm64_notify_die("", regs, &info, esr);
}
-/*
- * Handle stack alignment exceptions.
- */
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f1eb15e0e864..267d2b79d52d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -778,6 +778,10 @@ void __init early_fixmap_init(void)
}
}
+/*
+ * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
+ * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
+ */
void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
index 91371daf397c..67a9ba9eaa96 100644
--- a/arch/arm64/mm/physaddr.c
+++ b/arch/arm64/mm/physaddr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/types.h>
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 877d42fb0df6..95233dfc4c39 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -109,10 +109,10 @@ ENTRY(cpu_do_resume)
/*
* __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
* debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
- * exception. Mask them until local_dbg_restore() in cpu_suspend()
+ * exception. Mask them until local_daif_restore() in cpu_suspend()
* resets them.
*/
- disable_dbg
+ disable_daif
msr mdscr_el1, x10
msr sctlr_el1, x12
@@ -155,8 +155,7 @@ ENDPROC(cpu_do_switch_mm)
* called by anything else. It can only be executed from a TTBR0 mapping.
*/
ENTRY(idmap_cpu_replace_ttbr1)
- mrs x2, daif
- msr daifset, #0xf
+ save_and_disable_daif flags=x2
adrp x1, empty_zero_page
msr ttbr1_el1, x1
@@ -169,7 +168,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
msr ttbr1_el1, x0
isb
- msr daif, x2
+ restore_daif x2
ret
ENDPROC(idmap_cpu_replace_ttbr1)
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
index eee4d864350c..02b18f8b2905 100644
--- a/arch/arm64/mm/ptdump_debugfs.c
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/seq_file.h>