Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/Kbuild | 22
-rw-r--r--  arch/arm/include/asm/arch_gicv3.h | 236
-rw-r--r--  arch/arm/include/asm/arch_timer.h | 67
-rw-r--r--  arch/arm/include/asm/archrandom.h | 12
-rw-r--r--  arch/arm/include/asm/arm-cci.h | 13
-rw-r--r--  arch/arm/include/asm/arm_pmuv3.h | 280
-rw-r--r--  arch/arm/include/asm/assembler.h | 418
-rw-r--r--  arch/arm/include/asm/atomic.h | 181
-rw-r--r--  arch/arm/include/asm/bL_switcher.h | 5
-rw-r--r--  arch/arm/include/asm/barrier.h | 35
-rw-r--r--  arch/arm/include/asm/bitops.h | 111
-rw-r--r--  arch/arm/include/asm/bitrev.h | 1
-rw-r--r--  arch/arm/include/asm/bug.h | 11
-rw-r--r--  arch/arm/include/asm/bugs.h | 13
-rw-r--r--  arch/arm/include/asm/cache.h | 9
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 76
-rw-r--r--  arch/arm/include/asm/cachetype.h | 16
-rw-r--r--  arch/arm/include/asm/checksum.h | 17
-rw-r--r--  arch/arm/include/asm/clocksource.h | 7
-rw-r--r--  arch/arm/include/asm/cmpxchg.h | 35
-rw-r--r--  arch/arm/include/asm/compiler.h | 1
-rw-r--r--  arch/arm/include/asm/cp15.h | 16
-rw-r--r--  arch/arm/include/asm/cpu.h | 6
-rw-r--r--  arch/arm/include/asm/cpufeature.h | 5
-rw-r--r--  arch/arm/include/asm/cpuidle.h | 11
-rw-r--r--  arch/arm/include/asm/cputype.h | 36
-rw-r--r--  arch/arm/include/asm/cti.h | 159
-rw-r--r--  arch/arm/include/asm/current.h | 65
-rw-r--r--  arch/arm/include/asm/dcc.h | 10
-rw-r--r--  arch/arm/include/asm/delay.h | 3
-rw-r--r--  arch/arm/include/asm/device.h | 13
-rw-r--r--  arch/arm/include/asm/div64.h | 46
-rw-r--r--  arch/arm/include/asm/dma-contiguous.h | 14
-rw-r--r--  arch/arm/include/asm/dma-iommu.h | 9
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 256
-rw-r--r--  arch/arm/include/asm/dma.h | 14
-rw-r--r--  arch/arm/include/asm/dmi.h | 6
-rw-r--r--  arch/arm/include/asm/domain.h | 34
-rw-r--r--  arch/arm/include/asm/ecard.h | 3
-rw-r--r--  arch/arm/include/asm/edac.h | 13
-rw-r--r--  arch/arm/include/asm/efi.h | 72
-rw-r--r--  arch/arm/include/asm/elf.h | 30
-rw-r--r--  arch/arm/include/asm/entry-macro-multi.S | 39
-rw-r--r--  arch/arm/include/asm/exception.h | 6
-rw-r--r--  arch/arm/include/asm/fb.h | 19
-rw-r--r--  arch/arm/include/asm/fiq.h | 1
-rw-r--r--  arch/arm/include/asm/firmware.h | 7
-rw-r--r--  arch/arm/include/asm/fixmap.h | 9
-rw-r--r--  arch/arm/include/asm/flat.h | 36
-rw-r--r--  arch/arm/include/asm/floppy.h | 97
-rw-r--r--  arch/arm/include/asm/fncpy.h | 14
-rw-r--r--  arch/arm/include/asm/fpstate.h | 16
-rw-r--r--  arch/arm/include/asm/fpu.h | 15
-rw-r--r--  arch/arm/include/asm/ftrace.h | 17
-rw-r--r--  arch/arm/include/asm/futex.h | 40
-rw-r--r--  arch/arm/include/asm/glue-cache.h | 37
-rw-r--r--  arch/arm/include/asm/glue-df.h | 5
-rw-r--r--  arch/arm/include/asm/glue-pf.h | 5
-rw-r--r--  arch/arm/include/asm/glue-proc.h | 5
-rw-r--r--  arch/arm/include/asm/glue.h | 5
-rw-r--r--  arch/arm/include/asm/gpio.h | 25
-rw-r--r--  arch/arm/include/asm/hardirq.h | 28
-rw-r--r--  arch/arm/include/asm/hardware/cache-aurora-l2.h | 100
-rw-r--r--  arch/arm/include/asm/hardware/cache-b15-rac.h | 10
-rw-r--r--  arch/arm/include/asm/hardware/cache-feroceon-l2.h | 6
-rw-r--r--  arch/arm/include/asm/hardware/cache-l2x0.h | 18
-rw-r--r--  arch/arm/include/asm/hardware/cache-tauros2.h | 5
-rw-r--r--  arch/arm/include/asm/hardware/cache-uniphier.h | 11
-rw-r--r--  arch/arm/include/asm/hardware/cp14.h | 10
-rw-r--r--  arch/arm/include/asm/hardware/dec21285.h | 25
-rw-r--r--  arch/arm/include/asm/hardware/entry-macro-iomd.S | 131
-rw-r--r--  arch/arm/include/asm/hardware/ioc.h | 5
-rw-r--r--  arch/arm/include/asm/hardware/iomd.h | 5
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-adma.h | 932
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h | 312
-rw-r--r--  arch/arm/include/asm/hardware/iop_adma.h | 119
-rw-r--r--  arch/arm/include/asm/hardware/it8152.h | 115
-rw-r--r--  arch/arm/include/asm/hardware/locomo.h | 10
-rw-r--r--  arch/arm/include/asm/hardware/memc.h | 5
-rw-r--r--  arch/arm/include/asm/hardware/sa1111.h | 45
-rw-r--r--  arch/arm/include/asm/hardware/scoop.h | 6
-rw-r--r--  arch/arm/include/asm/hardware/ssp.h | 5
-rw-r--r--  arch/arm/include/asm/highmem.h | 49
-rw-r--r--  arch/arm/include/asm/hugetlb-3level.h | 50
-rw-r--r--  arch/arm/include/asm/hugetlb.h | 59
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h | 12
-rw-r--r--  arch/arm/include/asm/hw_irq.h | 1
-rw-r--r--  arch/arm/include/asm/hwcap.h | 1
-rw-r--r--  arch/arm/include/asm/hypervisor.h | 6
-rw-r--r--  arch/arm/include/asm/ide.h | 23
-rw-r--r--  arch/arm/include/asm/idmap.h | 5
-rw-r--r--  arch/arm/include/asm/insn.h | 26
-rw-r--r--  arch/arm/include/asm/io.h | 94
-rw-r--r--  arch/arm/include/asm/irq.h | 13
-rw-r--r--  arch/arm/include/asm/irq_work.h | 1
-rw-r--r--  arch/arm/include/asm/irqflags.h | 1
-rw-r--r--  arch/arm/include/asm/jump_label.h | 17
-rw-r--r--  arch/arm/include/asm/kasan.h | 33
-rw-r--r--  arch/arm/include/asm/kasan_def.h | 81
-rw-r--r--  arch/arm/include/asm/kexec-internal.h | 12
-rw-r--r--  arch/arm/include/asm/kexec.h | 8
-rw-r--r--  arch/arm/include/asm/kfence.h | 53
-rw-r--r--  arch/arm/include/asm/kgdb.h | 3
-rw-r--r--  arch/arm/include/asm/kmap_types.h | 9
-rw-r--r--  arch/arm/include/asm/kprobes.h | 36
-rw-r--r--  arch/arm/include/asm/krait-l2-accessors.h | 9
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 252
-rw-r--r--  arch/arm/include/asm/kvm_asm.h | 82
-rw-r--r--  arch/arm/include/asm/kvm_coproc.h | 48
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 279
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 302
-rw-r--r--  arch/arm/include/asm/kvm_hyp.h | 127
-rw-r--r--  arch/arm/include/asm/kvm_mmio.h | 38
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 226
-rw-r--r--  arch/arm/include/asm/kvm_psci.h | 27
-rw-r--r--  arch/arm/include/asm/limits.h | 11
-rw-r--r--  arch/arm/include/asm/linkage.h | 1
-rw-r--r--  arch/arm/include/asm/mach/arch.h | 12
-rw-r--r--  arch/arm/include/asm/mach/dma.h | 10
-rw-r--r--  arch/arm/include/asm/mach/flash.h | 5
-rw-r--r--  arch/arm/include/asm/mach/irq.h | 5
-rw-r--r--  arch/arm/include/asm/mach/map.h | 6
-rw-r--r--  arch/arm/include/asm/mach/pci.h | 12
-rw-r--r--  arch/arm/include/asm/mach/sharpsl_param.h | 6
-rw-r--r--  arch/arm/include/asm/mach/time.h | 10
-rw-r--r--  arch/arm/include/asm/mc146818rtc.h | 1
-rw-r--r--  arch/arm/include/asm/mcpm.h | 5
-rw-r--r--  arch/arm/include/asm/mcs_spinlock.h | 1
-rw-r--r--  arch/arm/include/asm/memblock.h | 1
-rw-r--r--  arch/arm/include/asm/memory.h | 142
-rw-r--r--  arch/arm/include/asm/mman.h | 14
-rw-r--r--  arch/arm/include/asm/mmu.h | 11
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 53
-rw-r--r--  arch/arm/include/asm/module.h | 59
-rw-r--r--  arch/arm/include/asm/module.lds.h | 7
-rw-r--r--  arch/arm/include/asm/mpu.h | 113
-rw-r--r--  arch/arm/include/asm/mtd-xip.h | 5
-rw-r--r--  arch/arm/include/asm/neon.h | 5
-rw-r--r--  arch/arm/include/asm/nwflash.h | 2
-rw-r--r--  arch/arm/include/asm/opcodes-sec.h | 9
-rw-r--r--  arch/arm/include/asm/opcodes-virt.h | 15
-rw-r--r--  arch/arm/include/asm/opcodes.h | 14
-rw-r--r--  arch/arm/include/asm/outercache.h | 14
-rw-r--r--  arch/arm/include/asm/page-nommu.h | 5
-rw-r--r--  arch/arm/include/asm/page.h | 45
-rw-r--r--  arch/arm/include/asm/paravirt.h | 12
-rw-r--r--  arch/arm/include/asm/paravirt_api_clock.h | 1
-rw-r--r--  arch/arm/include/asm/pci.h | 18
-rw-r--r--  arch/arm/include/asm/percpu.h | 50
-rw-r--r--  arch/arm/include/asm/perf_event.h | 15
-rw-r--r--  arch/arm/include/asm/pgalloc.h | 73
-rw-r--r--  arch/arm/include/asm/pgtable-2level-hwdef.h | 5
-rw-r--r--  arch/arm/include/asm/pgtable-2level-types.h | 14
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h | 49
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h | 46
-rw-r--r--  arch/arm/include/asm/pgtable-3level-types.h | 14
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h | 67
-rw-r--r--  arch/arm/include/asm/pgtable-hwdef.h | 5
-rw-r--r--  arch/arm/include/asm/pgtable-nommu.h | 28
-rw-r--r--  arch/arm/include/asm/pgtable.h | 155
-rw-r--r--  arch/arm/include/asm/probes.h | 11
-rw-r--r--  arch/arm/include/asm/proc-fns.h | 84
-rw-r--r--  arch/arm/include/asm/processor.h | 64
-rw-r--r--  arch/arm/include/asm/procinfo.h | 5
-rw-r--r--  arch/arm/include/asm/prom.h | 10
-rw-r--r--  arch/arm/include/asm/psci.h | 9
-rw-r--r--  arch/arm/include/asm/ptdump.h | 41
-rw-r--r--  arch/arm/include/asm/ptrace.h | 48
-rw-r--r--  arch/arm/include/asm/seccomp.h | 11
-rw-r--r--  arch/arm/include/asm/sections.h | 18
-rw-r--r--  arch/arm/include/asm/secure_cntvoff.h | 8
-rw-r--r--  arch/arm/include/asm/semihost.h | 30
-rw-r--r--  arch/arm/include/asm/set_memory.h | 14
-rw-r--r--  arch/arm/include/asm/setup.h | 23
-rw-r--r--  arch/arm/include/asm/shmparam.h | 1
-rw-r--r--  arch/arm/include/asm/signal.h | 8
-rw-r--r--  arch/arm/include/asm/simd.h | 22
-rw-r--r--  arch/arm/include/asm/smp.h | 25
-rw-r--r--  arch/arm/include/asm/smp_plat.h | 1
-rw-r--r--  arch/arm/include/asm/smp_scu.h | 14
-rw-r--r--  arch/arm/include/asm/smp_twd.h | 17
-rw-r--r--  arch/arm/include/asm/sparsemem.h | 3
-rw-r--r--  arch/arm/include/asm/spectre.h | 42
-rw-r--r--  arch/arm/include/asm/spinlock.h | 39
-rw-r--r--  arch/arm/include/asm/spinlock_types.h | 5
-rw-r--r--  arch/arm/include/asm/stackprotector.h | 18
-rw-r--r--  arch/arm/include/asm/stacktrace.h | 33
-rw-r--r--  arch/arm/include/asm/stage2_pgtable.h | 61
-rw-r--r--  arch/arm/include/asm/string.h | 53
-rw-r--r--  arch/arm/include/asm/suspend.h | 5
-rw-r--r--  arch/arm/include/asm/swab.h | 1
-rw-r--r--  arch/arm/include/asm/switch_to.h | 6
-rw-r--r--  arch/arm/include/asm/sync_bitops.h | 30
-rw-r--r--  arch/arm/include/asm/syscall.h | 94
-rw-r--r--  arch/arm/include/asm/syscalls.h | 51
-rw-r--r--  arch/arm/include/asm/system_info.h | 1
-rw-r--r--  arch/arm/include/asm/system_misc.h | 22
-rw-r--r--  arch/arm/include/asm/tcm.h | 22
-rw-r--r--  arch/arm/include/asm/text-patching.h (renamed from arch/arm/include/asm/patch.h) | 1
-rw-r--r--  arch/arm/include/asm/therm.h | 1
-rw-r--r--  arch/arm/include/asm/thread_info.h | 101
-rw-r--r--  arch/arm/include/asm/thread_notify.h | 5
-rw-r--r--  arch/arm/include/asm/timex.h | 6
-rw-r--r--  arch/arm/include/asm/tlb.h | 255
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 32
-rw-r--r--  arch/arm/include/asm/tls.h | 36
-rw-r--r--  arch/arm/include/asm/topology.h | 34
-rw-r--r--  arch/arm/include/asm/traps.h | 37
-rw-r--r--  arch/arm/include/asm/trusted_foundations.h | 73
-rw-r--r--  arch/arm/include/asm/uaccess-asm.h | 161
-rw-r--r--  arch/arm/include/asm/uaccess.h | 340
-rw-r--r--  arch/arm/include/asm/ucontext.h | 16
-rw-r--r--  arch/arm/include/asm/unified.h | 95
-rw-r--r--  arch/arm/include/asm/unistd.h | 12
-rw-r--r--  arch/arm/include/asm/unwind.h | 22
-rw-r--r--  arch/arm/include/asm/uprobes.h | 5
-rw-r--r--  arch/arm/include/asm/user.h | 5
-rw-r--r--  arch/arm/include/asm/v7m.h | 24
-rw-r--r--  arch/arm/include/asm/vdso.h | 5
-rw-r--r--  arch/arm/include/asm/vdso/clocksource.h | 8
-rw-r--r--  arch/arm/include/asm/vdso/cp15.h | 38
-rw-r--r--  arch/arm/include/asm/vdso/gettimeofday.h | 140
-rw-r--r--  arch/arm/include/asm/vdso/processor.h | 22
-rw-r--r--  arch/arm/include/asm/vdso/vsyscall.h | 22
-rw-r--r--  arch/arm/include/asm/vdso_datapage.h | 60
-rw-r--r--  arch/arm/include/asm/vermagic.h | 31
-rw-r--r--  arch/arm/include/asm/vfp.h | 15
-rw-r--r--  arch/arm/include/asm/vfpmacros.h | 25
-rw-r--r--  arch/arm/include/asm/vga.h | 2
-rw-r--r--  arch/arm/include/asm/virt.h | 32
-rw-r--r--  arch/arm/include/asm/vmalloc.h | 4
-rw-r--r--  arch/arm/include/asm/vmlinux.lds.h | 177
-rw-r--r--  arch/arm/include/asm/word-at-a-time.h | 4
-rw-r--r--  arch/arm/include/asm/xen/events.h | 1
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h | 1
-rw-r--r--  arch/arm/include/asm/xen/page.h | 5
-rw-r--r--  arch/arm/include/asm/xen/swiotlb-xen.h | 1
-rw-r--r--  arch/arm/include/asm/xen/xen-ops.h | 8
-rw-r--r--  arch/arm/include/asm/xor.h | 51
-rw-r--r--  arch/arm/include/debug/8250.S | 12
-rw-r--r--  arch/arm/include/debug/asm9260.S | 11
-rw-r--r--  arch/arm/include/debug/at91.S | 11
-rw-r--r--  arch/arm/include/debug/bcm63xx.S | 10
-rw-r--r--  arch/arm/include/debug/brcmstb.S | 77
-rw-r--r--  arch/arm/include/debug/clps711x.S | 11
-rw-r--r--  arch/arm/include/debug/dc21285.S | 11
-rw-r--r--  arch/arm/include/debug/digicolor.S | 11
-rw-r--r--  arch/arm/include/debug/efm32.S | 45
-rw-r--r--  arch/arm/include/debug/exynos.S | 7
-rw-r--r--  arch/arm/include/debug/icedcc.S | 21
-rw-r--r--  arch/arm/include/debug/imx-uart.h | 23
-rw-r--r--  arch/arm/include/debug/imx.S | 11
-rw-r--r--  arch/arm/include/debug/ks8695.S | 40
-rw-r--r--  arch/arm/include/debug/meson.S | 10
-rw-r--r--  arch/arm/include/debug/msm.S | 16
-rw-r--r--  arch/arm/include/debug/netx.S | 36
-rw-r--r--  arch/arm/include/debug/omap2plus.S | 120
-rw-r--r--  arch/arm/include/debug/palmchip.S | 1
-rw-r--r--  arch/arm/include/debug/pl01x.S | 18
-rw-r--r--  arch/arm/include/debug/renesas-scif.S | 16
-rw-r--r--  arch/arm/include/debug/s3c24xx.S | 15
-rw-r--r--  arch/arm/include/debug/s5pv210.S | 5
-rw-r--r--  arch/arm/include/debug/sa1100.S | 11
-rw-r--r--  arch/arm/include/debug/samsung.S | 15
-rw-r--r--  arch/arm/include/debug/sirf.S | 38
-rw-r--r--  arch/arm/include/debug/sti.S | 36
-rw-r--r--  arch/arm/include/debug/stm32.S | 43
-rw-r--r--  arch/arm/include/debug/tegra.S | 72
-rw-r--r--  arch/arm/include/debug/uncompress.h | 1
-rw-r--r--  arch/arm/include/debug/ux500.S | 15
-rw-r--r--  arch/arm/include/debug/vexpress.S | 5
-rw-r--r--  arch/arm/include/debug/vf.S | 11
-rw-r--r--  arch/arm/include/debug/vt8500.S | 10
-rw-r--r--  arch/arm/include/debug/zynq.S | 15
-rw-r--r--  arch/arm/include/uapi/asm/Kbuild | 21
-rw-r--r--  arch/arm/include/uapi/asm/auxvec.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/byteorder.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/fcntl.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/hwcap.h | 11
-rw-r--r--  arch/arm/include/uapi/asm/ioctls.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h | 262
-rw-r--r--  arch/arm/include/uapi/asm/kvm_para.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/perf_regs.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/posix_types.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/ptrace.h | 10
-rw-r--r--  arch/arm/include/uapi/asm/setup.h | 3
-rw-r--r--  arch/arm/include/uapi/asm/sigcontext.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/signal.h | 30
-rw-r--r--  arch/arm/include/uapi/asm/stat.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/statfs.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/swab.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/types.h | 1
-rw-r--r--  arch/arm/include/uapi/asm/unistd.h | 4
293 files changed, 4335 insertions, 7513 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 721ab5ecfb9b..03657ff8fbe3 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,26 +1,8 @@
-generic-y += clkdev.h
-generic-y += current.h
+# SPDX-License-Identifier: GPL-2.0
generic-y += early_ioremap.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += extable.h
-generic-y += irq_regs.h
-generic-y += kdebug.h
-generic-y += local.h
-generic-y += local64.h
-generic-y += mm-arch-hooks.h
-generic-y += msi.h
+generic-y += flat.h
generic-y += parport.h
-generic-y += preempt.h
-generic-y += rwsem.h
-generic-y += seccomp.h
-generic-y += segment.h
-generic-y += serial.h
-generic-y += simd.h
-generic-y += sizes.h
-generic-y += timex.h
-generic-y += trace_clock.h
-generic-y += unaligned.h
generated-y += mach-types.h
generated-y += unistd-nr.h
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 27475904e096..311e83038bdb 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/arch_gicv3.h
*
* Copyright (C) 2015 ARM Ltd.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASM_ARCH_GICV3_H
#define __ASM_ARCH_GICV3_H
@@ -21,6 +10,7 @@
#ifndef __ASSEMBLY__
#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
@@ -34,71 +24,19 @@
#define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5)
#define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7)
#define ICC_BPR1 __ACCESS_CP15(c12, 0, c12, 3)
+#define ICC_RPR __ACCESS_CP15(c12, 0, c11, 3)
+
+#define __ICC_AP0Rx(x) __ACCESS_CP15(c12, 0, c8, 4 | x)
+#define ICC_AP0R0 __ICC_AP0Rx(0)
+#define ICC_AP0R1 __ICC_AP0Rx(1)
+#define ICC_AP0R2 __ICC_AP0Rx(2)
+#define ICC_AP0R3 __ICC_AP0Rx(3)
-#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5)
-
-#define ICH_VSEIR __ACCESS_CP15(c12, 4, c9, 4)
-#define ICH_HCR __ACCESS_CP15(c12, 4, c11, 0)
-#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1)
-#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2)
-#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3)
-#define ICH_ELSR __ACCESS_CP15(c12, 4, c11, 5)
-#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7)
-
-#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x)
-#define __LR8(x) __ACCESS_CP15(c12, 4, c13, x)
-
-#define ICH_LR0 __LR0(0)
-#define ICH_LR1 __LR0(1)
-#define ICH_LR2 __LR0(2)
-#define ICH_LR3 __LR0(3)
-#define ICH_LR4 __LR0(4)
-#define ICH_LR5 __LR0(5)
-#define ICH_LR6 __LR0(6)
-#define ICH_LR7 __LR0(7)
-#define ICH_LR8 __LR8(0)
-#define ICH_LR9 __LR8(1)
-#define ICH_LR10 __LR8(2)
-#define ICH_LR11 __LR8(3)
-#define ICH_LR12 __LR8(4)
-#define ICH_LR13 __LR8(5)
-#define ICH_LR14 __LR8(6)
-#define ICH_LR15 __LR8(7)
-
-/* LR top half */
-#define __LRC0(x) __ACCESS_CP15(c12, 4, c14, x)
-#define __LRC8(x) __ACCESS_CP15(c12, 4, c15, x)
-
-#define ICH_LRC0 __LRC0(0)
-#define ICH_LRC1 __LRC0(1)
-#define ICH_LRC2 __LRC0(2)
-#define ICH_LRC3 __LRC0(3)
-#define ICH_LRC4 __LRC0(4)
-#define ICH_LRC5 __LRC0(5)
-#define ICH_LRC6 __LRC0(6)
-#define ICH_LRC7 __LRC0(7)
-#define ICH_LRC8 __LRC8(0)
-#define ICH_LRC9 __LRC8(1)
-#define ICH_LRC10 __LRC8(2)
-#define ICH_LRC11 __LRC8(3)
-#define ICH_LRC12 __LRC8(4)
-#define ICH_LRC13 __LRC8(5)
-#define ICH_LRC14 __LRC8(6)
-#define ICH_LRC15 __LRC8(7)
-
-#define __AP0Rx(x) __ACCESS_CP15(c12, 4, c8, x)
-#define ICH_AP0R0 __AP0Rx(0)
-#define ICH_AP0R1 __AP0Rx(1)
-#define ICH_AP0R2 __AP0Rx(2)
-#define ICH_AP0R3 __AP0Rx(3)
-
-#define __AP1Rx(x) __ACCESS_CP15(c12, 4, c9, x)
-#define ICH_AP1R0 __AP1Rx(0)
-#define ICH_AP1R1 __AP1Rx(1)
-#define ICH_AP1R2 __AP1Rx(2)
-#define ICH_AP1R3 __AP1Rx(3)
-
-/* A32-to-A64 mappings used by VGIC save/restore */
+#define __ICC_AP1Rx(x) __ACCESS_CP15(c12, 0, c9, x)
+#define ICC_AP1R0 __ICC_AP1Rx(0)
+#define ICC_AP1R1 __ICC_AP1Rx(1)
+#define ICC_AP1R2 __ICC_AP1Rx(2)
+#define ICC_AP1R3 __ICC_AP1Rx(3)
#define CPUIF_MAP(a32, a64) \
static inline void write_ ## a64(u32 val) \
@@ -110,66 +48,22 @@ static inline u32 read_ ## a64(void) \
return read_sysreg(a32); \
} \
-#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64) \
-static inline void write_ ## a64(u64 val) \
-{ \
- write_sysreg(lower_32_bits(val), a32lo);\
- write_sysreg(upper_32_bits(val), a32hi);\
-} \
-static inline u64 read_ ## a64(void) \
-{ \
- u64 val = read_sysreg(a32lo); \
- \
- val |= (u64)read_sysreg(a32hi) << 32; \
- \
- return val; \
-}
-
-CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
-CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
-CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
-CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
-CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
-CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
-CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
-CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
-CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
-CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
-CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
-CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
-CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
-CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
-CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
-CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
-
-CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
-CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
-CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
-CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
-CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
-CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
-CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
-CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
-CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
-CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
-CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
-CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
-CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
-CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
-CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
-CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
+CPUIF_MAP(ICC_EOIR1, ICC_EOIR1_EL1)
+CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
+CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
+CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)
+CPUIF_MAP(ICC_AP0R2, ICC_AP0R2_EL1)
+CPUIF_MAP(ICC_AP0R3, ICC_AP0R3_EL1)
+CPUIF_MAP(ICC_AP1R0, ICC_AP1R0_EL1)
+CPUIF_MAP(ICC_AP1R1, ICC_AP1R1_EL1)
+CPUIF_MAP(ICC_AP1R2, ICC_AP1R2_EL1)
+CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1)
#define read_gicreg(r) read_##r()
#define write_gicreg(v, r) write_##r(v)
/* Low-level accessors */
-static inline void gic_write_eoir(u32 irq)
-{
- write_sysreg(irq, ICC_EOIR1);
- isb();
-}
-
static inline void gic_write_dir(u32 val)
{
write_sysreg(val, ICC_DIR);
@@ -185,17 +79,17 @@ static inline u32 gic_read_iar(void)
return irqstat;
}
-static inline void gic_write_pmr(u32 val)
-{
- write_sysreg(val, ICC_PMR);
-}
-
static inline void gic_write_ctlr(u32 val)
{
write_sysreg(val, ICC_CTLR);
isb();
}
+static inline u32 gic_read_ctlr(void)
+{
+ return read_sysreg(ICC_CTLR);
+}
+
static inline void gic_write_grpen1(u32 val)
{
write_sysreg(val, ICC_IGRPEN1);
@@ -223,6 +117,21 @@ static inline void gic_write_bpr1(u32 val)
write_sysreg(val, ICC_BPR1);
}
+static inline u32 gic_read_pmr(void)
+{
+ return read_sysreg(ICC_PMR);
+}
+
+static inline void gic_write_pmr(u32 val)
+{
+ write_sysreg(val, ICC_PMR);
+}
+
+static inline u32 gic_read_rpr(void)
+{
+ return read_sysreg(ICC_RPR);
+}
+
/*
* Even in 32bit systems that use LPAE, there is no guarantee that the I/O
* interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
@@ -276,6 +185,12 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
#define gicr_write_pendbaser(v, c) __gic_writeq_nonatomic(v, c)
/*
+ * GICR_xLPIR - only the lower bits are significant
+ */
+#define gic_read_lpir(c) readl_relaxed(c)
+#define gic_write_lpir(v, c) writel_relaxed(lower_32_bits(v), c)
+
+/*
* GITS_TYPER is an ID register and doesn't need atomicity.
*/
#define gits_read_typer(c) __gic_readq_nonatomic(c)
@@ -291,5 +206,56 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
*/
#define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c)
+/*
+ * GICR_VPROPBASER - hi and lo bits may be accessed independently.
+ */
+#define gicr_read_vpropbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_VPENDBASER - the Valid bit must be cleared before changing
+ * anything else.
+ */
+static inline void gicr_write_vpendbaser(u64 val, void __iomem *addr)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(addr + 4);
+ if (tmp & (GICR_VPENDBASER_Valid >> 32)) {
+ tmp &= ~(GICR_VPENDBASER_Valid >> 32);
+ writel_relaxed(tmp, addr + 4);
+ }
+
+ /*
+ * Use the fact that __gic_writeq_nonatomic writes the second
+ * half of the 64bit quantity after the first.
+ */
+ __gic_writeq_nonatomic(val, addr);
+}
+
+#define gicr_read_vpendbaser(c) __gic_readq_nonatomic(c)
+
+static inline bool gic_prio_masking_enabled(void)
+{
+ return false;
+}
+
+static inline void gic_pmr_mask_irqs(void)
+{
+ /* Should not get called. */
+ WARN_ON_ONCE(true);
+}
+
+static inline void gic_arch_enable_irqs(void)
+{
+ /* Should not get called. */
+ WARN_ON_ONCE(true);
+}
+
+static inline bool gic_has_relaxed_pmr_sync(void)
+{
+ return false;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_ARCH_GICV3_H */
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index d4ebf5679f1f..bb129b6d2366 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -1,15 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H
#include <asm/barrier.h>
#include <asm/errno.h>
+#include <asm/hwcap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>
#include <clocksource/arm_arch_timer.h>
#ifdef CONFIG_ARM_ARCH_TIMER
+/* 32bit ARM doesn't know anything about timer errata... */
+#define has_erratum_handler(h) (false)
+#define erratum_handler(h) (arch_timer_##h)
+
int arch_timer_arch_init(void);
/*
@@ -18,29 +25,35 @@ int arch_timer_arch_init(void);
* the code. At least it does so with a recent GCC (4.6.3).
*/
static __always_inline
-void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+ asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" ((u32)val));
+ isb();
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+ case ARCH_TIMER_REG_CVAL:
+ asm volatile("mcrr p15, 2, %Q0, %R0, c14" : : "r" (val));
break;
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
+ asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" ((u32)val));
+ isb();
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
+ case ARCH_TIMER_REG_CVAL:
+ asm volatile("mcrr p15, 3, %Q0, %R0, c14" : : "r" (val));
break;
+ default:
+ BUILD_BUG();
}
+ } else {
+ BUILD_BUG();
}
-
- isb();
}
static __always_inline
@@ -53,19 +66,19 @@ u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
- break;
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
- break;
+ default:
+ BUILD_BUG();
}
+ } else {
+ BUILD_BUG();
}
return val;
@@ -78,7 +91,7 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline u64 arch_counter_get_cntpct(void)
+static inline u64 __arch_counter_get_cntpct(void)
{
u64 cval;
@@ -87,7 +100,12 @@ static inline u64 arch_counter_get_cntpct(void)
return cval;
}
-static inline u64 arch_counter_get_cntvct(void)
+static inline u64 __arch_counter_get_cntpct_stable(void)
+{
+ return __arch_counter_get_cntpct();
+}
+
+static inline u64 __arch_counter_get_cntvct(void)
{
u64 cval;
@@ -96,6 +114,11 @@ static inline u64 arch_counter_get_cntvct(void)
return cval;
}
+static inline u64 __arch_counter_get_cntvct_stable(void)
+{
+ return __arch_counter_get_cntvct();
+}
+
static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
@@ -106,8 +129,18 @@ static inline u32 arch_timer_get_cntkctl(void)
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
+ isb();
+}
+
+static inline void arch_timer_set_evtstrm_feature(void)
+{
+ elf_hwcap |= HWCAP_EVTSTRM;
}
+static inline bool arch_timer_have_evtstrm_feature(void)
+{
+ return elf_hwcap & HWCAP_EVTSTRM;
+}
#endif
#endif
diff --git a/arch/arm/include/asm/archrandom.h b/arch/arm/include/asm/archrandom.h
new file mode 100644
index 000000000000..cc4714eb1a75
--- /dev/null
+++ b/arch/arm/include/asm/archrandom.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+static inline bool __init smccc_probe_trng(void)
+{
+ return false;
+}
+
+#include <asm-generic/archrandom.h>
+
+#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm/include/asm/arm-cci.h b/arch/arm/include/asm/arm-cci.h
index fe77f7ab7e6b..7537bd790657 100644
--- a/arch/arm/include/asm/arm-cci.h
+++ b/arch/arm/include/asm/arm-cci.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/arm-cci.h
*
* Copyright (C) 2015 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASM_ARM_CCI_H
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
new file mode 100644
index 000000000000..2ec0e5e83fc9
--- /dev/null
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __ASM_PMUV3_H
+#define __ASM_PMUV3_H
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+
+#define PMCCNTR __ACCESS_CP15_64(0, c9)
+
+#define PMCR __ACCESS_CP15(c9, 0, c12, 0)
+#define PMCNTENSET __ACCESS_CP15(c9, 0, c12, 1)
+#define PMCNTENCLR __ACCESS_CP15(c9, 0, c12, 2)
+#define PMOVSR __ACCESS_CP15(c9, 0, c12, 3)
+#define PMSELR __ACCESS_CP15(c9, 0, c12, 5)
+#define PMCEID0 __ACCESS_CP15(c9, 0, c12, 6)
+#define PMCEID1 __ACCESS_CP15(c9, 0, c12, 7)
+#define PMXEVTYPER __ACCESS_CP15(c9, 0, c13, 1)
+#define PMXEVCNTR __ACCESS_CP15(c9, 0, c13, 2)
+#define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
+#define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
+#define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
+#define PMCEID2 __ACCESS_CP15(c9, 0, c14, 4)
+#define PMCEID3 __ACCESS_CP15(c9, 0, c14, 5)
+#define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
+#define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
+
+#define PMEVCNTR0 __ACCESS_CP15(c14, 0, c8, 0)
+#define PMEVCNTR1 __ACCESS_CP15(c14, 0, c8, 1)
+#define PMEVCNTR2 __ACCESS_CP15(c14, 0, c8, 2)
+#define PMEVCNTR3 __ACCESS_CP15(c14, 0, c8, 3)
+#define PMEVCNTR4 __ACCESS_CP15(c14, 0, c8, 4)
+#define PMEVCNTR5 __ACCESS_CP15(c14, 0, c8, 5)
+#define PMEVCNTR6 __ACCESS_CP15(c14, 0, c8, 6)
+#define PMEVCNTR7 __ACCESS_CP15(c14, 0, c8, 7)
+#define PMEVCNTR8 __ACCESS_CP15(c14, 0, c9, 0)
+#define PMEVCNTR9 __ACCESS_CP15(c14, 0, c9, 1)
+#define PMEVCNTR10 __ACCESS_CP15(c14, 0, c9, 2)
+#define PMEVCNTR11 __ACCESS_CP15(c14, 0, c9, 3)
+#define PMEVCNTR12 __ACCESS_CP15(c14, 0, c9, 4)
+#define PMEVCNTR13 __ACCESS_CP15(c14, 0, c9, 5)
+#define PMEVCNTR14 __ACCESS_CP15(c14, 0, c9, 6)
+#define PMEVCNTR15 __ACCESS_CP15(c14, 0, c9, 7)
+#define PMEVCNTR16 __ACCESS_CP15(c14, 0, c10, 0)
+#define PMEVCNTR17 __ACCESS_CP15(c14, 0, c10, 1)
+#define PMEVCNTR18 __ACCESS_CP15(c14, 0, c10, 2)
+#define PMEVCNTR19 __ACCESS_CP15(c14, 0, c10, 3)
+#define PMEVCNTR20 __ACCESS_CP15(c14, 0, c10, 4)
+#define PMEVCNTR21 __ACCESS_CP15(c14, 0, c10, 5)
+#define PMEVCNTR22 __ACCESS_CP15(c14, 0, c10, 6)
+#define PMEVCNTR23 __ACCESS_CP15(c14, 0, c10, 7)
+#define PMEVCNTR24 __ACCESS_CP15(c14, 0, c11, 0)
+#define PMEVCNTR25 __ACCESS_CP15(c14, 0, c11, 1)
+#define PMEVCNTR26 __ACCESS_CP15(c14, 0, c11, 2)
+#define PMEVCNTR27 __ACCESS_CP15(c14, 0, c11, 3)
+#define PMEVCNTR28 __ACCESS_CP15(c14, 0, c11, 4)
+#define PMEVCNTR29 __ACCESS_CP15(c14, 0, c11, 5)
+#define PMEVCNTR30 __ACCESS_CP15(c14, 0, c11, 6)
+
+#define PMEVTYPER0 __ACCESS_CP15(c14, 0, c12, 0)
+#define PMEVTYPER1 __ACCESS_CP15(c14, 0, c12, 1)
+#define PMEVTYPER2 __ACCESS_CP15(c14, 0, c12, 2)
+#define PMEVTYPER3 __ACCESS_CP15(c14, 0, c12, 3)
+#define PMEVTYPER4 __ACCESS_CP15(c14, 0, c12, 4)
+#define PMEVTYPER5 __ACCESS_CP15(c14, 0, c12, 5)
+#define PMEVTYPER6 __ACCESS_CP15(c14, 0, c12, 6)
+#define PMEVTYPER7 __ACCESS_CP15(c14, 0, c12, 7)
+#define PMEVTYPER8 __ACCESS_CP15(c14, 0, c13, 0)
+#define PMEVTYPER9 __ACCESS_CP15(c14, 0, c13, 1)
+#define PMEVTYPER10 __ACCESS_CP15(c14, 0, c13, 2)
+#define PMEVTYPER11 __ACCESS_CP15(c14, 0, c13, 3)
+#define PMEVTYPER12 __ACCESS_CP15(c14, 0, c13, 4)
+#define PMEVTYPER13 __ACCESS_CP15(c14, 0, c13, 5)
+#define PMEVTYPER14 __ACCESS_CP15(c14, 0, c13, 6)
+#define PMEVTYPER15 __ACCESS_CP15(c14, 0, c13, 7)
+#define PMEVTYPER16 __ACCESS_CP15(c14, 0, c14, 0)
+#define PMEVTYPER17 __ACCESS_CP15(c14, 0, c14, 1)
+#define PMEVTYPER18 __ACCESS_CP15(c14, 0, c14, 2)
+#define PMEVTYPER19 __ACCESS_CP15(c14, 0, c14, 3)
+#define PMEVTYPER20 __ACCESS_CP15(c14, 0, c14, 4)
+#define PMEVTYPER21 __ACCESS_CP15(c14, 0, c14, 5)
+#define PMEVTYPER22 __ACCESS_CP15(c14, 0, c14, 6)
+#define PMEVTYPER23 __ACCESS_CP15(c14, 0, c14, 7)
+#define PMEVTYPER24 __ACCESS_CP15(c14, 0, c15, 0)
+#define PMEVTYPER25 __ACCESS_CP15(c14, 0, c15, 1)
+#define PMEVTYPER26 __ACCESS_CP15(c14, 0, c15, 2)
+#define PMEVTYPER27 __ACCESS_CP15(c14, 0, c15, 3)
+#define PMEVTYPER28 __ACCESS_CP15(c14, 0, c15, 4)
+#define PMEVTYPER29 __ACCESS_CP15(c14, 0, c15, 5)
+#define PMEVTYPER30 __ACCESS_CP15(c14, 0, c15, 6)
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(PMEVCNTR##n)
+static inline unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, PMEVCNTR##n)
+static inline void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, PMEVTYPER##n)
+static inline void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
+static inline unsigned long read_pmmir(void)
+{
+ return read_sysreg(PMMIR);
+}
+
+static inline u32 read_pmuver(void)
+{
+ /* PMUVers is not a signed field */
+ u32 dfr0 = read_cpuid_ext(CPUID_EXT_DFR0);
+
+ return (dfr0 >> 24) & 0xf;
+}
+
+static inline bool pmuv3_has_icntr(void)
+{
+ /* FEAT_PMUv3_ICNTR not accessible for 32-bit */
+ return false;
+}
+
+static inline void write_pmcr(u32 val)
+{
+ write_sysreg(val, PMCR);
+}
+
+static inline u32 read_pmcr(void)
+{
+ return read_sysreg(PMCR);
+}
+
+static inline void write_pmselr(u32 val)
+{
+ write_sysreg(val, PMSELR);
+}
+
+static inline void write_pmccntr(u64 val)
+{
+ write_sysreg(val, PMCCNTR);
+}
+
+static inline u64 read_pmccntr(void)
+{
+ return read_sysreg(PMCCNTR);
+}
+
+static inline void write_pmicntr(u64 val) {}
+
+static inline u64 read_pmicntr(void)
+{
+ return 0;
+}
+
+static inline void write_pmcntenset(u32 val)
+{
+ write_sysreg(val, PMCNTENSET);
+}
+
+static inline void write_pmcntenclr(u32 val)
+{
+ write_sysreg(val, PMCNTENCLR);
+}
+
+static inline void write_pmintenset(u32 val)
+{
+ write_sysreg(val, PMINTENSET);
+}
+
+static inline void write_pmintenclr(u32 val)
+{
+ write_sysreg(val, PMINTENCLR);
+}
+
+static inline void write_pmccfiltr(u32 val)
+{
+ write_sysreg(val, PMCCFILTR);
+}
+
+static inline void write_pmicfiltr(u64 val) {}
+
+static inline u64 read_pmicfiltr(void)
+{
+ return 0;
+}
+
+static inline void write_pmovsclr(u32 val)
+{
+ write_sysreg(val, PMOVSR);
+}
+
+static inline u32 read_pmovsclr(void)
+{
+ return read_sysreg(PMOVSR);
+}
+
+static inline void write_pmuserenr(u32 val)
+{
+ write_sysreg(val, PMUSERENR);
+}
+
+static inline void write_pmuacr(u64 val) {}
+
+static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
+static inline void kvm_clr_pmu_events(u32 clr) {}
+static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
+{
+ return false;
+}
+
+static inline bool kvm_set_pmuserenr(u64 val)
+{
+ return false;
+}
+
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
+/* PMU Version in DFR Register */
+#define ARMV8_PMU_DFR_VER_NI 0
+#define ARMV8_PMU_DFR_VER_V3P1 0x4
+#define ARMV8_PMU_DFR_VER_V3P4 0x5
+#define ARMV8_PMU_DFR_VER_V3P5 0x6
+#define ARMV8_PMU_DFR_VER_V3P9 0x9
+#define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
+
+static inline bool pmuv3_implemented(int pmuver)
+{
+ return !(pmuver == ARMV8_PMU_DFR_VER_IMP_DEF ||
+ pmuver == ARMV8_PMU_DFR_VER_NI);
+}
+
+static inline bool is_pmuv3p4(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P4;
+}
+
+static inline bool is_pmuv3p5(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
+}
+
+static inline bool is_pmuv3p9(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P9;
+}
+
+static inline u64 read_pmceid0(void)
+{
+ u64 val = read_sysreg(PMCEID0);
+
+ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
+ val |= (u64)read_sysreg(PMCEID2) << 32;
+
+ return val;
+}
+
+static inline u64 read_pmceid1(void)
+{
+ u64 val = read_sysreg(PMCEID1);
+
+ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
+ val |= (u64)read_sysreg(PMCEID3) << 32;
+
+ return val;
+}
+
+#endif
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index ad301f107dd2..d33c1e24e00b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/assembler.h
*
* Copyright (C) 1996-2000 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This file contains arm architecture specific defines
* for the different processors.
*
@@ -21,11 +18,12 @@
#endif
#include <asm/ptrace.h>
-#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
#define IOMEM(x) (x)
@@ -89,6 +87,10 @@
#define IMM12_MASK 0xfff
+/* the frame pointer used for stack unwinding */
+ARM( fpreg .req r11 )
+THUMB( fpreg .req r7 )
+
/*
* Enable and disable interrupts
*/
@@ -110,6 +112,16 @@
.endm
#endif
+#if __LINUX_ARM_ARCH__ < 7
+ .macro dsb, args
+ mcr p15, 0, r0, c7, c10, 4
+ .endm
+
+ .macro isb, args
+ mcr p15, 0, r0, c7, c5, 4
+ .endm
+#endif
+
.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
.if \save
@@ -206,10 +218,8 @@
* Get current thread_info.
*/
.macro get_thread_info, rd
- ARM( mov \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT )
- THUMB( mov \rd, sp )
- THUMB( lsr \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT )
- mov \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
+ /* thread_info is the first member of struct task_struct */
+ get_current \rd
.endm
/*
@@ -227,29 +237,23 @@
sub \tmp, \tmp, #1 @ decrement it
str \tmp, [\ti, #TI_PREEMPT]
.endm
-
- .macro dec_preempt_count_ti, ti, tmp
- get_thread_info \ti
- dec_preempt_count \ti, \tmp
- .endm
#else
.macro inc_preempt_count, ti, tmp
.endm
.macro dec_preempt_count, ti, tmp
.endm
-
- .macro dec_preempt_count_ti, ti, tmp
- .endm
#endif
-#define USER(x...) \
+#define USERL(l, x...) \
9999: x; \
.pushsection __ex_table,"a"; \
.align 3; \
- .long 9999b,9001f; \
+ .long 9999b,l; \
.popsection
+#define USER(x...) USERL(9001f, x)
+
#ifdef CONFIG_SMP
#define ALT_SMP(instr...) \
9998: instr
@@ -260,7 +264,8 @@
*/
#define ALT_UP(instr...) \
.pushsection ".alt.smp.init", "a" ;\
- .long 9998b ;\
+ .align 2 ;\
+ .long 9998b - . ;\
9997: instr ;\
.if . - 9997b == 2 ;\
nop ;\
@@ -270,10 +275,10 @@
.endif ;\
.popsection
#define ALT_UP_B(label) \
- .equ up_b_offset, label - 9998b ;\
.pushsection ".alt.smp.init", "a" ;\
- .long 9998b ;\
- W(b) . + up_b_offset ;\
+ .align 2 ;\
+ .long 9998b - . ;\
+ W(b) . + (label - 9998b) ;\
.popsection
#else
#define ALT_SMP(instr...)
@@ -281,6 +286,80 @@
#define ALT_UP_B(label) b label
#endif
+ /*
+ * this_cpu_offset - load the per-CPU offset of this CPU into
+ * register 'rd'
+ */
+ .macro this_cpu_offset, rd:req
+#ifdef CONFIG_SMP
+ALT_SMP(mrc p15, 0, \rd, c13, c0, 4)
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L1_\@)
+.L0_\@:
+ .subsection 1
+.L1_\@: ldr_va \rd, __per_cpu_offset
+ b .L0_\@
+ .previous
+#endif
+#else
+ mov \rd, #0
+#endif
+ .endm
+
+ /*
+ * set_current - store the task pointer of this CPU's current task
+ */
+ .macro set_current, rn:req, tmp:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+9998: mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L0_\@)
+ .subsection 1
+.L0_\@: str_va \rn, __current, \tmp
+ b .L1_\@
+ .previous
+.L1_\@:
+#endif
+#else
+ str_va \rn, __current, \tmp
+#endif
+ .endm
+
+ /*
+ * get_current - load the task pointer of this CPU's current task
+ */
+ .macro get_current, rd:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+9998: mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L0_\@)
+ .subsection 1
+.L0_\@: ldr_va \rd, __current
+ b .L1_\@
+ .previous
+.L1_\@:
+#endif
+#else
+ ldr_va \rd, __current
+#endif
+ .endm
+
+ /*
+ * reload_current - reload the task pointer of this CPU's current task
+ * into the TLS register
+ */
+ .macro reload_current, t1:req, t2:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+#ifdef CONFIG_CPU_V6
+ALT_SMP(nop)
+ALT_UP_B(.L0_\@)
+#endif
+ ldr_this_cpu \t1, __entry_task, \t1, \t2
+ mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO
+.L0_\@:
+#endif
+ .endm
+
/*
* Instruction barrier
*/
@@ -316,6 +395,23 @@
#endif
.endm
+/*
+ * Raw SMP data memory barrier
+ */
+ .macro __smp_dmb mode
+#if __LINUX_ARM_ARCH__ >= 7
+ .ifeqs "\mode","arm"
+ dmb ish
+ .else
+ W(dmb) ish
+ .endif
+#elif __LINUX_ARM_ARCH__ == 6
+ mcr p15, 0, r0, c7, c10, 5 @ dmb
+#else
+ .error "Incompatible SMP platform"
+#endif
+ .endm
+
#if defined(CONFIG_CPU_V7M)
/*
* setmode is used to assert to be in svc mode during boot. For v7-M
@@ -374,9 +470,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
.if \inc == 1
- \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
+ \instr\()b\t\cond\().w \reg, [\ptr, #\off]
.elseif \inc == 4
- \instr\cond\()\t\().w \reg, [\ptr, #\off]
+ \instr\t\cond\().w \reg, [\ptr, #\off]
.else
.error "Unsupported inc macro argument"
.endif
@@ -415,9 +511,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.rept \rept
9999:
.if \inc == 1
- \instr\cond\()b\()\t \reg, [\ptr], #\inc
+ \instr\()b\t\cond \reg, [\ptr], #\inc
.elseif \inc == 4
- \instr\cond\()\t \reg, [\ptr], #\inc
+ \instr\t\cond \reg, [\ptr], #\inc
.else
.error "Unsupported inc macro argument"
.endif
@@ -447,75 +543,253 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.size \name , . - \name
.endm
- .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
-#ifndef CONFIG_CPU_USE_DOMAINS
- adds \tmp, \addr, #\size - 1
- sbcccs \tmp, \tmp, \limit
- bcs \bad
+ .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+ .macro ret\c, reg
+#if __LINUX_ARM_ARCH__ < 6
+ mov\c pc, \reg
+#else
+ .ifeqs "\reg", "lr"
+ bx\c \reg
+ .else
+ mov\c pc, \reg
+ .endif
+#endif
+ .endm
+ .endr
+
+ .macro ret.w, reg
+ ret \reg
+#ifdef CONFIG_THUMB2_KERNEL
+ nop
+#endif
+ .endm
+
+ .macro bug, msg, line
+#ifdef CONFIG_THUMB2_KERNEL
+1: .inst 0xde02
+#else
+1: .inst 0xe7f001f2
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ .pushsection .rodata.str, "aMS", %progbits, 1
+2: .asciz "\msg"
+ .popsection
+ .pushsection __bug_table, "aw"
+ .align 2
+ .word 1b, 2b
+ .hword \line
+ .popsection
#endif
.endm
- .macro uaccess_disable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry) \
+ .pushsection "_kprobe_blacklist", "aw" ; \
+ .balign 4 ; \
+ .long entry; \
+ .popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
+ .macro __adldst_l, op, reg, sym, tmp, c
+ .if __LINUX_ARM_ARCH__ < 7
+ ldr\c \tmp, .La\@
+ .subsection 1
+ .align 2
+.La\@: .long \sym - .Lpc\@
+ .previous
+ .else
+ .ifnb \c
+ THUMB( ittt \c )
+ .endif
+ movw\c \tmp, #:lower16:\sym - .Lpc\@
+ movt\c \tmp, #:upper16:\sym - .Lpc\@
+ .endif
+
+#ifndef CONFIG_THUMB2_KERNEL
+ .set .Lpc\@, . + 8 // PC bias
+ .ifc \op, add
+ add\c \reg, \tmp, pc
+ .else
+ \op\c \reg, [pc, \tmp]
+ .endif
+#else
+.Lb\@: add\c \tmp, \tmp, pc
/*
- * Whenever we re-enter userspace, the domains should always be
- * set appropriately.
+ * In Thumb-2 builds, the PC bias depends on whether we are currently
+ * emitting into a .arm or a .thumb section. The size of the add opcode
+ * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
+ * emitting in ARM mode, so let's use this to account for the bias.
*/
- mov \tmp, #DACR_UACCESS_DISABLE
- mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
- .if \isb
- instr_sync
+ .set .Lpc\@, . + (. - .Lb\@)
+
+ .ifnc \op, add
+ \op\c \reg, [\tmp]
.endif
#endif
.endm
- .macro uaccess_enable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
- * Whenever we re-enter userspace, the domains should always be
- * set appropriately.
+ * mov_l - move a constant value or [relocated] address into a register
*/
- mov \tmp, #DACR_UACCESS_ENABLE
- mcr p15, 0, \tmp, c3, c0, 0
- .if \isb
- instr_sync
+ .macro mov_l, dst:req, imm:req, cond
+ .if __LINUX_ARM_ARCH__ < 7
+ ldr\cond \dst, =\imm
+ .else
+ movw\cond \dst, #:lower16:\imm
+ movt\cond \dst, #:upper16:\imm
.endif
-#endif
.endm
- .macro uaccess_save, tmp
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- mrc p15, 0, \tmp, c3, c0, 0
- str \tmp, [sp, #SVC_DACR]
-#endif
+ /*
+ * adr_l - adr pseudo-op with unlimited range
+ *
+ * @dst: destination register
+ * @sym: name of the symbol
+ * @cond: conditional opcode suffix
+ */
+ .macro adr_l, dst:req, sym:req, cond
+ __adldst_l add, \dst, \sym, \dst, \cond
.endm
- .macro uaccess_restore
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- ldr r0, [sp, #SVC_DACR]
- mcr p15, 0, r0, c3, c0, 0
-#endif
+ /*
+ * ldr_l - ldr <literal> pseudo-op with unlimited range
+ *
+ * @dst: destination register
+ * @sym: name of the symbol
+ * @cond: conditional opcode suffix
+ */
+ .macro ldr_l, dst:req, sym:req, cond
+ __adldst_l ldr, \dst, \sym, \dst, \cond
.endm
- .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
- .macro ret\c, reg
-#if __LINUX_ARM_ARCH__ < 6
- mov\c pc, \reg
+ /*
+ * str_l - str <literal> pseudo-op with unlimited range
+ *
+ * @src: source register
+ * @sym: name of the symbol
+ * @tmp: mandatory scratch register
+ * @cond: conditional opcode suffix
+ */
+ .macro str_l, src:req, sym:req, tmp:req, cond
+ __adldst_l str, \src, \sym, \tmp, \cond
+ .endm
+
+ .macro __ldst_va, op, reg, tmp, sym, cond, offset
+#if __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ mov_l \tmp, \sym, \cond
#else
- .ifeqs "\reg", "lr"
- bx\c \reg
+ /*
+ * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
+ * with the appropriate relocations. The combined sequence has a range
+ * of -/+ 256 MiB, which should be sufficient for the core kernel and
+ * for modules loaded into the module region.
+ */
+ .globl \sym
+ .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
+ .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
+ .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
+.L0_\@: sub\cond \tmp, pc, #8 - \offset
+.L1_\@: sub\cond \tmp, \tmp, #4 - \offset
+.L2_\@:
+#endif
+ \op\cond \reg, [\tmp, #\offset]
+ .endm
+
+ /*
+ * ldr_va - load a 32-bit word from the virtual address of \sym
+ */
+ .macro ldr_va, rd:req, sym:req, cond, tmp, offset=0
+ .ifnb \tmp
+ __ldst_va ldr, \rd, \tmp, \sym, \cond, \offset
.else
- mov\c pc, \reg
+ __ldst_va ldr, \rd, \rd, \sym, \cond, \offset
.endif
-#endif
.endm
- .endr
- .macro ret.w, reg
- ret \reg
-#ifdef CONFIG_THUMB2_KERNEL
- nop
+ /*
+ * str_va - store a 32-bit word to the virtual address of \sym
+ */
+ .macro str_va, rn:req, sym:req, tmp:req, cond
+ __ldst_va str, \rn, \tmp, \sym, \cond, 0
+ .endm
+
+ /*
+ * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
+ * without using a temp register. Supported in ARM mode
+ * only.
+ */
+ .macro ldr_this_cpu_armv6, rd:req, sym:req
+ this_cpu_offset \rd
+ .globl \sym
+ .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
+ .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
+ .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
+ add \rd, \rd, pc
+.L0_\@: sub \rd, \rd, #4
+.L1_\@: sub \rd, \rd, #0
+.L2_\@: ldr \rd, [\rd, #4]
+ .endm
+
+ /*
+ * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
+ * into register 'rd', which may be the stack pointer,
+ * using 't1' and 't2' as general temp registers. These
+ * are permitted to overlap with 'rd' if != sp
+ */
+ .macro ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
+#ifndef CONFIG_SMP
+ ldr_va \rd, \sym, tmp=\t1
+#elif __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ this_cpu_offset \t1
+ mov_l \t2, \sym
+ ldr \rd, [\t1, \t2]
+#else
+ ldr_this_cpu_armv6 \rd, \sym
#endif
.endm
+ /*
+ * rev_l - byte-swap a 32-bit value
+ *
+ * @val: source/destination register
+ * @tmp: scratch register
+ */
+ .macro rev_l, val:req, tmp:req
+ .if __LINUX_ARM_ARCH__ < 6
+ eor \tmp, \val, \val, ror #16
+ bic \tmp, \tmp, #0x00ff0000
+ mov \val, \val, ror #8
+ eor \val, \val, \tmp, lsr #8
+ .else
+ rev \val, \val
+ .endif
+ .endm
+
+ .if __LINUX_ARM_ARCH__ < 6
+ .set .Lrev_l_uses_tmp, 1
+ .else
+ .set .Lrev_l_uses_tmp, 0
+ .endif
+
+ /*
+ * bl_r - branch and link to register
+ *
+ * @dst: target to branch to
+ * @c: conditional opcode suffix
+ */
+ .macro bl_r, dst:req, c
+ .if __LINUX_ARM_ARCH__ < 6
+ mov\c lr, pc
+ mov\c pc, \dst
+ .else
+ blx\c \dst
+ .endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 66d0e215a773..f0e3b01afa74 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/atomic.h
*
* Copyright (C) 1996 Russell King.
* Copyright (C) 2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H
@@ -18,8 +15,6 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
-#define ATOMIC_INIT(i) { (i) }
-
#ifdef __KERNEL__
/*
@@ -27,8 +22,8 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#if __LINUX_ARM_ARCH__ >= 6
@@ -39,7 +34,7 @@
*/
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -57,7 +52,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -78,7 +73,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result, val; \
@@ -98,17 +93,17 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
return result; \
}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
+static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
int oldval;
unsigned long res;
@@ -128,9 +123,9 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
return oldval;
}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int oldval, newval;
unsigned long tmp;
@@ -156,6 +151,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return oldval;
}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#else /* ARM_ARCH_6 */
@@ -164,7 +160,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -174,7 +170,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -188,7 +184,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -201,7 +197,17 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
return val; \
}
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -214,16 +220,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
-
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
- c = old;
- return c;
-}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#endif /* __LINUX_ARM_ARCH__ */
@@ -235,7 +232,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
+#define arch_atomic_andnot arch_atomic_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
@@ -252,30 +249,17 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v))
-#define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
- long long counter;
+ s64 counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-static inline long long atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
- long long result;
+ s64 result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrd %0, %H0, [%1]"
@@ -286,7 +270,7 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
@@ -295,9 +279,9 @@ static inline void atomic64_set(atomic64_t *v, long long i)
);
}
#else
-static inline long long atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
- long long result;
+ s64 result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
@@ -308,9 +292,9 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
- long long tmp;
+ s64 tmp;
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_set\n"
@@ -325,9 +309,9 @@ static inline void atomic64_set(atomic64_t *v, long long i)
#endif
#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(long long i, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
- long long result; \
+ s64 result; \
unsigned long tmp; \
\
prefetchw(&v->counter); \
@@ -344,10 +328,10 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline long long \
-atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
+static inline s64 \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \
- long long result; \
+ s64 result; \
unsigned long tmp; \
\
prefetchw(&v->counter); \
@@ -367,10 +351,10 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
}
#define ATOMIC64_FETCH_OP(op, op1, op2) \
-static inline long long \
-atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \
+static inline s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \
- long long result, val; \
+ s64 result, val; \
unsigned long tmp; \
\
prefetchw(&v->counter); \
@@ -397,37 +381,36 @@ atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-static inline long long
-atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
- long long oldval;
+ s64 oldval;
unsigned long res;
prefetchw(&ptr->counter);
@@ -446,11 +429,11 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
return oldval;
}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
-static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
- long long result;
+ s64 result;
unsigned long tmp;
prefetchw(&ptr->counter);
@@ -466,11 +449,11 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
return result;
}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
- long long result;
+ s64 result;
unsigned long tmp;
smp_mb();
@@ -494,12 +477,12 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return result;
}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
- long long val;
+ s64 oldval, newval;
unsigned long tmp;
- int ret = 1;
smp_mb();
prefetchw(&v->counter);
@@ -508,33 +491,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
-" moveq %1, #0\n"
" beq 2f\n"
-" adds %Q0, %Q0, %Q6\n"
-" adc %R0, %R0, %R6\n"
-" strexd %2, %0, %H0, [%4]\n"
+" adds %Q1, %Q0, %Q6\n"
+" adc %R1, %R0, %R6\n"
+" strexd %2, %1, %H1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
- : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
- if (ret)
+ if (oldval != u)
smp_mb();
- return ret;
+ return oldval;
}
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
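
Note: the atomic.h rework above renames the helpers to their arch_atomic*()/arch_atomic64*() forms, switches the 64-bit counter to s64, and turns atomic64_add_unless() into arch_atomic64_fetch_add_unless(), which returns the old value and only issues the trailing smp_mb() when the add actually happened. A stand-alone C11 sketch of that contract (illustration only, not kernel code; the patch implements it with ldrexd/strexd):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the contract: add @a to *v unless *v == @u; return the old value. */
static int64_t fetch_add_unless(_Atomic int64_t *v, int64_t a, int64_t u)
{
	int64_t old = atomic_load(v);

	while (old != u &&
	       !atomic_compare_exchange_weak(v, &old, old + a))
		;	/* a failed CAS reloads 'old'; retry */
	return old;
}

int main(void)
{
	_Atomic int64_t v = 0;

	printf("%lld\n", (long long)fetch_add_unless(&v, 1, 0)); /* 0: refused, v stays 0 */
	atomic_store(&v, 3);
	printf("%lld\n", (long long)fetch_add_unless(&v, 1, 0)); /* 3: added, v becomes 4 */
	printf("%lld\n", (long long)atomic_load(&v));            /* 4 */
	return 0;
}
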
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
index 1714800fa113..45a75d9381eb 100644
--- a/arch/arm/include/asm/bL_switcher.h
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/bL_switcher.h
*
* Created by: Nicolas Pitre, April 2012
* Copyright: (C) 2012-2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASM_BL_SWITCHER_H
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index f5d698182d50..83ae97c049d9 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
@@ -10,12 +11,20 @@
#define sev() __asm__ __volatile__ ("sev" : : : "memory")
#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+#else
+#define wfe() do { } while (0)
#endif
#if __LINUX_ARM_ARCH__ >= 7
#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
+#ifdef CONFIG_THUMB2_KERNEL
+#define CSDB ".inst.w 0xf3af8014"
+#else
+#define CSDB ".inst 0xe320f014"
+#endif
+#define csdb() __asm__ __volatile__(CSDB : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
@@ -36,6 +45,13 @@
#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif
+#ifndef CSDB
+#define CSDB
+#endif
+#ifndef csdb
+#define csdb()
+#endif
+
#ifdef CONFIG_ARM_HEAVY_MB
extern void (*soc_mb)(void);
extern void arm_heavy_mb(void);
@@ -62,6 +78,25 @@ extern void arm_heavy_mb(void);
#define __smp_rmb() __smp_mb()
#define __smp_wmb() dmb(ishst)
+#ifdef CONFIG_CPU_SPECTRE
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+ unsigned long sz)
+{
+ unsigned long mask;
+
+ asm volatile(
+ "cmp %1, %2\n"
+ " sbc %0, %1, %1\n"
+ CSDB
+ : "=r" (mask)
+ : "r" (idx), "Ir" (sz)
+ : "cc");
+
+ return mask;
+}
+#define array_index_mask_nospec array_index_mask_nospec
+#endif
+
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
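
Note: the barrier.h hunk adds the Spectre-v1 helper array_index_mask_nospec(), which yields an all-ones mask when idx < sz and zero otherwise, followed by CSDB so the result also holds under speculation. A plain C model of just the mask arithmetic (the speculation barrier itself cannot be expressed here):

#include <stdio.h>

/* ~0UL when idx < sz, 0UL otherwise; the real helper uses cmp/sbc + CSDB. */
static unsigned long index_mask(unsigned long idx, unsigned long sz)
{
	return idx < sz ? ~0UL : 0UL;
}

int main(void)
{
	unsigned long table[4] = { 10, 20, 30, 40 };
	unsigned long idx = 7;			/* attacker-influenced, out of range */

	idx &= index_mask(idx, 4);		/* masked to 0 when out of bounds */
	printf("%lu\n", table[idx]);		/* prints 10, never reads past the array */
	return 0;
}
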
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index f308c8c40cb9..714440fa2fc6 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 1995, Russell King.
* Various bits and pieces copyrights include:
@@ -159,18 +160,20 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/
-extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *p,
+ unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);
/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/
-extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_be(const unsigned long *p,
+ unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);
#ifndef CONFIG_SMP
/*
@@ -214,7 +217,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#if __LINUX_ARM_ARCH__ < 5
-#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
@@ -222,93 +224,20 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#else
-static inline int constant_fls(int x)
-{
- int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-
-/*
- * On ARMv5 and above those functions can be implemented around the
- * clz instruction for much better code efficiency. __clz returns
- * the number of leading zeros, zero input will return 32, and
- * 0x80000000 will return 0.
- */
-static inline unsigned int __clz(unsigned int x)
-{
- unsigned int ret;
-
- asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
-
- return ret;
-}
-
-/*
- * fls() returns zero if the input is zero, otherwise returns the bit
- * position of the last set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int fls(int x)
-{
- if (__builtin_constant_p(x))
- return constant_fls(x);
-
- return 32 - __clz(x);
-}
-
-/*
- * __fls() returns the bit position of the last bit set, where the
- * LSB is 0 and MSB is 31. Zero input is undefined.
- */
-static inline unsigned long __fls(unsigned long x)
-{
- return fls(x) - 1;
-}
-
-/*
- * ffs() returns zero if the input was zero, otherwise returns the bit
- * position of the first set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int ffs(int x)
-{
- return fls(x & -x);
-}
-
/*
- * __ffs() returns the bit position of the first bit set, where the
- * LSB is 0 and MSB is 31. Zero input is undefined.
+ * On ARMv5 and above, the gcc built-ins may rely on the clz instruction
+ * and produce optimal inlined code in all cases. On ARMv7 it is even
+ * better by also using the rbit instruction.
*/
-static inline unsigned long __ffs(unsigned long x)
-{
- return ffs(x) - 1;
-}
-
-#define ffz(x) __ffs( ~(x) )
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-ffs.h>
#endif
+#include <asm-generic/bitops/ffz.h>
+
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
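
Note: bitops.h drops the hand-rolled clz-based fls()/ffs() family in favour of the asm-generic builtin-* headers. Roughly, those headers reduce to the compiler builtins sketched below (a user-space approximation, assuming __builtin_clz/__builtin_ctz are available, which the new comment says is the case from ARMv5 on):

#include <stdio.h>

static int my_fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }
static int my_ffs(unsigned int x) { return x ? __builtin_ctz(x) + 1 : 0; }

int main(void)
{
	/* fls(0x80000000)=32, ffs(0x8)=4, fls(0)=0 */
	printf("%d %d %d\n", my_fls(0x80000000u), my_ffs(0x8u), my_fls(0));
	return 0;
}
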
diff --git a/arch/arm/include/asm/bitrev.h b/arch/arm/include/asm/bitrev.h
index ec291c350ea3..84ad8dde62d6 100644
--- a/arch/arm/include/asm/bitrev.h
+++ b/arch/arm/include/asm/bitrev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BITREV_H
#define __ASM_BITREV_H
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 2244a94ed9c9..ba8d9d7d242b 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASMARM_BUG_H
#define _ASMARM_BUG_H
@@ -61,8 +62,8 @@ do { \
struct pt_regs;
void die(const char *msg, struct pt_regs *regs, int err);
-struct siginfo;
-void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+void arm_notify_die(const char *str, struct pt_regs *regs,
+ int signo, int si_code, void __user *addr,
unsigned long err, unsigned long trap);
#ifdef CONFIG_ARM_LPAE
@@ -81,10 +82,12 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
int sig, int code, const char *name);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode,
+ const char *loglvl);
struct mm_struct;
-extern void show_pte(struct mm_struct *mm, unsigned long addr);
+void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
+extern void __show_regs_alloc_free(struct pt_regs *regs);
#endif
diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
index a97f1ea708d1..fe385551edec 100644
--- a/arch/arm/include/asm/bugs.h
+++ b/arch/arm/include/asm/bugs.h
@@ -1,21 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * arch/arm/include/asm/bugs.h
- *
* Copyright (C) 1995-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_BUGS_H
#define __ASM_BUGS_H
-#ifdef CONFIG_MMU
extern void check_writebuffer_bugs(void);
-#define check_bugs() check_writebuffer_bugs()
+#ifdef CONFIG_MMU
+extern void check_other_bugs(void);
#else
-#define check_bugs() do { } while (0)
+#define check_other_bugs() do { } while (0)
#endif
#endif
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66bc02b4..ecbc100d22a5 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/cache.h
*/
@@ -23,6 +24,12 @@
#define ARCH_SLAB_MINALIGN 8
#endif
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(".data..read_mostly")
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
+int cache_line_size(void);
+#endif
+#endif
#endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 74504b154256..8ed8b9a24efe 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/cacheflush.h
*
* Copyright (C) 1999-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H
@@ -35,7 +32,7 @@
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
- * See Documentation/cachetlb.txt for more information.
+ * See Documentation/core-api/cachetlb.rst for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
@@ -234,14 +231,15 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
vma->vm_flags);
}
-static inline void
-vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
struct mm_struct *mm = vma->vm_mm;
if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
unsigned long addr = user_addr & PAGE_MASK;
- __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
+ __cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
+ vma->vm_flags);
}
}
@@ -250,22 +248,24 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
vivt_flush_cache_range(vma,start,end)
-#define flush_cache_page(vma,addr,pfn) \
- vivt_flush_cache_page(vma,addr,pfn)
+#define flush_cache_pages(vma, addr, pfn, nr) \
+ vivt_flush_cache_pages(vma, addr, pfn, nr)
#else
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
+ unsigned long pfn, unsigned int nr);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+#define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)
/*
- * flush_cache_user_range is used when we want to ensure that the
+ * flush_icache_user_range is used when we want to ensure that the
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
-#define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e)
+#define flush_icache_user_range(s,e) __cpuc_coherent_user_range(s,e)
/*
* Perform necessary cache operations to ensure that data previously
@@ -283,7 +283,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* flush_dcache_page is used when the kernel has written to the page
* cache page at virtual address page->virtual.
*
- * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * If this page isn't mapped (ie, folio_mapping == NULL), or it might
* have userspace mappings, then we _must_ always clean + invalidate
* the dcache entries associated with the kernel mapping.
*
@@ -292,8 +292,11 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* See update_mmu_cache for the user space part.
*/
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *);
+void flush_dcache_page(struct page *);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
+#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
if ((cache_is_vivt() || cache_is_vipt_aliasing()))
@@ -315,22 +318,8 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
__flush_anon_page(vma, page, vmaddr);
}
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-extern void flush_kernel_dcache_page(struct page *);
-
-#define flush_dcache_mmap_lock(mapping) \
- spin_lock_irq(&(mapping)->tree_lock)
-#define flush_dcache_mmap_unlock(mapping) \
- spin_unlock_irq(&(mapping)->tree_lock)
-
-#define flush_icache_user_range(vma,page,addr,len) \
- flush_dcache_page(page)
-
-/*
- * We don't appear to need to do anything here. In fact, if we did, we'd
- * duplicate cache flushing elsewhere performed by flush_dcache_page().
- */
-#define flush_icache_page(vma,page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
@@ -351,6 +340,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
dsb(ishst);
}
+#define flush_cache_vmap_early(start, end) do { } while (0)
+
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
if (!cache_is_vipt_nonaliasing())
@@ -455,15 +446,10 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
* however some exceptions may exist. Caveat emptor.
*
* - The clobber list is dictated by the call to v7_flush_dcache_*.
- * fp is preserved to the stack explicitly prior disabling the cache
- * since adding it to the clobber list is incompatible with having
- * CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering
- * trampoline are inserted by the linker and to keep sp 64-bit aligned.
*/
#define v7_exit_coherency_flush(level) \
asm volatile( \
".arch armv7-a \n\t" \
- "stmfd sp!, {fp, ip} \n\t" \
"mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
"bic r0, r0, #"__stringify(CR_C)" \n\t" \
"mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
@@ -473,12 +459,18 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
"bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
"mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
"isb \n\t" \
- "dsb \n\t" \
- "ldmfd sp!, {fp, ip}" \
- : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
- "r9","r10","lr","memory" )
+ "dsb" \
+ : : : "r0","r1","r2","r3","r4","r5","r6", \
+ "r9","r10","ip","lr","memory" )
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
+
+#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
+void check_cpu_icache_size(int cpuid);
+#else
+static inline void check_cpu_icache_size(int cpuid) { }
+#endif
+
#endif
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index 01509ae0bbec..b01c59076b84 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CACHETYPE_H
#define __ASM_ARM_CACHETYPE_H
@@ -19,6 +20,8 @@ extern unsigned int cacheid;
#define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING)
#define icache_is_pipt() cacheid_is(CACHEID_PIPT)
+#define cpu_dcache_is_aliasing() (cache_is_vivt() || cache_is_vipt_aliasing())
+
/*
* __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
* Mask out support which will never be present on newer CPUs.
@@ -80,6 +83,14 @@ static inline unsigned int read_ccsidr(void)
asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (val));
return val;
}
+
+static inline unsigned int read_clidr(void)
+{
+ unsigned int val;
+
+ asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (val));
+ return val;
+}
#else /* CONFIG_CPU_V7M */
#include <linux/io.h>
#include "asm/v7m.h"
@@ -93,6 +104,11 @@ static inline unsigned int read_ccsidr(void)
{
return readl(BASEADDR_V7M_SCB + V7M_SCB_CCSIDR);
}
+
+static inline unsigned int read_clidr(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CLIDR);
+}
#endif
#endif
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 524692f4acab..d8a13959bff0 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/checksum.h
*
@@ -10,6 +11,7 @@
#define __ASM_ARM_CHECKSUM_H
#include <linux/in6.h>
+#include <linux/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
@@ -34,10 +36,21 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+csum_partial_copy_nocheck(const void *src, void *dst, int len);
__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+csum_partial_copy_from_user(const void __user *src, void *dst, int len);
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
+{
+ if (!access_ok(src, len))
+ return 0;
+
+ return csum_partial_copy_from_user(src, dst, len);
+}
/*
* Fold a partial checksum without adding pseudo headers
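
Note: the checksum.h change drops the old sum/err_ptr parameters and has csum_and_copy_from_user() return 0 when access_ok() fails. Since the trailing context above is the "fold a partial checksum" comment, here is a generic sketch of that fold step (textbook end-around-carry form; the ARM header does it in assembly):

#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb any new carry */
	return (uint16_t)~sum;			/* final one's-complement checksum */
}

int main(void)
{
	printf("0x%04x\n", fold(0x1a2b3c4du));	/* prints 0xa987 */
	return 0;
}
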
diff --git a/arch/arm/include/asm/clocksource.h b/arch/arm/include/asm/clocksource.h
index 0b350a7e26f3..13651c731a81 100644
--- a/arch/arm/include/asm/clocksource.h
+++ b/arch/arm/include/asm/clocksource.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_CLOCKSOURCE_H
#define _ASM_CLOCKSOURCE_H
-struct arch_clocksource_data {
- bool vdso_direct; /* Usable for direct VDSO access? */
-};
+#include <asm/vdso/clocksource.h>
-#endif
+#endif /* _ASM_CLOCKSOURCE_H */
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 97882f9bad12..9beb64d30586 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -1,9 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H
#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>
+#include <linux/cmpxchg-emu.h>
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
@@ -24,7 +26,8 @@
#define swp_is_buggy
#endif
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
extern void __bad_xchg(volatile void *, int);
unsigned long ret;
@@ -113,9 +116,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
return ret;
}
-#define xchg_relaxed(ptr, x) ({ \
- (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
- sizeof(*(ptr))); \
+#define arch_xchg_relaxed(ptr, x) ({ \
+ (__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr), \
+ sizeof(*(ptr))); \
})
#include <asm-generic/cmpxchg-local.h>
@@ -127,20 +130,20 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error "SMP is not supported on this platform"
#endif
-#define xchg xchg_relaxed
+#define arch_xchg arch_xchg_relaxed
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) ({ \
- (__typeof(*ptr))__cmpxchg_local_generic((ptr), \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof(*ptr))__generic_cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
})
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#include <asm-generic/cmpxchg.h>
@@ -160,7 +163,11 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
prefetchw((const void *)ptr);
switch (size) {
-#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
+#ifdef CONFIG_CPU_V6 /* ARCH == ARMv6 */
+ case 1:
+ oldval = cmpxchg_emu_u8((volatile u8 *)ptr, old, new);
+ break;
+#else /* min ARCH > ARMv6 */
case 1:
do {
asm volatile("@ __cmpxchg1\n"
@@ -206,7 +213,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
-#define cmpxchg_relaxed(ptr,o,n) ({ \
+#define arch_cmpxchg_relaxed(ptr,o,n) ({ \
(__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -223,7 +230,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
- ret = __cmpxchg_local_generic(ptr, old, new, size);
+ ret = __generic_cmpxchg_local(ptr, old, new, size);
break;
#endif
default:
@@ -233,7 +240,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
-#define cmpxchg_local(ptr, o, n) ({ \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
(__typeof(*ptr))__cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -265,13 +272,13 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
return oldval;
}
-#define cmpxchg64_relaxed(ptr, o, n) ({ \
+#define arch_cmpxchg64_relaxed(ptr, o, n) ({ \
(__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
})
-#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
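
Note: cmpxchg.h now exposes the arch_xchg*/arch_cmpxchg* names expected by the generic instrumented wrappers, and routes byte-sized cmpxchg on ARMv6 through cmpxchg_emu_u8(). For reference, the contract all of these back is the usual compare-and-swap that returns the observed old value, sketched here with a GCC/Clang builtin (user-space illustration only):

#include <stdio.h>

static unsigned long my_cmpxchg(unsigned long *p, unsigned long old,
				unsigned long new)
{
	/* store @new only if *p == @old; always return what *p held */
	return __sync_val_compare_and_swap(p, old, new);
}

int main(void)
{
	unsigned long v = 5;

	printf("%lu\n", my_cmpxchg(&v, 5, 9));	/* prints 5, v becomes 9 */
	printf("%lu\n", my_cmpxchg(&v, 5, 7));	/* prints 9, v unchanged  */
	return 0;
}
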
diff --git a/arch/arm/include/asm/compiler.h b/arch/arm/include/asm/compiler.h
index 29fe85e59439..5e94e67d1083 100644
--- a/arch/arm/include/asm/compiler.h
+++ b/arch/arm/include/asm/compiler.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_COMPILER_H
#define __ASM_ARM_COMPILER_H
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index dbdbce1b3a72..a54230e65647 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CP15_H
#define __ASM_ARM_CP15_H
@@ -49,20 +50,7 @@
#ifdef CONFIG_CPU_CP15
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
- "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
-#define __ACCESS_CP15_64(Op1, CRm) \
- "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
-
-#define __read_sysreg(r, w, c, t) ({ \
- t __val; \
- asm volatile(r " " c : "=r" (__val)); \
- __val; \
-})
-#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
-
-#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
-#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+#include <asm/vdso/cp15.h>
extern unsigned long cr_alignment; /* defined in entry-armv.S */
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index 2744f0602550..9d8863537aa5 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/cpu.h
*
* Copyright (C) 2004-2005 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_CPU_H
#define __ASM_ARM_CPU_H
@@ -14,7 +11,6 @@
#include <linux/cpu.h>
struct cpuinfo_arm {
- struct cpu cpu;
u32 cpuid;
#ifdef CONFIG_SMP
unsigned int loops_per_jiffy;
diff --git a/arch/arm/include/asm/cpufeature.h b/arch/arm/include/asm/cpufeature.h
index 6d425191d01d..16c161b3ff4d 100644
--- a/arch/arm/include/asm/cpufeature.h
+++ b/arch/arm/include/asm/cpufeature.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_CPUFEATURE_H
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index baefe1d51517..397be5ed30e7 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CPUIDLE_H
#define __ASM_ARM_CPUIDLE_H
@@ -6,9 +7,11 @@
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
#else
static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
#endif
/* Common ARM WFI state */
@@ -41,11 +44,15 @@ struct of_cpuidle_method {
#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
- __used __section(__cpuidle_method_of_table) \
- = { .method = _method, .ops = _ops }
+ __cpuidle_method_section = { .method = _method, .ops = _ops }
extern int arm_cpuidle_suspend(int index);
extern int arm_cpuidle_init(int cpu);
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c) (void)c
+#define arm_cpuidle_restore_irq_context(c) (void)c
+
#endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index b62eaeb147aa..0163c3e78a67 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -1,9 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CPUTYPE_H
#define __ASM_ARM_CPUTYPE_H
-#include <linux/stringify.h>
-#include <linux/kernel.h>
-
#define CPUID_ID 0
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
@@ -27,6 +25,8 @@
#define CPUID_EXT_ISAR3 0x6c
#define CPUID_EXT_ISAR4 0x70
#define CPUID_EXT_ISAR5 0x74
+#define CPUID_EXT_ISAR6 0x7c
+#define CPUID_EXT_PFR2 0x90
#else
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
@@ -42,6 +42,8 @@
#define CPUID_EXT_ISAR3 "c2, 3"
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
+#define CPUID_EXT_ISAR6 "c2, 7"
+#define CPUID_EXT_PFR2 "c3, 4"
#endif
#define MPIDR_SMP_BITMASK (0x3 << 30)
@@ -61,6 +63,7 @@
((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_BRCM 0x42
#define ARM_CPU_IMP_DEC 0x44
#define ARM_CPU_IMP_INTEL 0x69
@@ -76,8 +79,17 @@
#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
+#define ARM_CPU_PART_CORTEX_A53 0x4100d030
+#define ARM_CPU_PART_CORTEX_A57 0x4100d070
+#define ARM_CPU_PART_CORTEX_A72 0x4100d080
+#define ARM_CPU_PART_CORTEX_A73 0x4100d090
+#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0
#define ARM_CPU_PART_MASK 0xff00fff0
+/* Broadcom implemented processors */
+#define ARM_CPU_PART_BRAHMA_B15 0x420000f0
+#define ARM_CPU_PART_BRAHMA_B53 0x42001000
+
/* DEC implemented cores */
#define ARM_CPU_PART_SA1100 0x4400a110
@@ -97,7 +109,13 @@
/* Qualcomm implemented cores */
#define ARM_CPU_PART_SCORPION 0x510002d0
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+
extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
@@ -173,6 +191,11 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
return read_cpuid(CPUID_CACHETYPE);
}
+static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
+{
+ return read_cpuid(CPUID_MPUIR);
+}
+
#elif defined(CONFIG_CPU_V7M)
static inline unsigned int __attribute_const__ read_cpuid_id(void)
@@ -185,6 +208,11 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR);
}
+static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
+{
+ return readl(BASEADDR_V7M_SCB + MPU_TYPE);
+}
+
#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
static inline unsigned int __attribute_const__ read_cpuid_id(void)
@@ -315,4 +343,6 @@ static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
#define cpuid_feature_extract(reg, field) \
cpuid_feature_extract_field(read_cpuid_ext(reg), field)
+#endif /* __ASSEMBLY__ */
+
#endif
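
Note: the cputype.h hunk ends in the cpuid_feature_extract() context, which pulls a signed 4-bit field out of an ID register so that 0xf reads back as -1 ("not implemented"). A stand-alone model of that sign-extension, using shifts rather than the header's mask-and-adjust form:

#include <stdint.h>
#include <stdio.h>

static int extract_field(uint32_t reg, int field)
{
	/* move the 4-bit field to the top, then arithmetic-shift to sign-extend */
	return (int32_t)(reg << (28 - field)) >> 28;
}

int main(void)
{
	uint32_t isar = 0x00011121u;	/* made-up register value, for illustration */

	printf("%d\n", extract_field(isar, 0));		/* 1  */
	printf("%d\n", extract_field(0xf0000000u, 28));	/* -1 */
	return 0;
}
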
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
deleted file mode 100644
index 2381199acb7d..000000000000
--- a/arch/arm/include/asm/cti.h
+++ /dev/null
@@ -1,159 +0,0 @@
-#ifndef __ASMARM_CTI_H
-#define __ASMARM_CTI_H
-
-#include <asm/io.h>
-#include <asm/hardware/coresight.h>
-
-/* The registers' definition is from section 3.2 of
- * Embedded Cross Trigger Revision: r0p0
- */
-#define CTICONTROL 0x000
-#define CTISTATUS 0x004
-#define CTILOCK 0x008
-#define CTIPROTECTION 0x00C
-#define CTIINTACK 0x010
-#define CTIAPPSET 0x014
-#define CTIAPPCLEAR 0x018
-#define CTIAPPPULSE 0x01c
-#define CTIINEN 0x020
-#define CTIOUTEN 0x0A0
-#define CTITRIGINSTATUS 0x130
-#define CTITRIGOUTSTATUS 0x134
-#define CTICHINSTATUS 0x138
-#define CTICHOUTSTATUS 0x13c
-#define CTIPERIPHID0 0xFE0
-#define CTIPERIPHID1 0xFE4
-#define CTIPERIPHID2 0xFE8
-#define CTIPERIPHID3 0xFEC
-#define CTIPCELLID0 0xFF0
-#define CTIPCELLID1 0xFF4
-#define CTIPCELLID2 0xFF8
-#define CTIPCELLID3 0xFFC
-
-/* The below are from section 3.6.4 of
- * CoreSight v1.0 Architecture Specification
- */
-#define LOCKACCESS 0xFB0
-#define LOCKSTATUS 0xFB4
-
-/**
- * struct cti - cross trigger interface struct
- * @base: mapped virtual address for the cti base
- * @irq: irq number for the cti
- * @trig_out_for_irq: triger out number which will cause
- * the @irq happen
- *
- * cti struct used to operate cti registers.
- */
-struct cti {
- void __iomem *base;
- int irq;
- int trig_out_for_irq;
-};
-
-/**
- * cti_init - initialize the cti instance
- * @cti: cti instance
- * @base: mapped virtual address for the cti base
- * @irq: irq number for the cti
- * @trig_out: triger out number which will cause
- * the @irq happen
- *
- * called by machine code to pass the board dependent
- * @base, @irq and @trig_out to cti.
- */
-static inline void cti_init(struct cti *cti,
- void __iomem *base, int irq, int trig_out)
-{
- cti->base = base;
- cti->irq = irq;
- cti->trig_out_for_irq = trig_out;
-}
-
-/**
- * cti_map_trigger - use the @chan to map @trig_in to @trig_out
- * @cti: cti instance
- * @trig_in: trigger in number
- * @trig_out: trigger out number
- * @channel: channel number
- *
- * This function maps one trigger in of @trig_in to one trigger
- * out of @trig_out using the channel @chan.
- */
-static inline void cti_map_trigger(struct cti *cti,
- int trig_in, int trig_out, int chan)
-{
- void __iomem *base = cti->base;
- unsigned long val;
-
- val = __raw_readl(base + CTIINEN + trig_in * 4);
- val |= BIT(chan);
- __raw_writel(val, base + CTIINEN + trig_in * 4);
-
- val = __raw_readl(base + CTIOUTEN + trig_out * 4);
- val |= BIT(chan);
- __raw_writel(val, base + CTIOUTEN + trig_out * 4);
-}
-
-/**
- * cti_enable - enable the cti module
- * @cti: cti instance
- *
- * enable the cti module
- */
-static inline void cti_enable(struct cti *cti)
-{
- __raw_writel(0x1, cti->base + CTICONTROL);
-}
-
-/**
- * cti_disable - disable the cti module
- * @cti: cti instance
- *
- * enable the cti module
- */
-static inline void cti_disable(struct cti *cti)
-{
- __raw_writel(0, cti->base + CTICONTROL);
-}
-
-/**
- * cti_irq_ack - clear the cti irq
- * @cti: cti instance
- *
- * clear the cti irq
- */
-static inline void cti_irq_ack(struct cti *cti)
-{
- void __iomem *base = cti->base;
- unsigned long val;
-
- val = __raw_readl(base + CTIINTACK);
- val |= BIT(cti->trig_out_for_irq);
- __raw_writel(val, base + CTIINTACK);
-}
-
-/**
- * cti_unlock - unlock cti module
- * @cti: cti instance
- *
- * unlock the cti module, or else any writes to the cti
- * module is not allowed.
- */
-static inline void cti_unlock(struct cti *cti)
-{
- __raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
-}
-
-/**
- * cti_lock - lock cti module
- * @cti: cti instance
- *
- * lock the cti module, so any writes to the cti
- * module will be not allowed.
- */
-static inline void cti_lock(struct cti *cti)
-{
- __raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
-}
-#endif
diff --git a/arch/arm/include/asm/current.h b/arch/arm/include/asm/current.h
new file mode 100644
index 000000000000..5225cb1c803b
--- /dev/null
+++ b/arch/arm/include/asm/current.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Keith Packard <keithp@keithp.com>
+ * Copyright (c) 2021 Google, LLC <ardb@kernel.org>
+ */
+
+#ifndef _ASM_ARM_CURRENT_H
+#define _ASM_ARM_CURRENT_H
+
+#ifndef __ASSEMBLY__
+#include <asm/insn.h>
+
+struct task_struct;
+
+extern struct task_struct *__current;
+
+static __always_inline __attribute_const__ struct task_struct *get_current(void)
+{
+ struct task_struct *cur;
+
+#if __has_builtin(__builtin_thread_pointer) && defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO)
+ /*
+ * Use the __builtin helper when available - this results in better
+ * code, especially when using GCC in combination with the per-task
+ * stack protector, as the compiler will recognize that it needs to
+ * load the TLS register only once in every function.
+ */
+ cur = __builtin_thread_pointer();
+#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+ asm("0: mrc p15, 0, %0, c13, c0, 3 \n\t"
+#ifdef CONFIG_CPU_V6
+ "1: \n\t"
+ " .subsection 1 \n\t"
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+ !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ "2: " LOAD_SYM_ARMV6(%0, __current) " \n\t"
+ " b 1b \n\t"
+#else
+ "2: ldr %0, 3f \n\t"
+ " ldr %0, [%0] \n\t"
+ " b 1b \n\t"
+ "3: .long __current \n\t"
+#endif
+ " .previous \n\t"
+ " .pushsection \".alt.smp.init\", \"a\" \n\t"
+ " .long 0b - . \n\t"
+ " b . + (2b - 0b) \n\t"
+ " .popsection \n\t"
+#endif
+ : "=r"(cur));
+#elif __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ cur = __current;
+#else
+ asm(LOAD_SYM_ARMV6(%0, __current) : "=r"(cur));
+#endif
+ return cur;
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ARM_CURRENT_H */
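
Note: the new current.h keeps the current task pointer in TPIDRURO (or reads it via __builtin_thread_pointer()) instead of re-deriving it from the stack. As a loose user-space analogy only, assuming nothing about the kernel beyond what the header shows, the same idea is keeping a per-thread pointer in fast thread-local storage:

#include <pthread.h>
#include <stdio.h>

struct task { const char *name; };

static __thread struct task *current_task;	/* plays the role of TPIDRURO */

static void *worker(void *arg)
{
	current_task = arg;			/* set once per thread */
	printf("current = %s\n", current_task->name);
	return NULL;
}

int main(void)
{
	struct task a = { "A" }, b = { "B" };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, &a);
	pthread_create(&t2, NULL, worker, &b);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;		/* build with -pthread */
}
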
diff --git a/arch/arm/include/asm/dcc.h b/arch/arm/include/asm/dcc.h
index b74899de0774..d24c4be724e3 100644
--- a/arch/arm/include/asm/dcc.h
+++ b/arch/arm/include/asm/dcc.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <asm/barrier.h>
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index e986b7f717c4..1d069e558d8d 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 1995-2004 Russell King
*
@@ -6,7 +7,7 @@
#ifndef __ASM_ARM_DELAY_H
#define __ASM_ARM_DELAY_H
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/param.h> /* HZ */
/*
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 3234fe9bba6e..c6beb1708c64 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -1,25 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
*/
#ifndef ASMARM_DEVICE_H
#define ASMARM_DEVICE_H
struct dev_archdata {
-#ifdef CONFIG_DMABOUNCE
- struct dmabounce_device_info *dmabounce;
-#endif
-#ifdef CONFIG_IOMMU_API
- void *iommu; /* private IOMMU data */
-#endif
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
-#ifdef CONFIG_XEN
- const struct dma_map_ops *dev_dma_ops;
-#endif
- unsigned int dma_coherent:1;
unsigned int dma_ops_setup:1;
};
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index 7d919a9b32e5..d3ef8e416b27 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_DIV64
#define __ASM_ARM_DIV64
@@ -20,29 +21,20 @@
* assembly implementation with completely non standard calling convention
* for arguments and results (beware).
*/
-
-#ifdef __ARMEB__
-#define __xh "r0"
-#define __xl "r1"
-#else
-#define __xl "r0"
-#define __xh "r1"
-#endif
-
static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
{
register unsigned int __base asm("r4") = base;
register unsigned long long __n asm("r0") = *n;
register unsigned long long __res asm("r2");
- register unsigned int __rem asm(__xh);
- asm( __asmeq("%0", __xh)
+ unsigned int __rem;
+ asm( __asmeq("%0", "r0")
__asmeq("%1", "r2")
- __asmeq("%2", "r0")
- __asmeq("%3", "r4")
+ __asmeq("%2", "r4")
"bl __do_div64"
- : "=r" (__rem), "=r" (__res)
- : "r" (__n), "r" (__base)
+ : "+r" (__n), "=r" (__res)
+ : "r" (__base)
: "ip", "lr", "cc");
+ __rem = __n >> 32;
*n = __res;
return __rem;
}
@@ -60,21 +52,17 @@ static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
#else
-/*
- * gcc versions earlier than 4.0 are simply too problematic for the
- * __div64_const32() code in asm-generic/div64.h. First there is
- * gcc PR 15089 that tend to trig on more complex constructs, spurious
- * .global __udivsi3 are inserted even if none of those symbols are
- * referenced in the generated code, and those gcc versions are not able
- * to do constant propagation on long long values anyway.
- */
-
-#define __div64_const32_is_OK (__GNUC__ >= 4)
-
-static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
+#ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+static __always_inline
+#else
+static inline
+#endif
+uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
{
unsigned long long res;
register unsigned int tmp asm("ip") = 0;
+ bool no_ovf = __builtin_constant_p(m) &&
+ ((m >> 32) + (m & 0xffffffff) < 0x100000000);
if (!bias) {
asm ( "umull %Q0, %R0, %Q1, %Q2\n\t"
@@ -82,7 +70,7 @@ static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
: "=&r" (res)
: "r" (m), "r" (n)
: "cc");
- } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+ } else if (no_ovf) {
res = m;
asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t"
"mov %Q0, #0"
@@ -99,7 +87,7 @@ static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
: "cc");
}
- if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+ if (no_ovf) {
asm ( "umlal %R0, %Q0, %R1, %Q2\n\t"
"umlal %R0, %Q0, %Q1, %R2\n\t"
"mov %R0, #0\n\t"
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
deleted file mode 100644
index 4f8e9e5514b1..000000000000
--- a/arch/arm/include/asm/dma-contiguous.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef ASMARM_DMA_CONTIGUOUS_H
-#define ASMARM_DMA_CONTIGUOUS_H
-
-#ifdef __KERNEL__
-#ifdef CONFIG_DMA_CMA
-
-#include <linux/types.h>
-
-void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
-
-#endif
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index c090ec675eac..2ce4c5683e6d 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_DMA_IOMMU_H
#define ASMARM_DMA_IOMMU_H
@@ -5,12 +6,8 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <linux/kmemcheck.h>
#include <linux/kref.h>
-#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0)
-
struct dma_iommu_mapping {
/* iommu specific data */
struct iommu_domain *domain;
@@ -27,7 +24,7 @@ struct dma_iommu_mapping {
};
struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size);
+arm_iommu_create_mapping(struct device *dev, dma_addr_t base, u64 size);
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
@@ -35,7 +32,5 @@ int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);
-int arm_dma_supported(struct device *dev, u64 mask);
-
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
deleted file mode 100644
index 4e0285a66ef8..000000000000
--- a/arch/arm/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,256 +0,0 @@
-#ifndef ASMARM_DMA_MAPPING_H
-#define ASMARM_DMA_MAPPING_H
-
-#ifdef __KERNEL__
-
-#include <linux/mm_types.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-
-#include <asm/memory.h>
-
-#include <xen/xen.h>
-#include <asm/xen/hypervisor.h>
-
-extern const struct dma_map_ops arm_dma_ops;
-extern const struct dma_map_ops arm_coherent_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_noop_ops;
-}
-
-#ifdef __arch_page_to_dma
-#error Please update to __arch_pfn_to_dma
-#endif
-
-/*
- * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
- * functions used internally by the DMA-mapping API to provide DMA
- * addresses. They must not be used by drivers.
- */
-#ifndef __arch_pfn_to_dma
-static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- if (dev)
- pfn -= dev->dma_pfn_offset;
- return (dma_addr_t)__pfn_to_bus(pfn);
-}
-
-static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
-{
- unsigned long pfn = __bus_to_pfn(addr);
-
- if (dev)
- pfn += dev->dma_pfn_offset;
-
- return pfn;
-}
-
-static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
-{
- if (dev) {
- unsigned long pfn = dma_to_pfn(dev, addr);
-
- return phys_to_virt(__pfn_to_phys(pfn));
- }
-
- return (void *)__bus_to_virt((unsigned long)addr);
-}
-
-static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
-{
- if (dev)
- return pfn_to_dma(dev, virt_to_pfn(addr));
-
- return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
-}
-
-#else
-static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- return __arch_pfn_to_dma(dev, pfn);
-}
-
-static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
-{
- return __arch_dma_to_pfn(dev, addr);
-}
-
-static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
-{
- return __arch_dma_to_virt(dev, addr);
-}
-
-static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
-{
- return __arch_virt_to_dma(dev, addr);
-}
-#endif
-
-/* The ARM override for dma_max_pfn() */
-static inline unsigned long dma_max_pfn(struct device *dev)
-{
- return dma_to_pfn(dev, *dev->dma_mask);
-}
-#define dma_max_pfn(dev) dma_max_pfn(dev)
-
-#define arch_setup_dma_ops arch_setup_dma_ops
-extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent);
-
-#define arch_teardown_dma_ops arch_teardown_dma_ops
-extern void arch_teardown_dma_ops(struct device *dev);
-
-/* do not use this function in a driver */
-static inline bool is_device_dma_coherent(struct device *dev)
-{
- return dev->archdata.dma_coherent;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- unsigned int offset = paddr & ~PAGE_MASK;
- return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
- unsigned int offset = dev_addr & ~PAGE_MASK;
- return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- u64 limit, mask;
-
- if (!dev->dma_mask)
- return 0;
-
- mask = *dev->dma_mask;
-
- limit = (mask + 1) & ~mask;
- if (limit && size > limit)
- return 0;
-
- if ((addr | (addr + size - 1)) & ~mask)
- return 0;
-
- return 1;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size) { }
-
-/**
- * arm_dma_alloc - allocate consistent memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- * @attrs: optinal attributes that specific mapping properties
- *
- * Allocate some memory for a device for performing DMA. This function
- * allocates pages, and will return the CPU-viewed address, and sets @handle
- * to be the device-viewed address.
- */
-extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, unsigned long attrs);
-
-/**
- * arm_dma_free - free memory allocated by arm_dma_alloc
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_coherent
- * @cpu_addr: CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * arm_dma_alloc().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * during and after this call executing are illegal.
- */
-extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, unsigned long attrs);
-
-/**
- * arm_dma_mmap - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @size: size of memory originally requested in dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-
-/*
- * This can be called during early boot to increase the size of the atomic
- * coherent DMA pool above the default value of 256KiB. It must be called
- * before postcore_initcall.
- */
-extern void __init init_dma_coherent_pool_size(unsigned long size);
-
-/*
- * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- * @needs_bounce_fn: called to determine whether buffer needs bouncing
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- */
-extern int dmabounce_register_dev(struct device *, unsigned long,
- unsigned long, int (*)(struct device *, dma_addr_t, size_t));
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
- */
-extern void dmabounce_unregister_dev(struct device *);
-
-
-
-/*
- * The scatter list versions of the above methods.
- */
-extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, unsigned long attrs);
-extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, unsigned long attrs);
-extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-
-#endif /* __KERNEL__ */
-#endif
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index bb4fa67da541..e2a1916013e7 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_DMA_H
#define __ASM_ARM_DMA_H
@@ -9,8 +10,11 @@
#else
#define MAX_DMA_ADDRESS ({ \
extern phys_addr_t arm_dma_zone_size; \
- arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \
+ arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
+
+extern phys_addr_t arm_dma_limit;
+#define ARCH_LOW_ADDRESS_LIMIT arm_dma_limit
#endif
#ifdef CONFIG_ISA_DMA_API
@@ -105,7 +109,7 @@ extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);
*/
extern void __set_dma_addr(unsigned int chan, void *addr);
#define set_dma_addr(chan, addr) \
- __set_dma_addr(chan, (void *)__bus_to_virt(addr))
+ __set_dma_addr(chan, (void *)isa_bus_to_virt(addr))
/* Set the DMA byte count for this channel
*
@@ -142,10 +146,4 @@ extern int get_dma_residue(unsigned int chan);
#endif /* CONFIG_ISA_DMA_API */
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* __ASM_ARM_DMA_H */
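A short note on the MAX_DMA_ADDRESS change above: the old bound 0x10000000 is only 256 MiB, one zero short of the 4 GiB boundary (0x100000000) the comparison is aiming for. With the ULL suffix the subtraction is done in 64 bits, so for the common PAGE_OFFSET of 0xC0000000 the limit becomes 0x100000000 - 0xC0000000 = 0x40000000 (1 GiB), and PAGE_OFFSET + arm_dma_zone_size is only used when it cannot wrap past 0xffffffff.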
diff --git a/arch/arm/include/asm/dmi.h b/arch/arm/include/asm/dmi.h
index df2d2ff06f5b..32c95dad4cea 100644
--- a/arch/arm/include/asm/dmi.h
+++ b/arch/arm/include/asm/dmi.h
@@ -1,8 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_DMI_H
#define __ASM_DMI_H
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 99d9f630d6b6..d48859fdf32c 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -1,18 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/domain.h
*
* Copyright (C) 1999 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_DOMAIN_H
#define __ASM_PROC_DOMAIN_H
#ifndef __ASSEMBLY__
+#include <linux/thread_info.h>
#include <asm/barrier.h>
-#include <asm/thread_info.h>
#endif
/*
@@ -85,7 +82,7 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_CPU_CP15_MMU
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
{
unsigned int domain;
@@ -97,7 +94,7 @@ static inline unsigned int get_domain(void)
return domain;
}
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
@@ -105,37 +102,26 @@ static inline void set_domain(unsigned val)
isb();
}
#else
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
{
return 0;
}
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
{
}
#endif
-#ifdef CONFIG_CPU_USE_DOMAINS
-#define modify_domain(dom,type) \
- do { \
- unsigned int domain = get_domain(); \
- domain &= ~domain_mask(dom); \
- domain = domain | domain_val(dom, type); \
- set_domain(domain); \
- } while (0)
-
-#else
-static inline void modify_domain(unsigned dom, unsigned type) { }
-#endif
-
/*
* Generate the T (user) versions of the LDR/STR and related
* instructions (inline assembly)
*/
#ifdef CONFIG_CPU_USE_DOMAINS
-#define TUSER(instr) #instr "t"
+#define TUSER(instr) TUSERCOND(instr, )
+#define TUSERCOND(instr, cond) #instr "t" #cond
#else
-#define TUSER(instr) #instr
+#define TUSER(instr) TUSERCOND(instr, )
+#define TUSERCOND(instr, cond) #instr #cond
#endif
#else /* __ASSEMBLY__ */
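To make the TUSER()/TUSERCOND() rework above concrete, the token pasting in the two definitions expands as follows (the conditional form is what the futex hunk later in this patch uses):

	TUSER(ldr)          /* "ldrt"   with CONFIG_CPU_USE_DOMAINS, "ldr"   without */
	TUSERCOND(str, eq)  /* "strteq" with CONFIG_CPU_USE_DOMAINS, "streq" without */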
diff --git a/arch/arm/include/asm/ecard.h b/arch/arm/include/asm/ecard.h
index eaea14676d57..7cbe001bf9cc 100644
--- a/arch/arm/include/asm/ecard.h
+++ b/arch/arm/include/asm/ecard.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/ecard.h
*
@@ -194,7 +195,7 @@ void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res,
unsigned long offset, unsigned long maxsize);
#define ecardm_iounmap(__ec, __addr) devm_iounmap(&(__ec)->dev, __addr)
-extern struct bus_type ecard_bus_type;
+extern const struct bus_type ecard_bus_type;
#define ECARD_DEV(_d) container_of((_d), struct expansion_card, dev)
diff --git a/arch/arm/include/asm/edac.h b/arch/arm/include/asm/edac.h
index 5189fa819b60..16f95dd37b24 100644
--- a/arch/arm/include/asm/edac.h
+++ b/arch/arm/include/asm/edac.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2011 Calxeda, Inc.
* Based on PPC version Copyright 2007 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef ASM_EDAC_H
#define ASM_EDAC_H
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 17f1f1a814ff..e408399d5f0e 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_EFI_H
@@ -16,24 +13,30 @@
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
+#include <asm/uaccess.h>
#ifdef CONFIG_EFI
void efi_init(void);
+void arm_efi_init(void);
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
-int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);
#define arch_efi_call_virt_setup() efi_virtmap_load()
#define arch_efi_call_virt_teardown() efi_virtmap_unload()
-#define arch_efi_call_virt(p, f, args...) \
-({ \
- efi_##f##_t *__f; \
- __f = p->f; \
- __f(args); \
+#ifdef CONFIG_CPU_TTBR0_PAN
+#undef arch_efi_call_virt
+#define arch_efi_call_virt(p, f, args...) ({ \
+ unsigned int flags = uaccess_save_and_enable(); \
+ efi_status_t res = _Generic((p)->f(args), \
+ efi_status_t: (p)->f(args), \
+ default: ((p)->f(args), EFI_ABORTED)); \
+ uaccess_restore(flags); \
+ res; \
})
+#endif
#define ARCH_EFI_IRQ_FLAGS_MASK \
(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
@@ -48,26 +51,11 @@ void efi_virtmap_load(void);
void efi_virtmap_unload(void);
#else
-#define efi_init()
+#define arm_efi_init()
#endif /* CONFIG_EFI */
/* arch specific definitions used by the stub code */
-#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
-#define __efi_call_early(f, ...) f(__VA_ARGS__)
-#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
-#define efi_is_64bit() (false)
-
-#define efi_call_proto(protocol, f, instance, ...) \
- ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
-
-struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg);
-void free_screen_info(efi_system_table_t *sys_table, struct screen_info *si);
-
-static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
-{
-}
-
/*
* A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
* so we will reserve that amount of memory. We have no easy way to tell what
@@ -78,25 +66,29 @@ static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
#define MAX_UNCOMP_KERNEL_SIZE SZ_32M
/*
- * The kernel zImage should preferably be located between 32 MB and 128 MB
- * from the base of DRAM. The min address leaves space for a maximal size
- * uncompressed image, and the max address is due to how the zImage decompressor
- * picks a destination address.
+ * phys-to-virt patching requires that the physical to virtual offset is a
+ * multiple of 2 MiB. However, using an alignment smaller than TEXT_OFFSET
+ * here throws off the memory allocation logic, so let's use the lowest power
+ * of two greater than 2 MiB and greater than TEXT_OFFSET.
*/
-#define ZIMAGE_OFFSET_LIMIT SZ_128M
-#define MIN_ZIMAGE_OFFSET MAX_UNCOMP_KERNEL_SIZE
+#define EFI_PHYS_ALIGN max(UL(SZ_2M), roundup_pow_of_two(TEXT_OFFSET))
-/* on ARM, the FDT should be located in the first 128 MB of RAM */
-static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
+/* on ARM, the initrd should be loaded in a lowmem region */
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
- return dram_base + ZIMAGE_OFFSET_LIMIT;
+ return round_down(image_addr, SZ_4M) + SZ_512M;
}
-/* on ARM, the initrd should be loaded in a lowmem region */
-static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
- unsigned long image_addr)
+struct efi_arm_entry_state {
+ u32 cpsr_before_ebs;
+ u32 sctlr_before_ebs;
+ u32 cpsr_after_ebs;
+ u32 sctlr_after_ebs;
+};
+
+static inline void efi_capsule_flush_cache_range(void *addr, int size)
{
- return dram_base + SZ_512M;
+ __cpuc_flush_dcache_area(addr, size);
}
#endif /* _ASM_ARM_EFI_H */
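A hedged sketch of the call pattern the setup/teardown hooks and the new CONFIG_CPU_TTBR0_PAN variant of arch_efi_call_virt() above are built for; only the mapping of setup/teardown to efi_virtmap_load()/efi_virtmap_unload() comes from this header, the runtime-services pointer and arguments are illustrative:

	/* rt: a mapped efi_runtime_services_t pointer; tm/tc: efi_time_t / efi_time_cap_t */
	arch_efi_call_virt_setup();                        /* efi_virtmap_load()   */
	status = arch_efi_call_virt(rt, get_time, tm, tc); /* user access is enabled around the call under TTBR0 PAN */
	arch_efi_call_virt_teardown();                     /* efi_virtmap_unload() */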
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index f13ae153fb24..9f21e170320f 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -1,9 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_ELF_H
#define __ASMARM_ELF_H
#include <asm/auxvec.h>
#include <asm/hwcap.h>
-#include <asm/vdso_datapage.h>
/*
* ELF register definitions..
@@ -50,6 +50,7 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_NONE 0
#define R_ARM_PC24 1
#define R_ARM_ABS32 2
+#define R_ARM_REL32 3
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
#define R_ARM_TARGET1 38
@@ -57,11 +58,18 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_PREL31 42
#define R_ARM_MOVW_ABS_NC 43
#define R_ARM_MOVT_ABS 44
+#define R_ARM_MOVW_PREL_NC 45
+#define R_ARM_MOVT_PREL 46
+#define R_ARM_ALU_PC_G0_NC 57
+#define R_ARM_ALU_PC_G1_NC 59
+#define R_ARM_LDR_PC_G2 63
#define R_ARM_THM_CALL 10
#define R_ARM_THM_JUMP24 30
#define R_ARM_THM_MOVW_ABS_NC 47
#define R_ARM_THM_MOVT_ABS 48
+#define R_ARM_THM_MOVW_PREL_NC 49
+#define R_ARM_THM_MOVT_PREL 50
/*
* These are used to set parameters in the core dumps.
@@ -100,14 +108,15 @@ struct elf32_hdr;
extern int elf_check_arch(const struct elf32_hdr *);
#define elf_check_arch elf_check_arch
-#define vmcore_elf64_check_arch(x) (0)
+#define ELFOSABI_ARM_FDPIC 65 /* ARM FDPIC platform */
+#define elf_check_fdpic(x) ((x)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
+#define elf_check_const_displacement(x) ((x)->e_flags & EF_ARM_PIC)
+#define ELF_FDPIC_CORE_EFLAGS 0
-extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
-#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
+#define vmcore_elf64_check_arch(x) (0)
-struct task_struct;
-int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
-#define ELF_CORE_COPY_TASK_REGS dump_task_regs
+extern int arm_elf_read_implies_exec(int);
+#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(stk)
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
@@ -120,6 +129,13 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
have no such handler. */
#define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0
+#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \
+ do { \
+ (_r)->ARM_r7 = _exec_map_addr; \
+ (_r)->ARM_r8 = _interp_map_addr; \
+ (_r)->ARM_r9 = dynamic_addr; \
+ } while(0)
+
extern void elf_set_personality(const struct elf32_hdr *);
#define SET_PERSONALITY(ex) elf_set_personality(&(ex))
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
deleted file mode 100644
index 609184f522ee..000000000000
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ /dev/null
@@ -1,39 +0,0 @@
-#include <asm/assembler.h>
-
-/*
- * Interrupt handling. Preserves r7, r8, r9
- */
- .macro arch_irq_handler_default
- get_irqnr_preamble r6, lr
-1: get_irqnr_and_base r0, r2, r6, lr
- movne r1, sp
- @
- @ routine called with r0 = irq number, r1 = struct pt_regs *
- @
- badrne lr, 1b
- bne asm_do_IRQ
-
-#ifdef CONFIG_SMP
- /*
- * XXX
- *
- * this macro assumes that irqstat (r2) and base (r6) are
- * preserved from get_irqnr_and_base above
- */
- ALT_SMP(test_for_ipi r0, r2, r6, lr)
- ALT_UP_B(9997f)
- movne r1, sp
- badrne lr, 1b
- bne do_IPI
-#endif
-9997:
- .endm
-
- .macro arch_irq_handler, symbol_name
- .align 5
- .global \symbol_name
-\symbol_name:
- mov r8, lr
- arch_irq_handler_default
- ret r8
- .endm
diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
index bf1991263d2d..3c82975d46db 100644
--- a/arch/arm/include/asm/exception.h
+++ b/arch/arm/include/asm/exception.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Annotations for marking C functions as exception handlers.
*
@@ -9,11 +10,6 @@
#include <linux/interrupt.h>
-#define __exception __attribute__((section(".exception.text")))
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
-#else
-#define __exception_irq_entry __exception
-#endif
#endif /* __ASM_ARM_EXCEPTION_H */
diff --git a/arch/arm/include/asm/fb.h b/arch/arm/include/asm/fb.h
deleted file mode 100644
index d92e99cd8c8a..000000000000
--- a/arch/arm/include/asm/fb.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-
-#include <linux/fb.h>
-#include <linux/fs.h>
-#include <asm/page.h>
-
-static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
- unsigned long off)
-{
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-}
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/arm/include/asm/fiq.h b/arch/arm/include/asm/fiq.h
index d493d0b742a1..6bdfb4a47322 100644
--- a/arch/arm/include/asm/fiq.h
+++ b/arch/arm/include/asm/fiq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/fiq.h
*
diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h
index 34c1d96ef46d..23fe0bd405c7 100644
--- a/arch/arm/include/asm/firmware.h
+++ b/arch/arm/include/asm/firmware.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Samsung Electronics.
* Kyungmin Park <kyungmin.park@samsung.com>
* Tomasz Figa <t.figa@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_FIRMWARE_H
@@ -24,7 +21,7 @@ struct firmware_ops {
/*
* Inform the firmware we intend to enter CPU idle mode
*/
- int (*prepare_idle)(void);
+ int (*prepare_idle)(unsigned long mode);
/*
* Enters CPU idle mode
*/
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 8f967d1373f6..707068f852c2 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -1,19 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
-#define FIXADDR_START 0xffc00000UL
+#define FIXADDR_START 0xffc80000UL
#define FIXADDR_END 0xfff00000UL
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
-#include <asm/kmap_types.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
+#include <asm/kmap_size.h>
enum fixed_addresses {
FIX_EARLYCON_MEM_BASE,
__end_of_permanent_fixed_addresses,
FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
/* Support writing RO kernel text via kprobes, jump labels, etc. */
FIX_TEXT_POKE0,
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
deleted file mode 100644
index 29d3a1524bce..000000000000
--- a/arch/arm/include/asm/flat.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/arm/include/asm/flat.h -- uClinux flat-format executables
- */
-
-#ifndef __ARM_FLAT_H__
-#define __ARM_FLAT_H__
-
-#include <linux/uaccess.h>
-
-#define flat_argvp_envp_on_stack() 1
-#define flat_old_ram_flag(flags) (flags)
-#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-
-static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
- u32 *addr, u32 *persistent)
-{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- return copy_from_user(addr, rp, 4) ? -EFAULT : 0;
-#else
- return get_user(*addr, rp);
-#endif
-}
-
-static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
-{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- return copy_to_user(rp, &addr, 4) ? -EFAULT : 0;
-#else
- return put_user(addr, rp);
-#endif
-}
-
-#define flat_get_relocate_addr(rel) (rel)
-#define flat_set_persistent(relval, p) 0
-
-#endif /* __ARM_FLAT_H__ */
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index 85a34cc8316a..e579f77162e9 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -1,29 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/floppy.h
*
* Copyright (C) 1996-2000 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Note that we don't touch FLOPPY_DMA nor FLOPPY_IRQ here
*/
#ifndef __ASM_ARM_FLOPPY_H
#define __ASM_ARM_FLOPPY_H
-#if 0
-#include <mach/floppy.h>
-#endif
-#define fd_outb(val,port) \
- do { \
- if ((port) == (u32)FD_DOR) \
- fd_setdor((val)); \
- else \
- outb((val),(port)); \
+#define fd_outb(val, base, reg) \
+ do { \
+ int new_val = (val); \
+ if ((reg) == FD_DOR) { \
+ if (new_val & 0xf0) \
+ new_val = (new_val & 0x0c) | \
+ floppy_selects[new_val & 3]; \
+ else \
+ new_val &= 0x0c; \
+ } \
+ outb(new_val, (base) + (reg)); \
} while(0)
-#define fd_inb(port) inb((port))
+#define fd_inb(base, reg) inb((base) + (reg))
#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
0,"floppy",NULL)
#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL)
@@ -56,69 +55,7 @@ static inline int fd_dma_setup(void *data, unsigned int length,
* to a non-zero track, and then restoring it to track 0. If an error occurs,
* then there is no floppy drive present. [to be put back in again]
*/
-static unsigned char floppy_selects[2][4] =
-{
- { 0x10, 0x21, 0x23, 0x33 },
- { 0x10, 0x21, 0x23, 0x33 }
-};
-
-#define fd_setdor(dor) \
-do { \
- int new_dor = (dor); \
- if (new_dor & 0xf0) \
- new_dor = (new_dor & 0x0c) | floppy_selects[fdc][new_dor & 3]; \
- else \
- new_dor &= 0x0c; \
- outb(new_dor, FD_DOR); \
-} while (0)
-
-/*
- * Someday, we'll automatically detect which drives are present...
- */
-static inline void fd_scandrives (void)
-{
-#if 0
- int floppy, drive_count;
-
- fd_disable_irq();
- raw_cmd = &default_raw_cmd;
- raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_SEEK;
- raw_cmd->track = 0;
- raw_cmd->rate = ?;
- drive_count = 0;
- for (floppy = 0; floppy < 4; floppy ++) {
- current_drive = drive_count;
- /*
- * Turn on floppy motor
- */
- if (start_motor(redo_fd_request))
- continue;
- /*
- * Set up FDC
- */
- fdc_specify();
- /*
- * Tell FDC to recalibrate
- */
- output_byte(FD_RECALIBRATE);
- LAST_OUT(UNIT(floppy));
- /* wait for command to complete */
- if (!successful) {
- int i;
- for (i = drive_count; i < 3; i--)
- floppy_selects[fdc][i] = floppy_selects[fdc][i + 1];
- floppy_selects[fdc][3] = 0;
- floppy -= 1;
- } else
- drive_count++;
- }
-#else
- floppy_selects[0][0] = 0x10;
- floppy_selects[0][1] = 0x21;
- floppy_selects[0][2] = 0x23;
- floppy_selects[0][3] = 0x33;
-#endif
-}
+static unsigned char floppy_selects[4] = { 0x10, 0x21, 0x23, 0x33 };
#define FDC1 (0x3f0)
@@ -128,8 +65,6 @@ static inline void fd_scandrives (void)
#define N_FDC 1
#define N_DRIVE 4
-#define CROSS_64KB(a,s) (0)
-
/*
* This allows people to reverse the order of
* fd0 and fd1, in case their hardware is
@@ -138,9 +73,7 @@ static inline void fd_scandrives (void)
*/
static void driveswap(int *ints, int dummy, int dummy2)
{
- floppy_selects[0][0] ^= floppy_selects[0][1];
- floppy_selects[0][1] ^= floppy_selects[0][0];
- floppy_selects[0][0] ^= floppy_selects[0][1];
+ swap(floppy_selects[0], floppy_selects[1]);
}
#define EXTRA_FLOPPY_PARAMS ,{ "driveswap", &driveswap, NULL, 0, 0 }
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de5354746924..78217b0e76e3 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/fncpy.h - helper macros for function body copying
*
* Copyright (C) 2011 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
diff --git a/arch/arm/include/asm/fpstate.h b/arch/arm/include/asm/fpstate.h
index 3ad4c10d0d84..e29d9c7a5238 100644
--- a/arch/arm/include/asm/fpstate.h
+++ b/arch/arm/include/asm/fpstate.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/fpstate.h
*
* Copyright (C) 1995 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_FPSTATE_H
@@ -49,9 +46,6 @@ union vfp_state {
struct vfp_hard_struct hard;
};
-extern void vfp_flush_thread(union vfp_state *);
-extern void vfp_release_thread(union vfp_state *);
-
#define FP_HARD_SIZE 35
struct fp_hard_struct {
@@ -80,14 +74,6 @@ union fp_state {
#define FP_SIZE (sizeof(union fp_state) / sizeof(int))
-struct crunch_state {
- unsigned int mvdx[16][2];
- unsigned int mvax[4][3];
- unsigned int dspsc[2];
-};
-
-#define CRUNCH_SIZE sizeof(struct crunch_state)
-
#endif
#endif
diff --git a/arch/arm/include/asm/fpu.h b/arch/arm/include/asm/fpu.h
new file mode 100644
index 000000000000..2ae50bdce59b
--- /dev/null
+++ b/arch/arm/include/asm/fpu.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 SiFive
+ */
+
+#ifndef __ASM_FPU_H
+#define __ASM_FPU_H
+
+#include <asm/neon.h>
+
+#define kernel_fpu_available() cpu_has_neon()
+#define kernel_fpu_begin() kernel_neon_begin()
+#define kernel_fpu_end() kernel_neon_end()
+
+#endif /* ! __ASM_FPU_H */
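A brief, hedged usage sketch of the kernel-mode FPU interface the new <asm/fpu.h> wires up to the NEON helpers; the function below is purely illustrative:

/* Hypothetical caller of the generic kernel_fpu_*() API added above. */
static void example_simd_work(void)
{
	if (!kernel_fpu_available())	/* cpu_has_neon() */
		return;			/* fall back to scalar code */

	kernel_fpu_begin();		/* kernel_neon_begin() */
	/* ... NEON-accelerated processing, no sleeping in here ... */
	kernel_fpu_end();		/* kernel_neon_end() */
}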
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index f379881d5cc3..5be3ddc96a50 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
@@ -10,13 +13,12 @@
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
-extern void mcount(void);
extern void __gnu_mcount_nc(void);
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
-#ifdef CONFIG_OLD_MCOUNT
- bool old_mcount;
+#ifdef CONFIG_ARM_MODULE_PLTS
+ struct module *mod;
#endif
};
@@ -25,9 +27,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
/* With Thumb-2, the recorded addresses have the lsb set */
return addr & ~1;
}
-
-extern void ftrace_caller_old(void);
-extern void ftrace_call_old(void);
#endif
#endif
@@ -51,7 +50,7 @@ void *return_address(unsigned int);
static inline void *return_address(unsigned int level)
{
- return NULL;
+ return NULL;
}
#endif
@@ -76,6 +75,10 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
return !strcasecmp(sym, name);
}
+void prepare_ftrace_return(unsigned long *parent, unsigned long self,
+ unsigned long frame_pointer,
+ unsigned long stack_pointer);
+
#endif /* ifndef __ASSEMBLY__ */
#endif /* _ASM_ARM_FTRACE */
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 6795368ad023..a9151884bc85 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_FUTEX_H
#define _ASM_ARM_FUTEX_H
@@ -49,7 +50,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
int ret;
u32 val;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
smp_mb();
@@ -103,16 +104,17 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
int ret = 0;
u32 val;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
preempt_disable();
__ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ " .syntax unified\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
" it eq @ explicit IT needed for the 2b label\n"
- "2: " TUSER(streq) " %3, [%4]\n"
+ "2: " TUSERCOND(str, eq) " %3, [%4]\n"
__futex_atomic_ex_table("%5")
: "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
@@ -128,24 +130,16 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#endif /* !SMP */
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
#ifndef CONFIG_SMP
preempt_disable();
#endif
- pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -167,22 +161,18 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
ret = -ENOSYS;
}
- pagefault_enable();
#ifndef CONFIG_SMP
preempt_enable();
#endif
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ /*
+ * Store unconditionally. If ret != 0 the extra store is the least
+ * of the worries but GCC cannot figure out that __futex_atomic_op()
+ * is either setting ret to -EFAULT or storing the old value in
+ * oldval, which results in an uninitialized warning at the call site.
+ */
+ *oval = oldval;
+
return ret;
}
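For context on the interface change above: the FUTEX_OP_CMP_* handling that used to live here no longer fits the new signature, so a caller now looks roughly like the sketch below (names follow the removed code; the caller side itself is not part of this patch):

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (!ret)
		ret = (oldval == cmparg);	/* e.g. FUTEX_OP_CMP_EQ */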
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 01c3d92624e5..4186fbf7341f 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/glue-cache.h
*
* Copyright (C) 1999-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_CACHE_H
#define ASM_GLUE_CACHE_H
@@ -117,6 +114,14 @@
# endif
#endif
+#if defined(CONFIG_CACHE_B15_RAC)
+# define MULTI_CACHE 1
+#endif
+
+#ifdef CONFIG_CPU_CACHE_NOP
+# define MULTI_CACHE 1
+#endif
+
#if defined(CONFIG_CPU_V7M)
# define MULTI_CACHE 1
#endif
@@ -125,29 +130,15 @@
#error Unknown cache maintenance model
#endif
-#ifndef __ASSEMBLER__
-static inline void nop_flush_icache_all(void) { }
-static inline void nop_flush_kern_cache_all(void) { }
-static inline void nop_flush_kern_cache_louis(void) { }
-static inline void nop_flush_user_cache_all(void) { }
-static inline void nop_flush_user_cache_range(unsigned long a,
- unsigned long b, unsigned int c) { }
-
-static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
-static inline int nop_coherent_user_range(unsigned long a,
- unsigned long b) { return 0; }
-static inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
-
-static inline void nop_dma_flush_range(const void *a, const void *b) { }
-
-static inline void nop_dma_map_area(const void *s, size_t l, int f) { }
-static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
-#endif
-
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
+/* This function only has a dedicated assembly callback on the v7 cache */
+#ifdef CONFIG_CPU_CACHE_V7
#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis)
+#else
+#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_all)
+#endif
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 04e18b656659..209e46c02ddd 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/glue-df.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_DF_H
#define ASM_GLUE_DF_H
diff --git a/arch/arm/include/asm/glue-pf.h b/arch/arm/include/asm/glue-pf.h
index d385f37c13f0..a033929fad3a 100644
--- a/arch/arm/include/asm/glue-pf.h
+++ b/arch/arm/include/asm/glue-pf.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/glue-pf.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PF_H
#define ASM_GLUE_PF_H
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index 74be7c22035a..52df74aa3c2c 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/glue-proc.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PROC_H
#define ASM_GLUE_PROC_H
diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h
index fbf71d75ec83..377fd4cfab01 100644
--- a/arch/arm/include/asm/glue.h
+++ b/arch/arm/include/asm/glue.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/glue.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This file provides the glue to stick the processor-specific bits
* into the kernel in an efficient manner. The idea is to use branches
* when we're only targeting one class of TLB, or indirect calls
diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h
deleted file mode 100644
index 504dcddebfcc..000000000000
--- a/arch/arm/include/asm/gpio.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _ARCH_ARM_GPIO_H
-#define _ARCH_ARM_GPIO_H
-
-#if CONFIG_ARCH_NR_GPIO > 0
-#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
-#endif
-
-/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */
-#include <asm-generic/gpio.h>
-
-/* The trivial gpiolib dispatchers */
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-
-/*
- * Provide a default gpio_to_irq() which should satisfy every case.
- * However, some platforms want to do this differently, so allow them
- * to override it.
- */
-#ifndef gpio_to_irq
-#define gpio_to_irq __gpio_to_irq
-#endif
-
-#endif /* _ARCH_ARM_GPIO_H */
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 3d7351c844aa..706efafbf972 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -1,32 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
-#include <linux/cache.h>
-#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 7
-
-typedef struct {
- unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
- unsigned int ipi_irqs[NR_IPI];
-#endif
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
-
-#ifdef CONFIG_SMP
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#else
-#define smp_irq_stat_cpu(cpu) 0
-#endif
-
-#define arch_irq_stat_cpu smp_irq_stat_cpu
-
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm/include/asm/hardware/cache-aurora-l2.h b/arch/arm/include/asm/hardware/cache-aurora-l2.h
new file mode 100644
index 000000000000..9694808ee97c
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-aurora-l2.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AURORA shared L2 cache controller support
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#ifndef __ASM_ARM_HARDWARE_AURORA_L2_H
+#define __ASM_ARM_HARDWARE_AURORA_L2_H
+
+#define AURORA_SYNC_REG 0x700
+#define AURORA_RANGE_BASE_ADDR_REG 0x720
+#define AURORA_FLUSH_PHY_ADDR_REG 0x7f0
+#define AURORA_INVAL_RANGE_REG 0x774
+#define AURORA_CLEAN_RANGE_REG 0x7b4
+#define AURORA_FLUSH_RANGE_REG 0x7f4
+
+#define AURORA_ACR_REPLACEMENT_OFFSET 27
+#define AURORA_ACR_REPLACEMENT_MASK \
+ (0x3 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_WAYRR \
+ (0 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_LFSR \
+ (1 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \
+ (3 << AURORA_ACR_REPLACEMENT_OFFSET)
+
+#define AURORA_ACR_PARITY_EN (1 << 21)
+#define AURORA_ACR_ECC_EN (1 << 20)
+
+#define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET 0
+#define AURORA_ACR_FORCE_WRITE_POLICY_MASK \
+ (0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_POLICY_DIS \
+ (0 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_BACK_POLICY \
+ (1 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_THRO_POLICY \
+ (2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+
+#define AURORA_ERR_CNT_REG 0x600
+#define AURORA_ERR_ATTR_CAP_REG 0x608
+#define AURORA_ERR_ADDR_CAP_REG 0x60c
+#define AURORA_ERR_WAY_CAP_REG 0x610
+#define AURORA_ERR_INJECT_CTL_REG 0x614
+#define AURORA_ERR_INJECT_MASK_REG 0x618
+
+#define AURORA_ERR_CNT_CLR_OFFSET 31
+#define AURORA_ERR_CNT_CLR \
+ (0x1 << AURORA_ERR_CNT_CLR_OFFSET)
+#define AURORA_ERR_CNT_UE_OFFSET 16
+#define AURORA_ERR_CNT_UE_MASK \
+ (0x7fff << AURORA_ERR_CNT_UE_OFFSET)
+#define AURORA_ERR_CNT_CE_OFFSET 0
+#define AURORA_ERR_CNT_CE_MASK \
+ (0xffff << AURORA_ERR_CNT_CE_OFFSET)
+
+#define AURORA_ERR_ATTR_SRC_OFF 16
+#define AURORA_ERR_ATTR_SRC_MSK \
+ (0x7 << AURORA_ERR_ATTR_SRC_OFF)
+#define AURORA_ERR_ATTR_TXN_OFF 12
+#define AURORA_ERR_ATTR_TXN_MSK \
+ (0xf << AURORA_ERR_ATTR_TXN_OFF)
+#define AURORA_ERR_ATTR_ERR_OFF 8
+#define AURORA_ERR_ATTR_ERR_MSK \
+ (0x3 << AURORA_ERR_ATTR_ERR_OFF)
+#define AURORA_ERR_ATTR_CAP_VALID_OFF 0
+#define AURORA_ERR_ATTR_CAP_VALID \
+ (0x1 << AURORA_ERR_ATTR_CAP_VALID_OFF)
+
+#define AURORA_ERR_ADDR_CAP_ADDR_MASK 0xffffffe0
+
+#define AURORA_ERR_WAY_IDX_OFF 8
+#define AURORA_ERR_WAY_IDX_MSK \
+ (0xfff << AURORA_ERR_WAY_IDX_OFF)
+#define AURORA_ERR_WAY_CAP_WAY_OFFSET 1
+#define AURORA_ERR_WAY_CAP_WAY_MASK \
+ (0xf << AURORA_ERR_WAY_CAP_WAY_OFFSET)
+
+#define AURORA_ERR_INJECT_CTL_ADDR_MASK 0xfffffff0
+#define AURORA_ERR_ATTR_TXN_OFF 12
+#define AURORA_ERR_INJECT_CTL_EN_MASK 0x3
+#define AURORA_ERR_INJECT_CTL_EN_PARITY 0x2
+#define AURORA_ERR_INJECT_CTL_EN_ECC 0x1
+
+#define AURORA_MAX_RANGE_SIZE 1024
+
+#define AURORA_WAY_SIZE_SHIFT 2
+
+#define AURORA_CTRL_FW 0x100
+
+/* Choose a number outside L2X0_CACHE_ID_PART_MASK to be sure to make
+ * the distinction between a number coming from hardware and a number
+ * coming from the device tree. */
+#define AURORA_CACHE_ID 0x100
+
+#endif /* __ASM_ARM_HARDWARE_AURORA_L2_H */
diff --git a/arch/arm/include/asm/hardware/cache-b15-rac.h b/arch/arm/include/asm/hardware/cache-b15-rac.h
new file mode 100644
index 000000000000..3d43ec06fd35
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-b15-rac.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_ARM_HARDWARE_CACHE_B15_RAC_H
+#define __ASM_ARM_HARDWARE_CACHE_B15_RAC_H
+
+#ifndef __ASSEMBLY__
+
+void b15_flush_kern_cache_all(void);
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/hardware/cache-feroceon-l2.h b/arch/arm/include/asm/hardware/cache-feroceon-l2.h
index 12e1588dc4f1..eb2e7b7f70a8 100644
--- a/arch/arm/include/asm/hardware/cache-feroceon-l2.h
+++ b/arch/arm/include/asm/hardware/cache-feroceon-l2.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/cache-feroceon-l2.h
*
* Copyright (C) 2008 Marvell Semiconductor
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
extern void __init feroceon_l2_init(int l2_wt_override);
extern int __init feroceon_of_init(void);
-
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 736292b42fca..5a7ee70f561c 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -1,26 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/cache-l2x0.h
*
* Copyright (C) 2007 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_ARM_HARDWARE_L2X0_H
#define __ASM_ARM_HARDWARE_L2X0_H
#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
#define L2X0_CACHE_ID 0x000
#define L2X0_CACHE_TYPE 0x004
@@ -130,6 +120,8 @@
#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
+#define L310_AUX_CTRL_FWA_SHIFT 23
+#define L310_AUX_CTRL_FWA_MASK (3 << 23)
#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h
index 295e2e40151b..4e493facaa31 100644
--- a/arch/arm/include/asm/hardware/cache-tauros2.h
+++ b/arch/arm/include/asm/hardware/cache-tauros2.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/cache-tauros2.h
*
* Copyright (C) 2008 Marvell Semiconductor
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#define CACHE_TAUROS2_PREFETCH_ON (1 << 0)
diff --git a/arch/arm/include/asm/hardware/cache-uniphier.h b/arch/arm/include/asm/hardware/cache-uniphier.h
index 0ef42ae75b6c..b1fefca65d4d 100644
--- a/arch/arm/include/asm/hardware/cache-uniphier.h
+++ b/arch/arm/include/asm/hardware/cache-uniphier.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015-2016 Socionext Inc.
* Author: Masahiro Yamada <yamada.masahiro@socionext.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __CACHE_UNIPHIER_H
diff --git a/arch/arm/include/asm/hardware/cp14.h b/arch/arm/include/asm/hardware/cp14.h
index 61576dc58ede..44f2bde379c5 100644
--- a/arch/arm/include/asm/hardware/cp14.h
+++ b/arch/arm/include/asm/hardware/cp14.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __ASM_HARDWARE_CP14_H
diff --git a/arch/arm/include/asm/hardware/dec21285.h b/arch/arm/include/asm/hardware/dec21285.h
index 0d7552751aaf..894f2a635cbb 100644
--- a/arch/arm/include/asm/hardware/dec21285.h
+++ b/arch/arm/include/asm/hardware/dec21285.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/dec21285.h
*
* Copyright (C) 1998 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* DC21285 registers
*/
#define DC21285_PCI_IACK 0x79000000
@@ -25,6 +22,13 @@
#define DC21285_IO(x) (x)
#endif
+/*
+ * The footbridge is programmed to expose the system RAM at 0xe0000000.
+ * The requirement is that the RAM isn't placed at bus address 0, which
+ * would clash with VGA cards.
+ */
+#define BUS_OFFSET 0xe0000000
+
#define CSR_PCICMD DC21285_IO(0x0004)
#define CSR_CLASSREV DC21285_IO(0x0008)
#define CSR_PCICACHELINESIZE DC21285_IO(0x000c)
@@ -84,19 +88,6 @@
#define SA110_CNTL_XCSDIR(x) ((x)<<28)
#define SA110_CNTL_PCICFN (1 << 31)
-/*
- * footbridge_cfn_mode() is used when we want
- * to check whether we are the central function
- */
-#define __footbridge_cfn_mode() (*CSR_SA110_CNTL & SA110_CNTL_PCICFN)
-#if defined(CONFIG_FOOTBRIDGE_HOST) && defined(CONFIG_FOOTBRIDGE_ADDIN)
-#define footbridge_cfn_mode() __footbridge_cfn_mode()
-#elif defined(CONFIG_FOOTBRIDGE_HOST)
-#define footbridge_cfn_mode() (1)
-#else
-#define footbridge_cfn_mode() (0)
-#endif
-
#define CSR_PCIADDR_EXTN DC21285_IO(0x0140)
#define CSR_PREFETCHMEMRANGE DC21285_IO(0x0144)
#define CSR_XBUS_CYCLE DC21285_IO(0x0148)
diff --git a/arch/arm/include/asm/hardware/entry-macro-iomd.S b/arch/arm/include/asm/hardware/entry-macro-iomd.S
deleted file mode 100644
index 8c215acd9b57..000000000000
--- a/arch/arm/include/asm/hardware/entry-macro-iomd.S
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/entry-macro-iomd.S
- *
- * Low-level IRQ helper macros for IOC/IOMD based platforms
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-/* IOC / IOMD based hardware */
-#include <asm/hardware/iomd.h>
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldrb \irqstat, [\base, #IOMD_IRQREQB] @ get high priority first
- ldr \tmp, =irq_prio_h
- teq \irqstat, #0
-#ifdef IOMD_BASE
- ldreqb \irqstat, [\base, #IOMD_DMAREQ] @ get dma
- addeq \tmp, \tmp, #256 @ irq_prio_h table size
- teqeq \irqstat, #0
- bne 2406f
-#endif
- ldreqb \irqstat, [\base, #IOMD_IRQREQA] @ get low priority
- addeq \tmp, \tmp, #256 @ irq_prio_d table size
- teqeq \irqstat, #0
-#ifdef IOMD_IRQREQC
- ldreqb \irqstat, [\base, #IOMD_IRQREQC]
- addeq \tmp, \tmp, #256 @ irq_prio_l table size
- teqeq \irqstat, #0
-#endif
-#ifdef IOMD_IRQREQD
- ldreqb \irqstat, [\base, #IOMD_IRQREQD]
- addeq \tmp, \tmp, #256 @ irq_prio_lc table size
- teqeq \irqstat, #0
-#endif
-2406: ldrneb \irqnr, [\tmp, \irqstat] @ get IRQ number
- .endm
-
-/*
- * Interrupt table (incorporates priority). Please note that we
- * rely on the order of these tables (see above code).
- */
- .align 5
-irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
-#ifdef IOMD_BASE
-irq_prio_d: .byte 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
-#endif
-irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
-#ifdef IOMD_IRQREQC
-irq_prio_lc: .byte 24,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
- .byte 28,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
- .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
-#endif
-#ifdef IOMD_IRQREQD
-irq_prio_ld: .byte 40,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
- .byte 44,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
- .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
-#endif
-
diff --git a/arch/arm/include/asm/hardware/ioc.h b/arch/arm/include/asm/hardware/ioc.h
index 1f6b8013becb..6edd27fcd048 100644
--- a/arch/arm/include/asm/hardware/ioc.h
+++ b/arch/arm/include/asm/hardware/ioc.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/ioc.h
*
* Copyright (C) Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Use these macros to read/write the IOC. All they do is perform the actual
* read/write.
*/
diff --git a/arch/arm/include/asm/hardware/iomd.h b/arch/arm/include/asm/hardware/iomd.h
index f9ee69e4f53e..53006ba5350f 100644
--- a/arch/arm/include/asm/hardware/iomd.h
+++ b/arch/arm/include/asm/hardware/iomd.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/iomd.h
*
* Copyright (C) 1999 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This file contains information about the IOMD ASIC used in the
* Acorn RiscPC and subsequently integrated into the CLPS7500 chips.
*/
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
deleted file mode 100644
index 240b29ef17db..000000000000
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ /dev/null
@@ -1,932 +0,0 @@
-/*
- * Copyright © 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#ifndef _ADMA_H
-#define _ADMA_H
-#include <linux/types.h>
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <asm/hardware/iop_adma.h>
-
-/* Memory copy units */
-#define DMA_CCR(chan) (chan->mmr_base + 0x0)
-#define DMA_CSR(chan) (chan->mmr_base + 0x4)
-#define DMA_DAR(chan) (chan->mmr_base + 0xc)
-#define DMA_NDAR(chan) (chan->mmr_base + 0x10)
-#define DMA_PADR(chan) (chan->mmr_base + 0x14)
-#define DMA_PUADR(chan) (chan->mmr_base + 0x18)
-#define DMA_LADR(chan) (chan->mmr_base + 0x1c)
-#define DMA_BCR(chan) (chan->mmr_base + 0x20)
-#define DMA_DCR(chan) (chan->mmr_base + 0x24)
-
-/* Application accelerator unit */
-#define AAU_ACR(chan) (chan->mmr_base + 0x0)
-#define AAU_ASR(chan) (chan->mmr_base + 0x4)
-#define AAU_ADAR(chan) (chan->mmr_base + 0x8)
-#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
-#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
-#define AAU_DAR(chan) (chan->mmr_base + 0x20)
-#define AAU_ABCR(chan) (chan->mmr_base + 0x24)
-#define AAU_ADCR(chan) (chan->mmr_base + 0x28)
-#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
-#define AAU_EDCR0_IDX 8
-#define AAU_EDCR1_IDX 17
-#define AAU_EDCR2_IDX 26
-
-#define DMA0_ID 0
-#define DMA1_ID 1
-#define AAU_ID 2
-
-struct iop3xx_aau_desc_ctrl {
- unsigned int int_en:1;
- unsigned int blk1_cmd_ctrl:3;
- unsigned int blk2_cmd_ctrl:3;
- unsigned int blk3_cmd_ctrl:3;
- unsigned int blk4_cmd_ctrl:3;
- unsigned int blk5_cmd_ctrl:3;
- unsigned int blk6_cmd_ctrl:3;
- unsigned int blk7_cmd_ctrl:3;
- unsigned int blk8_cmd_ctrl:3;
- unsigned int blk_ctrl:2;
- unsigned int dual_xor_en:1;
- unsigned int tx_complete:1;
- unsigned int zero_result_err:1;
- unsigned int zero_result_en:1;
- unsigned int dest_write_en:1;
-};
-
-struct iop3xx_aau_e_desc_ctrl {
- unsigned int reserved:1;
- unsigned int blk1_cmd_ctrl:3;
- unsigned int blk2_cmd_ctrl:3;
- unsigned int blk3_cmd_ctrl:3;
- unsigned int blk4_cmd_ctrl:3;
- unsigned int blk5_cmd_ctrl:3;
- unsigned int blk6_cmd_ctrl:3;
- unsigned int blk7_cmd_ctrl:3;
- unsigned int blk8_cmd_ctrl:3;
- unsigned int reserved2:7;
-};
-
-struct iop3xx_dma_desc_ctrl {
- unsigned int pci_transaction:4;
- unsigned int int_en:1;
- unsigned int dac_cycle_en:1;
- unsigned int mem_to_mem_en:1;
- unsigned int crc_data_tx_en:1;
- unsigned int crc_gen_en:1;
- unsigned int crc_seed_dis:1;
- unsigned int reserved:21;
- unsigned int crc_tx_complete:1;
-};
-
-struct iop3xx_desc_dma {
- u32 next_desc;
- union {
- u32 pci_src_addr;
- u32 pci_dest_addr;
- u32 src_addr;
- };
- union {
- u32 upper_pci_src_addr;
- u32 upper_pci_dest_addr;
- };
- union {
- u32 local_pci_src_addr;
- u32 local_pci_dest_addr;
- u32 dest_addr;
- };
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_dma_desc_ctrl desc_ctrl_field;
- };
- u32 crc_addr;
-};
-
-struct iop3xx_desc_aau {
- u32 next_desc;
- u32 src[4];
- u32 dest_addr;
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_aau_desc_ctrl desc_ctrl_field;
- };
- union {
- u32 src_addr;
- u32 e_desc_ctrl;
- struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
- } src_edc[31];
-};
-
-struct iop3xx_aau_gfmr {
- unsigned int gfmr1:8;
- unsigned int gfmr2:8;
- unsigned int gfmr3:8;
- unsigned int gfmr4:8;
-};
-
-struct iop3xx_desc_pq_xor {
- u32 next_desc;
- u32 src[3];
- union {
- u32 data_mult1;
- struct iop3xx_aau_gfmr data_mult1_field;
- };
- u32 dest_addr;
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_aau_desc_ctrl desc_ctrl_field;
- };
- union {
- u32 src_addr;
- u32 e_desc_ctrl;
- struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
- u32 data_multiplier;
- struct iop3xx_aau_gfmr data_mult_field;
- u32 reserved;
- } src_edc_gfmr[19];
-};
-
-struct iop3xx_desc_dual_xor {
- u32 next_desc;
- u32 src0_addr;
- u32 src1_addr;
- u32 h_src_addr;
- u32 d_src_addr;
- u32 h_dest_addr;
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_aau_desc_ctrl desc_ctrl_field;
- };
- u32 d_dest_addr;
-};
-
-union iop3xx_desc {
- struct iop3xx_desc_aau *aau;
- struct iop3xx_desc_dma *dma;
- struct iop3xx_desc_pq_xor *pq_xor;
- struct iop3xx_desc_dual_xor *dual_xor;
- void *ptr;
-};
-
-/* No support for p+q operations */
-static inline int
-iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
-{
- BUG();
- return 0;
-}
-
-static inline void
-iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
-{
- BUG();
-}
-
-static inline void
-iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
- dma_addr_t addr, unsigned char coef)
-{
- BUG();
-}
-
-static inline int
-iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
-{
- BUG();
- return 0;
-}
-
-static inline void
-iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
-{
- BUG();
-}
-
-#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
-
-static inline void
-iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
- dma_addr_t *src)
-{
- BUG();
-}
-
-static inline int iop_adma_get_max_xor(void)
-{
- return 32;
-}
-
-static inline int iop_adma_get_max_pq(void)
-{
- BUG();
- return 0;
-}
-
-static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
-{
- int id = chan->device->id;
-
- switch (id) {
- case DMA0_ID:
- case DMA1_ID:
- return __raw_readl(DMA_DAR(chan));
- case AAU_ID:
- return __raw_readl(AAU_ADAR(chan));
- default:
- BUG();
- }
- return 0;
-}
-
-static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
- u32 next_desc_addr)
-{
- int id = chan->device->id;
-
- switch (id) {
- case DMA0_ID:
- case DMA1_ID:
- __raw_writel(next_desc_addr, DMA_NDAR(chan));
- break;
- case AAU_ID:
- __raw_writel(next_desc_addr, AAU_ANDAR(chan));
- break;
- }
-
-}
-
-#define IOP_ADMA_STATUS_BUSY (1 << 10)
-#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
-#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
-#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
-
-static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
- return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
-}
-
-static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
- int num_slots)
-{
- /* num_slots will only ever be 1, 2, 4, or 8 */
- return (desc->idx & (num_slots - 1)) ? 0 : 1;
-}
-
-/* to do: support large (i.e. > hw max) buffer sizes */
-static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
-{
- *slots_per_op = 1;
- return 1;
-}
-
-/* to do: support large (i.e. > hw max) buffer sizes */
-static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
-{
- *slots_per_op = 1;
- return 1;
-}
-
-static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
- int *slots_per_op)
-{
- static const char slot_count_table[] = {
- 1, 1, 1, 1, /* 01 - 04 */
- 2, 2, 2, 2, /* 05 - 08 */
- 4, 4, 4, 4, /* 09 - 12 */
- 4, 4, 4, 4, /* 13 - 16 */
- 8, 8, 8, 8, /* 17 - 20 */
- 8, 8, 8, 8, /* 21 - 24 */
- 8, 8, 8, 8, /* 25 - 28 */
- 8, 8, 8, 8, /* 29 - 32 */
- };
- *slots_per_op = slot_count_table[src_cnt - 1];
- return *slots_per_op;
-}
-
-static inline int
-iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return iop_chan_memcpy_slot_count(0, slots_per_op);
- case AAU_ID:
- return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
- default:
- BUG();
- }
- return 0;
-}
-
-static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
- int *slots_per_op)
-{
- int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
-
- if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
- return slot_cnt;
-
- len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
- while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
- len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
- slot_cnt += *slots_per_op;
- }
-
- slot_cnt += *slots_per_op;
-
- return slot_cnt;
-}
-
-/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
- * descriptors
- */
-static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
- int *slots_per_op)
-{
- int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
-
- if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
- return slot_cnt;
-
- len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
- len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- slot_cnt += *slots_per_op;
- }
-
- slot_cnt += *slots_per_op;
-
- return slot_cnt;
-}
-
-static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return hw_desc.dma->byte_count;
- case AAU_ID:
- return hw_desc.aau->byte_count;
- default:
- BUG();
- }
- return 0;
-}
-
-/* translate the src_idx to a descriptor word index */
-static inline int __desc_idx(int src_idx)
-{
- static const int desc_idx_table[] = { 0, 0, 0, 0,
- 0, 1, 2, 3,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 14, 15, 16, 17,
- 18, 19, 20, 21,
- 23, 24, 25, 26,
- 27, 28, 29, 30,
- };
-
- return desc_idx_table[src_idx];
-}
-
-static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan,
- int src_idx)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return hw_desc.dma->src_addr;
- case AAU_ID:
- break;
- default:
- BUG();
- }
-
- if (src_idx < 4)
- return hw_desc.aau->src[src_idx];
- else
- return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
-}
-
-static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
- int src_idx, dma_addr_t addr)
-{
- if (src_idx < 4)
- hw_desc->src[src_idx] = addr;
- else
- hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
-}
-
-static inline void
-iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
-{
- struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
- union {
- u32 value;
- struct iop3xx_dma_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- u_desc_ctrl.field.mem_to_mem_en = 1;
- u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
- hw_desc->upper_pci_src_addr = 0;
- hw_desc->crc_addr = 0;
-}
-
-static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
- u_desc_ctrl.field.dest_write_en = 1;
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
-}
-
-static inline u32
-iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
- unsigned long flags)
-{
- int i, shift;
- u32 edcr;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- switch (src_cnt) {
- case 25 ... 32:
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- edcr = 0;
- shift = 1;
- for (i = 24; i < src_cnt; i++) {
- edcr |= (1 << shift);
- shift += 3;
- }
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
- src_cnt = 24;
- /* fall through */
- case 17 ... 24:
- if (!u_desc_ctrl.field.blk_ctrl) {
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- }
- edcr = 0;
- shift = 1;
- for (i = 16; i < src_cnt; i++) {
- edcr |= (1 << shift);
- shift += 3;
- }
- hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
- src_cnt = 16;
- /* fall through */
- case 9 ... 16:
- if (!u_desc_ctrl.field.blk_ctrl)
- u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
- edcr = 0;
- shift = 1;
- for (i = 8; i < src_cnt; i++) {
- edcr |= (1 << shift);
- shift += 3;
- }
- hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
- src_cnt = 8;
- /* fall through */
- case 2 ... 8:
- shift = 1;
- for (i = 0; i < src_cnt; i++) {
- u_desc_ctrl.value |= (1 << shift);
- shift += 3;
- }
-
- if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
- u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
- }
-
- u_desc_ctrl.field.dest_write_en = 1;
- u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
-
- return u_desc_ctrl.value;
-}
-
-static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
-}
-
-/* return the number of operations */
-static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
- struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
- int i, j;
-
- hw_desc = desc->hw_desc;
-
- for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
- i += slots_per_op, j++) {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
- u_desc_ctrl.field.dest_write_en = 0;
- u_desc_ctrl.field.zero_result_en = 1;
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- iter->desc_ctrl = u_desc_ctrl.value;
-
- /* for the subsequent descriptors preserve the store queue
- * and chain them together
- */
- if (i) {
- prev_hw_desc =
- iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
- prev_hw_desc->next_desc =
- (u32) (desc->async_tx.phys + (i << 5));
- }
- }
-
- return j;
-}
-
-static inline void
-iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- switch (src_cnt) {
- case 25 ... 32:
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
- /* fall through */
- case 17 ... 24:
- if (!u_desc_ctrl.field.blk_ctrl) {
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- }
- hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
- /* fall through */
- case 9 ... 16:
- if (!u_desc_ctrl.field.blk_ctrl)
- u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
- hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
- /* fall through */
- case 1 ... 8:
- if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
- u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
- }
-
- u_desc_ctrl.field.dest_write_en = 0;
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
-}
-
-static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan,
- u32 byte_count)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- hw_desc.dma->byte_count = byte_count;
- break;
- case AAU_ID:
- hw_desc.aau->byte_count = byte_count;
- break;
- default:
- BUG();
- }
-}
-
-static inline void
-iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- iop_desc_init_memcpy(desc, 1);
- hw_desc.dma->byte_count = 0;
- hw_desc.dma->dest_addr = 0;
- hw_desc.dma->src_addr = 0;
- break;
- case AAU_ID:
- iop_desc_init_null_xor(desc, 2, 1);
- hw_desc.aau->byte_count = 0;
- hw_desc.aau->dest_addr = 0;
- hw_desc.aau->src[0] = 0;
- hw_desc.aau->src[1] = 0;
- break;
- default:
- BUG();
- }
-}
-
-static inline void
-iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
-{
- int slots_per_op = desc->slots_per_op;
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
- int i = 0;
-
- if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
- hw_desc->byte_count = len;
- } else {
- do {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- i += slots_per_op;
- } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
-
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iter->byte_count = len;
- }
-}
-
-static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan,
- dma_addr_t addr)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- hw_desc.dma->dest_addr = addr;
- break;
- case AAU_ID:
- hw_desc.aau->dest_addr = addr;
- break;
- default:
- BUG();
- }
-}
-
-static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
- dma_addr_t addr)
-{
- struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
- hw_desc->src_addr = addr;
-}
-
-static inline void
-iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
- dma_addr_t addr)
-{
-
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
- int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
- int i;
-
- for (i = 0; (slot_cnt -= slots_per_op) >= 0;
- i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
- }
-}
-
-static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
- int src_idx, dma_addr_t addr)
-{
-
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
- int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
- int i;
-
- for (i = 0; (slot_cnt -= slots_per_op) >= 0;
- i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
- }
-}
-
-static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
- u32 next_desc_addr)
-{
- /* hw_desc->next_desc is the same location for all channels */
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- iop_paranoia(hw_desc.dma->next_desc);
- hw_desc.dma->next_desc = next_desc_addr;
-}
-
-static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
-{
- /* hw_desc->next_desc is the same location for all channels */
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
- return hw_desc.dma->next_desc;
-}
-
-static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
-{
- /* hw_desc->next_desc is the same location for all channels */
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
- hw_desc.dma->next_desc = 0;
-}
-
-static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
- u32 val)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- hw_desc->src[0] = val;
-}
-
-static inline enum sum_check_flags
-iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
-
- iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
- return desc_ctrl.zero_result_err << SUM_CHECK_P;
-}
-
-static inline void iop_chan_append(struct iop_adma_chan *chan)
-{
- u32 dma_chan_ctrl;
-
- dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
- dma_chan_ctrl |= 0x2;
- __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
-{
- return __raw_readl(DMA_CSR(chan));
-}
-
-static inline void iop_chan_disable(struct iop_adma_chan *chan)
-{
- u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
- dma_chan_ctrl &= ~1;
- __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline void iop_chan_enable(struct iop_adma_chan *chan)
-{
- u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
-
- dma_chan_ctrl |= 1;
- __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
- status &= (1 << 9);
- __raw_writel(status, DMA_CSR(chan));
-}
-
-static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
- status &= (1 << 8);
- __raw_writel(status, DMA_CSR(chan));
-}
-
-static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
- break;
- case AAU_ID:
- status &= (1 << 5);
- break;
- default:
- BUG();
- }
-
- __raw_writel(status, DMA_CSR(chan));
-}
-
-static inline int
-iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
-{
- return 0;
-}
-
-static inline int
-iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
-{
- return 0;
-}
-
-static inline int
-iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
-{
- return 0;
-}
-
-static inline int
-iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
-{
- return test_bit(5, &status);
-}
-
-static inline int
-iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return test_bit(2, &status);
- default:
- return 0;
- }
-}
-
-static inline int
-iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return test_bit(3, &status);
- default:
- return 0;
- }
-}
-
-static inline int
-iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return test_bit(1, &status);
- default:
- return 0;
- }
-}
-#endif /* _ADMA_H */
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
deleted file mode 100644
index 2594a95ff19a..000000000000
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/iop3xx.h
- *
- * Intel IOP32X and IOP33X register definitions
- *
- * Author: Rory Bolt <rorybolt@pacbell.net>
- * Copyright (C) 2002 Rory Bolt
- * Copyright (C) 2004 Intel Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __IOP3XX_H
-#define __IOP3XX_H
-
-/*
- * IOP3XX GPIO handling
- */
-#define IOP3XX_GPIO_LINE(x) (x)
-
-#ifndef __ASSEMBLY__
-extern int init_atu;
-extern int iop3xx_get_init_atu(void);
-#endif
-
-
-/*
- * IOP3XX processor registers
- */
-#define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000
-#define IOP3XX_PERIPHERAL_SIZE 0x00002000
-#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
- IOP3XX_PERIPHERAL_SIZE - 1)
-#define IOP3XX_PERIPHERAL_UPPER_VA (IOP3XX_PERIPHERAL_VIRT_BASE +\
- IOP3XX_PERIPHERAL_SIZE - 1)
-#define IOP3XX_PMMR_PHYS_TO_VIRT(addr) (u32) ((u32) (addr) -\
- (IOP3XX_PERIPHERAL_PHYS_BASE\
- - IOP3XX_PERIPHERAL_VIRT_BASE))
-#define IOP3XX_REG_ADDR(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + (reg))
-
-/* Address Translation Unit */
-#define IOP3XX_ATUVID (volatile u16 *)IOP3XX_REG_ADDR(0x0100)
-#define IOP3XX_ATUDID (volatile u16 *)IOP3XX_REG_ADDR(0x0102)
-#define IOP3XX_ATUCMD (volatile u16 *)IOP3XX_REG_ADDR(0x0104)
-#define IOP3XX_ATUSR (volatile u16 *)IOP3XX_REG_ADDR(0x0106)
-#define IOP3XX_ATURID (volatile u8 *)IOP3XX_REG_ADDR(0x0108)
-#define IOP3XX_ATUCCR (volatile u32 *)IOP3XX_REG_ADDR(0x0109)
-#define IOP3XX_ATUCLSR (volatile u8 *)IOP3XX_REG_ADDR(0x010c)
-#define IOP3XX_ATULT (volatile u8 *)IOP3XX_REG_ADDR(0x010d)
-#define IOP3XX_ATUHTR (volatile u8 *)IOP3XX_REG_ADDR(0x010e)
-#define IOP3XX_ATUBIST (volatile u8 *)IOP3XX_REG_ADDR(0x010f)
-#define IOP3XX_IABAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0110)
-#define IOP3XX_IAUBAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0114)
-#define IOP3XX_IABAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0118)
-#define IOP3XX_IAUBAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x011c)
-#define IOP3XX_IABAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0120)
-#define IOP3XX_IAUBAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0124)
-#define IOP3XX_ASVIR (volatile u16 *)IOP3XX_REG_ADDR(0x012c)
-#define IOP3XX_ASIR (volatile u16 *)IOP3XX_REG_ADDR(0x012e)
-#define IOP3XX_ERBAR (volatile u32 *)IOP3XX_REG_ADDR(0x0130)
-#define IOP3XX_ATUILR (volatile u8 *)IOP3XX_REG_ADDR(0x013c)
-#define IOP3XX_ATUIPR (volatile u8 *)IOP3XX_REG_ADDR(0x013d)
-#define IOP3XX_ATUMGNT (volatile u8 *)IOP3XX_REG_ADDR(0x013e)
-#define IOP3XX_ATUMLAT (volatile u8 *)IOP3XX_REG_ADDR(0x013f)
-#define IOP3XX_IALR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0140)
-#define IOP3XX_IATVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0144)
-#define IOP3XX_ERLR (volatile u32 *)IOP3XX_REG_ADDR(0x0148)
-#define IOP3XX_ERTVR (volatile u32 *)IOP3XX_REG_ADDR(0x014c)
-#define IOP3XX_IALR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0150)
-#define IOP3XX_IALR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0154)
-#define IOP3XX_IATVR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0158)
-#define IOP3XX_OIOWTVR (volatile u32 *)IOP3XX_REG_ADDR(0x015c)
-#define IOP3XX_OMWTVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0160)
-#define IOP3XX_OUMWTVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0164)
-#define IOP3XX_OMWTVR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0168)
-#define IOP3XX_OUMWTVR1 (volatile u32 *)IOP3XX_REG_ADDR(0x016c)
-#define IOP3XX_OUDWTVR (volatile u32 *)IOP3XX_REG_ADDR(0x0178)
-#define IOP3XX_ATUCR (volatile u32 *)IOP3XX_REG_ADDR(0x0180)
-#define IOP3XX_PCSR (volatile u32 *)IOP3XX_REG_ADDR(0x0184)
-#define IOP3XX_ATUISR (volatile u32 *)IOP3XX_REG_ADDR(0x0188)
-#define IOP3XX_ATUIMR (volatile u32 *)IOP3XX_REG_ADDR(0x018c)
-#define IOP3XX_IABAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0190)
-#define IOP3XX_IAUBAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0194)
-#define IOP3XX_IALR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0198)
-#define IOP3XX_IATVR3 (volatile u32 *)IOP3XX_REG_ADDR(0x019c)
-#define IOP3XX_OCCAR (volatile u32 *)IOP3XX_REG_ADDR(0x01a4)
-#define IOP3XX_OCCDR (volatile u32 *)IOP3XX_REG_ADDR(0x01ac)
-#define IOP3XX_PDSCR (volatile u32 *)IOP3XX_REG_ADDR(0x01bc)
-#define IOP3XX_PMCAPID (volatile u8 *)IOP3XX_REG_ADDR(0x01c0)
-#define IOP3XX_PMNEXT (volatile u8 *)IOP3XX_REG_ADDR(0x01c1)
-#define IOP3XX_APMCR (volatile u16 *)IOP3XX_REG_ADDR(0x01c2)
-#define IOP3XX_APMCSR (volatile u16 *)IOP3XX_REG_ADDR(0x01c4)
-#define IOP3XX_PCIXCAPID (volatile u8 *)IOP3XX_REG_ADDR(0x01e0)
-#define IOP3XX_PCIXNEXT (volatile u8 *)IOP3XX_REG_ADDR(0x01e1)
-#define IOP3XX_PCIXCMD (volatile u16 *)IOP3XX_REG_ADDR(0x01e2)
-#define IOP3XX_PCIXSR (volatile u32 *)IOP3XX_REG_ADDR(0x01e4)
-#define IOP3XX_PCIIRSR (volatile u32 *)IOP3XX_REG_ADDR(0x01ec)
-#define IOP3XX_PCSR_OUT_Q_BUSY (1 << 15)
-#define IOP3XX_PCSR_IN_Q_BUSY (1 << 14)
-#define IOP3XX_ATUCR_OUT_EN (1 << 1)
-
-#define IOP3XX_INIT_ATU_DEFAULT 0
-#define IOP3XX_INIT_ATU_DISABLE -1
-#define IOP3XX_INIT_ATU_ENABLE 1
-
-/* Messaging Unit */
-#define IOP3XX_IMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0310)
-#define IOP3XX_IMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0314)
-#define IOP3XX_OMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0318)
-#define IOP3XX_OMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x031c)
-#define IOP3XX_IDR (volatile u32 *)IOP3XX_REG_ADDR(0x0320)
-#define IOP3XX_IISR (volatile u32 *)IOP3XX_REG_ADDR(0x0324)
-#define IOP3XX_IIMR (volatile u32 *)IOP3XX_REG_ADDR(0x0328)
-#define IOP3XX_ODR (volatile u32 *)IOP3XX_REG_ADDR(0x032c)
-#define IOP3XX_OISR (volatile u32 *)IOP3XX_REG_ADDR(0x0330)
-#define IOP3XX_OIMR (volatile u32 *)IOP3XX_REG_ADDR(0x0334)
-#define IOP3XX_MUCR (volatile u32 *)IOP3XX_REG_ADDR(0x0350)
-#define IOP3XX_QBAR (volatile u32 *)IOP3XX_REG_ADDR(0x0354)
-#define IOP3XX_IFHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0360)
-#define IOP3XX_IFTPR (volatile u32 *)IOP3XX_REG_ADDR(0x0364)
-#define IOP3XX_IPHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0368)
-#define IOP3XX_IPTPR (volatile u32 *)IOP3XX_REG_ADDR(0x036c)
-#define IOP3XX_OFHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0370)
-#define IOP3XX_OFTPR (volatile u32 *)IOP3XX_REG_ADDR(0x0374)
-#define IOP3XX_OPHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0378)
-#define IOP3XX_OPTPR (volatile u32 *)IOP3XX_REG_ADDR(0x037c)
-#define IOP3XX_IAR (volatile u32 *)IOP3XX_REG_ADDR(0x0380)
-
-/* DMA Controller */
-#define IOP3XX_DMA_PHYS_BASE(chan) (IOP3XX_PERIPHERAL_PHYS_BASE + \
- (0x400 + (chan << 6)))
-#define IOP3XX_DMA_UPPER_PA(chan) (IOP3XX_DMA_PHYS_BASE(chan) + 0x27)
-
-/* Peripheral bus interface */
-#define IOP3XX_PBCR (volatile u32 *)IOP3XX_REG_ADDR(0x0680)
-#define IOP3XX_PBISR (volatile u32 *)IOP3XX_REG_ADDR(0x0684)
-#define IOP3XX_PBBAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0688)
-#define IOP3XX_PBLR0 (volatile u32 *)IOP3XX_REG_ADDR(0x068c)
-#define IOP3XX_PBBAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0690)
-#define IOP3XX_PBLR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0694)
-#define IOP3XX_PBBAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0698)
-#define IOP3XX_PBLR2 (volatile u32 *)IOP3XX_REG_ADDR(0x069c)
-#define IOP3XX_PBBAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x06a0)
-#define IOP3XX_PBLR3 (volatile u32 *)IOP3XX_REG_ADDR(0x06a4)
-#define IOP3XX_PBBAR4 (volatile u32 *)IOP3XX_REG_ADDR(0x06a8)
-#define IOP3XX_PBLR4 (volatile u32 *)IOP3XX_REG_ADDR(0x06ac)
-#define IOP3XX_PBBAR5 (volatile u32 *)IOP3XX_REG_ADDR(0x06b0)
-#define IOP3XX_PBLR5 (volatile u32 *)IOP3XX_REG_ADDR(0x06b4)
-#define IOP3XX_PMBR0 (volatile u32 *)IOP3XX_REG_ADDR(0x06c0)
-#define IOP3XX_PMBR1 (volatile u32 *)IOP3XX_REG_ADDR(0x06e0)
-#define IOP3XX_PMBR2 (volatile u32 *)IOP3XX_REG_ADDR(0x06e4)
-
-/* Peripheral performance monitoring unit */
-#define IOP3XX_GTMR (volatile u32 *)IOP3XX_REG_ADDR(0x0700)
-#define IOP3XX_ESR (volatile u32 *)IOP3XX_REG_ADDR(0x0704)
-#define IOP3XX_EMISR (volatile u32 *)IOP3XX_REG_ADDR(0x0708)
-#define IOP3XX_GTSR (volatile u32 *)IOP3XX_REG_ADDR(0x0710)
-/* PERCR0 DOESN'T EXIST - index from 1! */
-#define IOP3XX_PERCR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0710)
-
-/* Timers */
-#define IOP3XX_TU_TMR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0000)
-#define IOP3XX_TU_TMR1 (volatile u32 *)IOP3XX_TIMER_REG(0x0004)
-#define IOP3XX_TU_TCR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0008)
-#define IOP3XX_TU_TCR1 (volatile u32 *)IOP3XX_TIMER_REG(0x000c)
-#define IOP3XX_TU_TRR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0010)
-#define IOP3XX_TU_TRR1 (volatile u32 *)IOP3XX_TIMER_REG(0x0014)
-#define IOP3XX_TU_TISR (volatile u32 *)IOP3XX_TIMER_REG(0x0018)
-#define IOP3XX_TU_WDTCR (volatile u32 *)IOP3XX_TIMER_REG(0x001c)
-#define IOP_TMR_EN 0x02
-#define IOP_TMR_RELOAD 0x04
-#define IOP_TMR_PRIVILEGED 0x08
-#define IOP_TMR_RATIO_1_1 0x00
-
-/* Watchdog timer definitions */
-#define IOP_WDTCR_EN_ARM 0x1e1e1e1e
-#define IOP_WDTCR_EN 0xe1e1e1e1
-/* iop3xx does not support stopping the watchdog, so we just re-arm */
-#define IOP_WDTCR_DIS_ARM (IOP_WDTCR_EN_ARM)
-#define IOP_WDTCR_DIS (IOP_WDTCR_EN)
-
-/* Application accelerator unit */
-#define IOP3XX_AAU_PHYS_BASE (IOP3XX_PERIPHERAL_PHYS_BASE + 0x800)
-#define IOP3XX_AAU_UPPER_PA (IOP3XX_AAU_PHYS_BASE + 0xa7)
-
-/* I2C bus interface unit */
-#define IOP3XX_ICR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1680)
-#define IOP3XX_ISR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1684)
-#define IOP3XX_ISAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1688)
-#define IOP3XX_IDBR0 (volatile u32 *)IOP3XX_REG_ADDR(0x168c)
-#define IOP3XX_IBMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1694)
-#define IOP3XX_ICR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a0)
-#define IOP3XX_ISR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a4)
-#define IOP3XX_ISAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a8)
-#define IOP3XX_IDBR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16ac)
-#define IOP3XX_IBMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16b4)
-
-
-/*
- * IOP3XX I/O and Mem space regions for PCI autoconfiguration
- */
-#define IOP3XX_PCI_LOWER_MEM_PA 0x80000000
-#define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000
-
-#define IOP3XX_PCI_LOWER_IO_PA 0x90000000
-#define IOP3XX_PCI_LOWER_IO_BA 0x00000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <linux/reboot.h>
-
-void iop3xx_map_io(void);
-void iop_init_cp6_handler(void);
-void iop_init_time(unsigned long tickrate);
-void iop3xx_restart(enum reboot_mode, const char *);
-
-static inline u32 read_tmr0(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c0, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void write_tmr0(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c0, c1, 0" : : "r" (val));
-}
-
-static inline void write_tmr1(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c1, c1, 0" : : "r" (val));
-}
-
-static inline u32 read_tcr0(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c2, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void write_tcr0(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c2, c1, 0" : : "r" (val));
-}
-
-static inline u32 read_tcr1(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c3, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void write_tcr1(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c3, c1, 0" : : "r" (val));
-}
-
-static inline void write_trr0(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c4, c1, 0" : : "r" (val));
-}
-
-static inline void write_trr1(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c5, c1, 0" : : "r" (val));
-}
-
-static inline void write_tisr(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c6, c1, 0" : : "r" (val));
-}
-
-static inline u32 read_wdtcr(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c7, c1, 0":"=r" (val));
- return val;
-}
-static inline void write_wdtcr(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c7, c1, 0"::"r" (val));
-}
-
-extern unsigned long get_iop_tick_rate(void);
-
-/* only iop13xx has these registers; we define these to present a
- * common register interface for the iop_wdt driver.
- */
-#define IOP_RCSR_WDT (0)
-static inline u32 read_rcsr(void)
-{
- return 0;
-}
-static inline void write_wdtsr(u32 val)
-{
- do { } while (0);
-}
-
-extern struct platform_device iop3xx_dma_0_channel;
-extern struct platform_device iop3xx_dma_1_channel;
-extern struct platform_device iop3xx_aau_channel;
-extern struct platform_device iop3xx_i2c0_device;
-extern struct platform_device iop3xx_i2c1_device;
-
-#endif
-
-
-#endif
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
deleted file mode 100644
index 250760e08103..000000000000
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright © 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#ifndef IOP_ADMA_H
-#define IOP_ADMA_H
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-#include <linux/interrupt.h>
-
-#define IOP_ADMA_SLOT_SIZE 32
-#define IOP_ADMA_THRESHOLD 4
-#ifdef DEBUG
-#define IOP_PARANOIA 1
-#else
-#define IOP_PARANOIA 0
-#endif
-#define iop_paranoia(x) BUG_ON(IOP_PARANOIA && (x))
-
-/**
- * struct iop_adma_device - internal representation of an ADMA device
- * @pdev: Platform device
- * @id: HW ADMA Device selector
- * @dma_desc_pool: base of DMA descriptor region (DMA address)
- * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
- * @common: embedded struct dma_device
- */
-struct iop_adma_device {
- struct platform_device *pdev;
- int id;
- dma_addr_t dma_desc_pool;
- void *dma_desc_pool_virt;
- struct dma_device common;
-};
-
-/**
- * struct iop_adma_chan - internal representation of an ADMA channel
- * @pending: allows batching of hardware operations
- * @lock: serializes enqueue/dequeue operations to the slot pool
- * @mmr_base: memory mapped register base
- * @chain: device chain view of the descriptors
- * @device: parent device
- * @common: common dmaengine channel object members
- * @last_used: place holder for allocation to continue from where it left off
- * @all_slots: complete domain of slots usable by the channel
- * @slots_allocated: records the actual size of the descriptor slot pool
- * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
- */
-struct iop_adma_chan {
- int pending;
- spinlock_t lock; /* protects the descriptor slot pool */
- void __iomem *mmr_base;
- struct list_head chain;
- struct iop_adma_device *device;
- struct dma_chan common;
- struct iop_adma_desc_slot *last_used;
- struct list_head all_slots;
- int slots_allocated;
- struct tasklet_struct irq_tasklet;
-};
-
-/**
- * struct iop_adma_desc_slot - IOP-ADMA software descriptor
- * @slot_node: node on the iop_adma_chan.all_slots list
- * @chain_node: node on the iop_adma_chan.chain list
- * @hw_desc: virtual address of the hardware descriptor chain
- * @phys: hardware address of the hardware descriptor chain
- * @group_head: first operation in a transaction
- * @slot_cnt: total slots used in a transaction (group of operations)
- * @slots_per_op: number of slots per operation
- * @idx: pool index
- * @tx_list: list of descriptors that are associated with one operation
- * @async_tx: support for the async_tx api
- * @group_list: list of slots that make up a multi-descriptor transaction
- * for example transfer lengths larger than the supported hw max
- * @xor_check_result: result of zero sum
- * @crc32_result: result of the crc calculation
- */
-struct iop_adma_desc_slot {
- struct list_head slot_node;
- struct list_head chain_node;
- void *hw_desc;
- struct iop_adma_desc_slot *group_head;
- u16 slot_cnt;
- u16 slots_per_op;
- u16 idx;
- struct list_head tx_list;
- struct dma_async_tx_descriptor async_tx;
- union {
- u32 *xor_check_result;
- u32 *crc32_result;
- u32 *pq_check_result;
- };
-};
-
-struct iop_adma_platform_data {
- int hw_id;
- dma_cap_mask_t cap_mask;
- size_t pool_size;
-};
-
-#define to_iop_sw_desc(addr_hw_desc) \
- container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc)
-#define iop_hw_desc_slot_idx(hw_desc, idx) \
- ( (void *) (((unsigned long) hw_desc) + ((idx) << 5)) )
-#endif
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
deleted file mode 100644
index 076777ff3daa..000000000000
--- a/arch/arm/include/asm/hardware/it8152.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/it8152.h
- *
- * Copyright Compulab Ltd., 2006,2007
- * Mike Rapoport <mike@compulab.co.il>
- *
- * ITE 8152 companion chip register definitions
- */
-
-#ifndef __ASM_HARDWARE_IT8152_H
-#define __ASM_HARDWARE_IT8152_H
-
-#include <mach/irqs.h>
-
-extern void __iomem *it8152_base_address;
-
-#define IT8152_IO_BASE (it8152_base_address + 0x03e00000)
-#define IT8152_CFGREG_BASE (it8152_base_address + 0x03f00000)
-
-#define __REG_IT8152(x) (it8152_base_address + (x))
-
-#define IT8152_PCI_CFG_ADDR __REG_IT8152(0x3f00800)
-#define IT8152_PCI_CFG_DATA __REG_IT8152(0x3f00804)
-
-#define IT8152_INTC_LDCNIRR __REG_IT8152(0x3f00300)
-#define IT8152_INTC_LDPNIRR __REG_IT8152(0x3f00304)
-#define IT8152_INTC_LDCNIMR __REG_IT8152(0x3f00308)
-#define IT8152_INTC_LDPNIMR __REG_IT8152(0x3f0030C)
-#define IT8152_INTC_LDNITR __REG_IT8152(0x3f00310)
-#define IT8152_INTC_LDNIAR __REG_IT8152(0x3f00314)
-#define IT8152_INTC_LPCNIRR __REG_IT8152(0x3f00320)
-#define IT8152_INTC_LPPNIRR __REG_IT8152(0x3f00324)
-#define IT8152_INTC_LPCNIMR __REG_IT8152(0x3f00328)
-#define IT8152_INTC_LPPNIMR __REG_IT8152(0x3f0032C)
-#define IT8152_INTC_LPNITR __REG_IT8152(0x3f00330)
-#define IT8152_INTC_LPNIAR __REG_IT8152(0x3f00334)
-#define IT8152_INTC_PDCNIRR __REG_IT8152(0x3f00340)
-#define IT8152_INTC_PDPNIRR __REG_IT8152(0x3f00344)
-#define IT8152_INTC_PDCNIMR __REG_IT8152(0x3f00348)
-#define IT8152_INTC_PDPNIMR __REG_IT8152(0x3f0034C)
-#define IT8152_INTC_PDNITR __REG_IT8152(0x3f00350)
-#define IT8152_INTC_PDNIAR __REG_IT8152(0x3f00354)
-#define IT8152_INTC_INTC_TYPER __REG_IT8152(0x3f003FC)
-
-#define IT8152_GPIO_GPDR __REG_IT8152(0x3f00500)
-
-/*
- Interrupt controller per register summary:
- ---------------------------------------
- LCDNIRR:
- IT8152_LD_IRQ(8) PCICLK stop
- IT8152_LD_IRQ(7) MCLK ready
- IT8152_LD_IRQ(6) s/w
- IT8152_LD_IRQ(5) UART
- IT8152_LD_IRQ(4) GPIO
- IT8152_LD_IRQ(3) TIMER 4
- IT8152_LD_IRQ(2) TIMER 3
- IT8152_LD_IRQ(1) TIMER 2
- IT8152_LD_IRQ(0) TIMER 1
-
- LPCNIRR:
- IT8152_LP_IRQ(x) serial IRQ x
-
- PCIDNIRR:
- IT8152_PD_IRQ(14) PCISERR
- IT8152_PD_IRQ(13) CPU/PCI bridge target abort (h2pTADR)
- IT8152_PD_IRQ(12) CPU/PCI bridge master abort (h2pMADR)
- IT8152_PD_IRQ(11) PCI INTD
- IT8152_PD_IRQ(10) PCI INTC
- IT8152_PD_IRQ(9) PCI INTB
- IT8152_PD_IRQ(8) PCI INTA
- IT8152_PD_IRQ(7) serial INTD
- IT8152_PD_IRQ(6) serial INTC
- IT8152_PD_IRQ(5) serial INTB
- IT8152_PD_IRQ(4) serial INTA
- IT8152_PD_IRQ(3) serial IRQ IOCHK (IOCHKR)
- IT8152_PD_IRQ(2) chaining DMA (CDMAR)
- IT8152_PD_IRQ(1) USB (USBR)
- IT8152_PD_IRQ(0) Audio controller (ACR)
- */
-#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
-#define IT8152_LAST_IRQ (IRQ_BOARD_START + 40)
-
-/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
-#define IT8152_LD_IRQ_COUNT 9
-#define IT8152_LP_IRQ_COUNT 16
-#define IT8152_PD_IRQ_COUNT 15
-
-/* Priorities: */
-#define IT8152_PD_IRQ(i) IT8152_IRQ(i)
-#define IT8152_LP_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT)
-#define IT8152_LD_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT + IT8152_LP_IRQ_COUNT)
-
-/* frequently used interrupts */
-#define IT8152_PCISERR IT8152_PD_IRQ(14)
-#define IT8152_H2PTADR IT8152_PD_IRQ(13)
-#define IT8152_H2PMAR IT8152_PD_IRQ(12)
-#define IT8152_PCI_INTD IT8152_PD_IRQ(11)
-#define IT8152_PCI_INTC IT8152_PD_IRQ(10)
-#define IT8152_PCI_INTB IT8152_PD_IRQ(9)
-#define IT8152_PCI_INTA IT8152_PD_IRQ(8)
-#define IT8152_CDMA_INT IT8152_PD_IRQ(2)
-#define IT8152_USB_INT IT8152_PD_IRQ(1)
-#define IT8152_AUDIO_INT IT8152_PD_IRQ(0)
-
-struct pci_dev;
-struct pci_sys_data;
-
-extern void it8152_irq_demux(struct irq_desc *desc);
-extern void it8152_init_irq(void);
-extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
-extern struct pci_ops it8152_ops;
-
-#endif /* __ASM_HARDWARE_IT8152_H */
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
index 74e51d6bd93f..3190e1e5067a 100644
--- a/arch/arm/include/asm/hardware/locomo.h
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -158,8 +158,6 @@
#define LOCOMO_LPT_TOH(TOH) ((TOH & 0x7) << 4)
#define LOCOMO_LPT_TOL(TOL) ((TOL & 0x7))
-extern struct bus_type locomo_bus_type;
-
#define LOCOMO_DEVID_KEYBOARD 0
#define LOCOMO_DEVID_FRONTLIGHT 1
#define LOCOMO_DEVID_BACKLIGHT 2
@@ -188,16 +186,14 @@ struct locomo_driver {
struct device_driver drv;
unsigned int devid;
int (*probe)(struct locomo_dev *);
- int (*remove)(struct locomo_dev *);
- int (*suspend)(struct locomo_dev *, pm_message_t);
- int (*resume)(struct locomo_dev *);
+ void (*remove)(struct locomo_dev *);
};
-#define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv)
+#define LOCOMO_DRV(_d) container_of_const((_d), struct locomo_driver, drv)
#define LOCOMO_DRIVER_NAME(_ldev) ((_ldev)->dev.driver->name)
-void locomo_lcd_power(struct locomo_dev *, int, unsigned int);
+extern void locomolcd_power(int on);
int locomo_driver_register(struct locomo_driver *);
void locomo_driver_unregister(struct locomo_driver *);
diff --git a/arch/arm/include/asm/hardware/memc.h b/arch/arm/include/asm/hardware/memc.h
index 42ba7c167d1f..1d4ebe0a9678 100644
--- a/arch/arm/include/asm/hardware/memc.h
+++ b/arch/arm/include/asm/hardware/memc.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/memc.h
*
* Copyright (C) Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define VDMA_ALIGNMENT PAGE_SIZE
#define VDMA_XFERSIZE 16
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index 8979fa3bbf2d..90b6a832108d 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/hardware/sa1111.h
*
@@ -12,35 +13,6 @@
#ifndef _ASM_ARCH_SA1111
#define _ASM_ARCH_SA1111
-#include <mach/bitfield.h>
-
-/*
- * The SA1111 is always located at virtual 0xf4000000, and is always
- * "native" endian.
- */
-
-#define SA1111_VBASE 0xf4000000
-
-/* Don't use these! */
-#define SA1111_p2v( x ) ((x) - SA1111_BASE + SA1111_VBASE)
-#define SA1111_v2p( x ) ((x) - SA1111_VBASE + SA1111_BASE)
-
-#ifndef __ASSEMBLY__
-#define _SA1111(x) ((x) + sa1111->resource.start)
-#endif
-
-#define sa1111_writel(val,addr) __raw_writel(val, addr)
-#define sa1111_readl(addr) __raw_readl(addr)
-
-/*
- * 26 bits of the SA-1110 address bus are available to the SA-1111.
- * Use these when feeding target addresses to the DMA engines.
- */
-
-#define SA1111_ADDR_WIDTH (26)
-#define SA1111_ADDR_MASK ((1<<SA1111_ADDR_WIDTH)-1)
-#define SA1111_DMA_ADDR(x) ((x)&SA1111_ADDR_MASK)
-
/*
* Don't ask the (SAC) DMA engines to move less than this amount.
*/
@@ -396,7 +368,7 @@
-extern struct bus_type sa1111_bus_type;
+extern const struct bus_type sa1111_bus_type;
#define SA1111_DEVID_SBI (1 << 0)
#define SA1111_DEVID_SK (1 << 1)
@@ -416,7 +388,7 @@ struct sa1111_dev {
struct resource res;
void __iomem *mapbase;
unsigned int skpcr_mask;
- unsigned int irq[6];
+ unsigned int hwirq[6];
u64 dma_mask;
};
@@ -429,13 +401,10 @@ struct sa1111_driver {
struct device_driver drv;
unsigned int devid;
int (*probe)(struct sa1111_dev *);
- int (*remove)(struct sa1111_dev *);
- int (*suspend)(struct sa1111_dev *, pm_message_t);
- int (*resume)(struct sa1111_dev *);
- void (*shutdown)(struct sa1111_dev *);
+ void (*remove)(struct sa1111_dev *);
};
-#define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv)
+#define SA1111_DRV(_d) container_of_const((_d), struct sa1111_driver, drv)
#define SA1111_DRIVER_NAME(_sadev) ((_sadev)->dev.driver->name)
@@ -462,10 +431,6 @@ int sa1111_check_dma_bug(dma_addr_t addr);
int sa1111_driver_register(struct sa1111_driver *);
void sa1111_driver_unregister(struct sa1111_driver *);
-void sa1111_set_io_dir(struct sa1111_dev *sadev, unsigned int bits, unsigned int dir, unsigned int sleep_dir);
-void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
-void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
-
struct sa1111_platform_data {
int irq_base; /* base for cascaded on-chip IRQs */
unsigned disable_devs;
diff --git a/arch/arm/include/asm/hardware/scoop.h b/arch/arm/include/asm/hardware/scoop.h
index 58cdf5d84122..505453315287 100644
--- a/arch/arm/include/asm/hardware/scoop.h
+++ b/arch/arm/include/asm/hardware/scoop.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Definitions for the SCOOP interface found on various Sharp PDAs
*
* Copyright (c) 2004 Richard Purdie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#define SCOOP_MCR 0x00
diff --git a/arch/arm/include/asm/hardware/ssp.h b/arch/arm/include/asm/hardware/ssp.h
index 3b42e181997c..72d176790308 100644
--- a/arch/arm/include/asm/hardware/ssp.h
+++ b/arch/arm/include/asm/hardware/ssp.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ssp.h
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef SSP_H
#define SSP_H
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 0a0e2d1784c0..bdb209e002a4 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H
-#include <asm/kmap_types.h>
+#include <asm/cachetype.h>
+#include <asm/fixmap.h>
#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP PTRS_PER_PTE
@@ -9,8 +11,6 @@
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-#define kmap_prot PAGE_KERNEL
-
#define flush_cache_kmaps() \
do { \
if (cache_is_vivt()) \
@@ -18,10 +18,6 @@
} while (0)
extern pte_t *pkmap_page_table;
-extern pte_t *fixmap_page_table;
-
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
/*
* The reason for kmap_high_get() is to ensure that the currently kmap'd
@@ -50,24 +46,33 @@ extern void kunmap_high(struct page *page);
#endif
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
-extern void *kmap_high_get(struct page *page);
-#else
-static inline void *kmap_high_get(struct page *page)
+extern void *kmap_high_get(const struct page *page);
+
+static inline void *arch_kmap_local_high_get(const struct page *page)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
+ return NULL;
+ return kmap_high_get(page);
+}
+#define arch_kmap_local_high_get arch_kmap_local_high_get
+
+#else /* ARCH_NEEDS_KMAP_HIGH_GET */
+static inline void *kmap_high_get(const struct page *page)
{
return NULL;
}
-#endif
+#endif /* !ARCH_NEEDS_KMAP_HIGH_GET */
-/*
- * The following functions are already defined by <linux/highmem.h>
- * when CONFIG_HIGHMEM is not set.
- */
-#ifdef CONFIG_HIGHMEM
-extern void *kmap(struct page *page);
-extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
-extern void *kmap_atomic_pfn(unsigned long pfn);
-#endif
+#define arch_kmap_local_post_map(vaddr, pteval) \
+ local_flush_tlb_kernel_page(vaddr)
+
+#define arch_kmap_local_pre_unmap(vaddr) \
+do { \
+ if (cache_is_vivt()) \
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); \
+} while (0)
+
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_kernel_page(vaddr)
#endif
diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h
index d4014fbe5ea3..87d48e2d90ad 100644
--- a/arch/arm/include/asm/hugetlb-3level.h
+++ b/arch/arm/include/asm/hugetlb-3level.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hugetlb-3level.h
*
* Copyright (C) 2012 ARM Ltd.
*
* Based on arch/x86/include/asm/hugetlb.h.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_ARM_HUGETLB_3LEVEL_H
@@ -25,11 +13,12 @@
/*
* If our huge pte is non-zero then mark the valid bit.
- * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
+ * This allows pte_present(huge_ptep_get(mm,addr,ptep)) to return true for non-zero
* ptes.
* (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
*/
-static inline pte_t huge_ptep_get(pte_t *ptep)
+#define __HAVE_ARCH_HUGE_PTEP_GET
+static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t retval = *ptep;
if (pte_val(retval))
@@ -37,35 +26,4 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return retval;
}
-static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- set_pte_at(mm, addr, ptep, pte);
-}
-
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- ptep_clear_flush(vma, addr, ptep);
-}
-
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- ptep_set_wrprotect(mm, addr, ptep);
-}
-
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- return ptep_get_and_clear(mm, addr, ptep);
-}
-
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
-{
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-}
-
#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
index 7d26f6c4f0f5..700055b1ccb3 100644
--- a/arch/arm/include/asm/hugetlb.h
+++ b/arch/arm/include/asm/hugetlb.h
@@ -1,71 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hugetlb.h
*
* Copyright (C) 2012 ARM Ltd.
*
* Based on arch/x86/include/asm/hugetlb.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_ARM_HUGETLB_H
#define _ASM_ARM_HUGETLB_H
+#include <asm/cacheflush.h>
#include <asm/page.h>
-#include <asm-generic/hugetlb.h>
-
#include <asm/hugetlb-3level.h>
+#include <asm-generic/hugetlb.h>
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor,
- unsigned long ceiling)
-{
- free_pgd_range(tlb, addr, end, floor, ceiling);
-}
-
-
-static inline int is_hugepage_only_range(struct mm_struct *mm,
- unsigned long addr, unsigned long len)
-{
- return 0;
-}
-
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- struct hstate *h = hstate_file(file);
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (addr & ~huge_page_mask(h))
- return -EINVAL;
- return 0;
-}
-
-static inline int huge_pte_none(pte_t pte)
-{
- return pte_none(pte);
-}
-
-static inline pte_t huge_pte_wrprotect(pte_t pte)
-{
- return pte_wrprotect(pte);
-}
-
-static inline void arch_clear_hugepage_flags(struct page *page)
+static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &page->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
+#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
#endif /* _ASM_ARM_HUGETLB_H */
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index afcaf8bf971b..e7f9961c53b2 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARM_HW_BREAKPOINT_H
#define _ARM_HW_BREAKPOINT_H
@@ -52,6 +53,9 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
#define ARM_DEBUG_ARCH_V8 6
+#define ARM_DEBUG_ARCH_V8_1 7
+#define ARM_DEBUG_ARCH_V8_2 8
+#define ARM_DEBUG_ARCH_V8_4 9
/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0
@@ -80,6 +84,7 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DSCR_MOE(x) ((x >> 2) & 0xf)
#define ARM_ENTRY_BREAKPOINT 0x1
#define ARM_ENTRY_ASYNC_WATCHPOINT 0x2
+#define ARM_ENTRY_CFI_BREAKPOINT 0x3
#define ARM_ENTRY_SYNC_WATCHPOINT 0xa
/* DSCR monitor/halting bits. */
@@ -110,14 +115,17 @@ static inline void decode_ctrl_reg(u32 reg,
asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
} while (0)
+struct perf_event_attr;
struct notifier_block;
struct perf_event;
struct pmu;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index 9beb92914f4d..cecc13214ef1 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Nothing to see here yet
*/
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index 6e183fd269fb..e31d9f1b8549 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_HWCAP_H
#define __ASMARM_HWCAP_H
diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h
index b90d9e523d6f..8a648e506540 100644
--- a/arch/arm/include/asm/hypervisor.h
+++ b/arch/arm/include/asm/hypervisor.h
@@ -1,6 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_HYPERVISOR_H
#define _ASM_ARM_HYPERVISOR_H
#include <asm/xen/hypervisor.h>
+void kvm_init_hyp_services(void);
+bool kvm_arm_hyp_service_available(u32 func_id);
+
+static inline void kvm_arch_init_hyp_services(void) { };
+
#endif
diff --git a/arch/arm/include/asm/ide.h b/arch/arm/include/asm/ide.h
deleted file mode 100644
index b507ce8e5019..000000000000
--- a/arch/arm/include/asm/ide.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * arch/arm/include/asm/ide.h
- *
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-/*
- * This file contains the ARM architecture specific IDE code.
- */
-
-#ifndef __ASMARM_IDE_H
-#define __ASMARM_IDE_H
-
-#ifdef __KERNEL__
-
-#define __ide_mm_insw(port,addr,len) readsw(port,addr,len)
-#define __ide_mm_insl(port,addr,len) readsl(port,addr,len)
-#define __ide_mm_outsw(port,addr,len) writesw(port,addr,len)
-#define __ide_mm_outsl(port,addr,len) writesl(port,addr,len)
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASMARM_IDE_H */
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index bf863edb517d..baebb67b3512 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -1,11 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_IDMAP_H
#define __ASM_IDMAP_H
#include <linux/compiler.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
/* Tag a function as requiring to be executed via an identity mapping. */
-#define __idmap __section(.idmap.text) noinline notrace
+#define __idmap __section(".idmap.text") noinline notrace
extern pgd_t *idmap_pgd;
diff --git a/arch/arm/include/asm/insn.h b/arch/arm/include/asm/insn.h
index e96065da4dae..faf3d1c28368 100644
--- a/arch/arm/include/asm/insn.h
+++ b/arch/arm/include/asm/insn.h
@@ -1,6 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_INSN_H
#define __ASM_ARM_INSN_H
+#include <linux/types.h>
+
+/*
+ * Avoid a literal load by emitting a sequence of ADD/LDR instructions with the
+ * appropriate relocations. The combined sequence has a range of -/+ 256 MiB,
+ * which should be sufficient for the core kernel as well as modules loaded
+ * into the module region. (Not supported by LLD before release 14)
+ */
+#define LOAD_SYM_ARMV6(reg, sym) \
+ " .globl " #sym " \n\t" \
+ " .reloc 10f, R_ARM_ALU_PC_G0_NC, " #sym " \n\t" \
+ " .reloc 11f, R_ARM_ALU_PC_G1_NC, " #sym " \n\t" \
+ " .reloc 12f, R_ARM_LDR_PC_G2, " #sym " \n\t" \
+ "10: sub " #reg ", pc, #8 \n\t" \
+ "11: sub " #reg ", " #reg ", #4 \n\t" \
+ "12: ldr " #reg ", [" #reg ", #0] \n\t"
+
static inline unsigned long
arm_gen_nop(void)
{
@@ -12,18 +30,18 @@ arm_gen_nop(void)
}
unsigned long
-__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn);
static inline unsigned long
arm_gen_branch(unsigned long pc, unsigned long addr)
{
- return __arm_gen_branch(pc, addr, false);
+ return __arm_gen_branch(pc, addr, false, true);
}
static inline unsigned long
-arm_gen_branch_link(unsigned long pc, unsigned long addr)
+arm_gen_branch_link(unsigned long pc, unsigned long addr, bool warn)
{
- return __arm_gen_branch(pc, addr, true);
+ return __arm_gen_branch(pc, addr, true, warn);
}
#endif
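
Two things change in insn.h: __arm_gen_branch() grows a "warn" flag so callers such as arm_gen_branch_link() can suppress the out-of-range warning, and the new LOAD_SYM_ARMV6() macro emits a PC-relative ADD/ADD/LDR sequence with group relocations instead of a literal-pool load. A minimal sketch of how the macro can be used from C follows; "my_global" is a hypothetical symbol invented for this illustration.

	/* Hypothetical 32-bit global, used only for this sketch. */
	extern unsigned long my_global;

	static inline unsigned long load_my_global(void)
	{
		unsigned long val;

		/*
		 * Expands to the ADD/ADD/LDR sequence above; the final LDR
		 * reads the word stored at my_global without a literal pool.
		 */
		asm(LOAD_SYM_ARMV6(%0, my_global) : "=r" (val));
		return val;
	}
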
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 2cfbc531f63b..bae5edf348ef 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/io.h
*
* Copyright (C) 1996-2000 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Modifications:
* 16-Sep-1996 RMK Inlined the inx/outx functions & optimised for both
* constant addresses and variable addresses.
@@ -26,15 +23,13 @@
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm-generic/pci_iomap.h>
-#include <xen/xen.h>
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
-#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*
@@ -143,11 +138,10 @@ extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
void *);
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
-extern void __iounmap(volatile void __iomem *addr);
+void __arm_iomem_set_ro(void __iomem *ptr, size_t size);
extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *);
-extern void (*arch_iounmap)(volatile void __iomem *);
/*
* Bad read/write accesses...
@@ -178,13 +172,16 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
#define PCI_IO_VIRT_BASE 0xfee00000
#define PCI_IOBASE ((void __iomem *)PCI_IO_VIRT_BASE)
-#if defined(CONFIG_PCI)
+#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
void pci_ioremap_set_mem_type(int mem_type);
#else
static inline void pci_ioremap_set_mem_type(int mem_type) {}
#endif
-extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
+struct resource;
+
+#define pci_remap_iospace pci_remap_iospace
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
/*
* PCI configuration space mapping function.
@@ -201,32 +198,13 @@ void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size);
*/
#ifdef CONFIG_NEED_MACH_IO_H
#include <mach/io.h>
-#elif defined(CONFIG_PCI)
-#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
-#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#else
-#define __io(a) __typesafe_io((a) & IO_SPACE_LIMIT)
-#endif
-
-/*
- * This is the limit of PC card/PCI/ISA IO space, which is by default
- * 64K if we have PC card, PCI or ISA support. Otherwise, default to
- * zero to prevent ISA/PCI drivers claiming IO space (and potentially
- * oopsing.)
- *
- * Only set this larger if you really need inb() et.al. to operate over
- * a larger address space. Note that SOC_COMMON ioremaps each sockets
- * IO space area, and so inb() et.al. must be defined to operate as per
- * readb() et.al. on such platforms.
- */
-#ifndef IO_SPACE_LIMIT
-#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
-#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
-#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
-#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
+#if IS_ENABLED(CONFIG_PCMCIA) || defined(CONFIG_PCI)
+#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
#else
#define IO_SPACE_LIMIT ((resource_size_t)0)
#endif
+#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#endif
/*
@@ -282,8 +260,6 @@ extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void _memset_io(volatile void __iomem *, int, size_t);
-#define mmiowb()
-
/*
* Memory access primitives
* ------------------------
@@ -363,7 +339,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
*
* Function Memory type Cacheability Cache hint
* ioremap() Device n/a n/a
- * ioremap_nocache() Device n/a n/a
* ioremap_cache() Normal Writeback Read allocate
* ioremap_wc() Normal Non-cacheable n/a
* ioremap_wt() Normal Non-cacheable n/a
@@ -375,13 +350,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
* - unaligned accesses are "unpredictable"
* - writes may be delayed before they hit the endpoint device
*
- * ioremap_nocache() is the same as ioremap() as there are too many device
- * drivers using this for device registers, and documentation which tells
- * people to use it for such for this to be any different. This is not a
- * safe fallback for memory-like mappings, or memory regions where the
- * compiler may generate unaligned accesses - eg, via inlining its own
- * memcpy.
- *
* All normal memory mappings have the following properties:
* - reads can be repeated with no side effects
* - repeated reads return the last value written
@@ -399,7 +367,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
*/
void __iomem *ioremap(resource_size_t res_cookie, size_t size);
#define ioremap ioremap
-#define ioremap_nocache ioremap
/*
* Do not use ioremap_cache for mapping memory. Use memremap instead.
@@ -407,20 +374,14 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
#define ioremap_cache ioremap_cache
-/*
- * Do not use ioremap_cached in new code. Provided for the benefit of
- * the pxa2xx-flash MTD driver only.
- */
-void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size);
-
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc
-void iounmap(volatile void __iomem *iomem_cookie);
+void iounmap(volatile void __iomem *io_addr);
#define iounmap iounmap
-void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags);
#define arch_memremap_wb arch_memremap_wb
/*
@@ -446,38 +407,15 @@ struct pci_dev;
#define pci_iounmap pci_iounmap
extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
#include <asm-generic/io.h>
-/*
- * can the hardware map this into one segment or not, given no other
- * constraints.
- */
-#define BIOVEC_MERGEABLE(vec1, vec2) \
- ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-struct bio_vec;
-extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
- (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
-
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
-extern int devmem_is_allowed(unsigned long pfn);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
#endif
/*
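
The io.h changes replace pci_ioremap_io(), which took a raw offset, with a pci_remap_iospace() implementation that takes a struct resource describing the I/O window, and they extend the static PCI I/O mapping to PCMCIA configurations. A hedged sketch of how a host-bridge driver might call the new interface (the resource values are purely illustrative; a real driver takes them from the firmware description):

	#include <linux/ioport.h>
	#include <linux/pci.h>
	#include <linux/sizes.h>

	static int example_map_pci_io_window(phys_addr_t cpu_phys)
	{
		struct resource io_res = {
			.name	= "example PCI I/O",
			.start	= 0,
			.end	= SZ_64K - 1,
			.flags	= IORESOURCE_IO,
		};

		/* Maps the window into the fixed PCI_IO_VIRT_BASE region. */
		return pci_remap_iospace(&io_res, cpu_phys);
	}
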
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index e53638c8ed8a..26c1d2ced4ce 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_IRQ_H
#define __ASM_ARM_IRQ_H
@@ -24,20 +25,14 @@
#ifndef __ASSEMBLY__
struct irqaction;
struct pt_regs;
-extern void migrate_irqs(void);
-extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
-void init_IRQ(void);
-
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-#endif
#ifdef CONFIG_SMP
+#include <linux/cpumask.h>
+
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
- bool exclude_self);
+ int exclude_cpu);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif
diff --git a/arch/arm/include/asm/irq_work.h b/arch/arm/include/asm/irq_work.h
index 712d03e5973a..8895999834cc 100644
--- a/arch/arm/include/asm/irq_work.h
+++ b/arch/arm/include/asm/irq_work.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_IRQ_WORK_H
#define __ASM_ARM_IRQ_WORK_H
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index e6b70d9d084e..aeec7f24eb75 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_IRQFLAGS_H
#define __ASM_ARM_IRQFLAGS_H
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 34f7b6980d21..a35aba7f548c 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_JUMP_LABEL_H
#define _ASM_ARM_JUMP_LABEL_H
@@ -8,13 +9,17 @@
#define JUMP_LABEL_NOP_SIZE 4
+/* This macro is also expanded on the Rust side. */
+#define ARCH_STATIC_BRANCH_ASM(key, label) \
+ "1:\n\t" \
+ WASM(nop) "\n\t" \
+ ".pushsection __jump_table, \"aw\"\n\t" \
+ ".word 1b, " label ", " key "\n\t" \
+ ".popsection\n\t" \
+
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
- WASM(nop) "\n\t"
- ".pushsection __jump_table, \"aw\"\n\t"
- ".word 1b, %l[l_yes], %c0\n\t"
- ".popsection\n\t"
+ asm goto(ARCH_STATIC_BRANCH_ASM("%c0", "%l[l_yes]")
: : "i" (&((char *)key)[branch]) : : l_yes);
return false;
@@ -24,7 +29,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
WASM(b) " %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
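
The jump_label.h change factors the inline-asm template out into ARCH_STATIC_BRANCH_ASM (so the Rust side can expand the same sequence) and switches from asm_volatile_goto() to plain asm goto. arch_static_branch() is what ultimately backs the generic static-key API; as a self-contained reminder of that consumer-side usage (example_key is a made-up key, not part of this patch):

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(example_key);

	static int example_hot_path(void)
	{
		/* A NOP until static_branch_enable(&example_key) patches it. */
		if (static_branch_unlikely(&example_key))
			return 1;
		return 0;
	}
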
diff --git a/arch/arm/include/asm/kasan.h b/arch/arm/include/asm/kasan.h
new file mode 100644
index 000000000000..303c35df3135
--- /dev/null
+++ b/arch/arm/include/asm/kasan.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/kasan.h
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifdef CONFIG_KASAN
+
+#include <asm/kasan_def.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+/*
+ * The compiler uses a shadow offset assuming that addresses start
+ * from 0. Kernel addresses don't start from 0, so shadow
+ * for kernel really starts from 'compiler's shadow offset' +
+ * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT)
+ */
+
+asmlinkage void kasan_early_init(void);
+extern void kasan_init(void);
+
+#else
+static inline void kasan_init(void) { }
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/kasan_def.h b/arch/arm/include/asm/kasan_def.h
new file mode 100644
index 000000000000..5739605aa7cf
--- /dev/null
+++ b/arch/arm/include/asm/kasan_def.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/kasan_def.h
+ *
+ * Copyright (c) 2018 Huawei Technologies Co., Ltd.
+ *
+ * Author: Abbott Liu <liuwenliang@huawei.com>
+ */
+
+#ifndef __ASM_KASAN_DEF_H
+#define __ASM_KASAN_DEF_H
+
+#ifdef CONFIG_KASAN
+
+/*
+ * Define KASAN_SHADOW_OFFSET, KASAN_SHADOW_START and KASAN_SHADOW_END for
+ * the Arm kernel address sanitizer. We are "stealing" lowmem (the 4 GB
+ * addressable by a 32-bit architecture) out of the virtual address
+ * space to use as shadow memory for KASan as follows:
+ *
+ * +----+ 0xffffffff
+ * | | \
+ * | | |-> Static kernel image (vmlinux) BSS and page table
+ * | |/
+ * +----+ PAGE_OFFSET
+ * | | \
+ * | | |-> Loadable kernel modules virtual address space area
+ * | |/
+ * +----+ MODULES_VADDR = KASAN_SHADOW_END
+ * | | \
+ * | | |-> The shadow area of kernel virtual address.
+ * | |/
+ * +----+-> TASK_SIZE (start of kernel space) = KASAN_SHADOW_START the
+ * | |\ shadow address of MODULES_VADDR
+ * | | |
+ * | | |
+ * | | |-> The user space area in lowmem. The kernel address
+ * | | | sanitizer does not use this space, nor does it map it.
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * | |/
+ * ------ 0
+ *
+ * 1) KASAN_SHADOW_START
+ * This value is the shadow address of MODULES_VADDR. It is the
+ * start of kernel virtual space. Since we have modules to load, we
+ * also need to cover that area with shadow memory so we can find
+ * memory bugs in modules.
+ *
+ * 2) KASAN_SHADOW_END
+ * This value is the shadow address of 0x100000000: the mapping that
+ * would follow the end of kernel memory at 0xffffffff. It is the end of
+ * the kernel address sanitizer shadow area and also the start of the
+ * module area.
+ *
+ * 3) KASAN_SHADOW_OFFSET:
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ *
+ * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ *
+ * As you would expect, >> 3 is equal to dividing by 8, meaning each
+ * byte in the shadow memory covers 8 bytes of kernel memory, i.e. one
+ * bit of shadow memory is used per byte of kernel memory.
+ *
+ * The KASAN_SHADOW_OFFSET is provided in a Kconfig option depending
+ * on the VMSPLIT layout of the system: the kernel and userspace can
+ * split up lowmem in different ways according to needs, so we calculate
+ * the shadow offset depending on this.
+ */
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_END ((UL(1) << (32 - KASAN_SHADOW_SCALE_SHIFT)) \
+ + KASAN_SHADOW_OFFSET)
+#define KASAN_SHADOW_START ((KASAN_SHADOW_END >> 3) + KASAN_SHADOW_OFFSET)
+
+#endif
+#endif
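
Putting the formula in the comment above into code: an address is mapped to its shadow byte by shifting right by KASAN_SHADOW_SCALE_SHIFT and adding KASAN_SHADOW_OFFSET. A minimal sketch, mirroring the generic kasan_mem_to_shadow() helper and shown here only to illustrate the constants defined in this file:

	/* Sketch only: the generic kasan_mem_to_shadow() translation. */
	static inline void *example_mem_to_shadow(const void *addr)
	{
		return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
				+ KASAN_SHADOW_OFFSET);
	}
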
diff --git a/arch/arm/include/asm/kexec-internal.h b/arch/arm/include/asm/kexec-internal.h
new file mode 100644
index 000000000000..ecc2322db7aa
--- /dev/null
+++ b/arch/arm/include/asm/kexec-internal.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+ unsigned long kexec_start_address;
+ unsigned long kexec_indirection_page;
+ unsigned long kexec_mach_type;
+ unsigned long kexec_r2;
+};
+
+#endif
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index 25021b798a1e..a8287e7ab9d4 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARM_KEXEC_H
#define _ARM_KEXEC_H
-#ifdef CONFIG_KEXEC
-
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
@@ -55,9 +54,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
}
}
-/* Function pointer to optional machine-specific reinitialization */
-extern void (*kexec_reinit)(void);
-
static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
{
return phys_to_idmap(phys);
@@ -84,6 +80,4 @@ static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_KEXEC */
-
#endif /* _ARM_KEXEC_H */
diff --git a/arch/arm/include/asm/kfence.h b/arch/arm/include/asm/kfence.h
new file mode 100644
index 000000000000..7980d0f2271f
--- /dev/null
+++ b/arch/arm/include/asm/kfence.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_ARM_KFENCE_H
+#define __ASM_ARM_KFENCE_H
+
+#include <linux/kfence.h>
+
+#include <asm/pgalloc.h>
+#include <asm/set_memory.h>
+
+static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
+{
+ int i;
+ unsigned long pfn = PFN_DOWN(__pa(addr));
+ pte_t *pte = pte_alloc_one_kernel(&init_mm);
+
+ if (!pte)
+ return -ENOMEM;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
+ pmd_populate_kernel(&init_mm, pmd, pte);
+
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+ return 0;
+}
+
+static inline bool arch_kfence_init_pool(void)
+{
+ unsigned long addr;
+ pmd_t *pmd;
+
+ for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+ addr += PAGE_SIZE) {
+ pmd = pmd_off_k(addr);
+
+ if (pmd_leaf(*pmd)) {
+ if (split_pmd_page(pmd, addr & PMD_MASK))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ set_memory_valid(addr, 1, !protect);
+
+ return true;
+}
+
+#endif /* __ASM_ARM_KFENCE_H */
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 0a9d5dd93294..8de1100d1067 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ARM KGDB support
*
@@ -76,7 +77,7 @@ extern int kgdb_fault_expected;
#define KGDB_MAX_NO_CPUS 1
#define BUFMAX 400
-#define NUMREGBYTES (DBG_MAX_REG_NUM << 2)
+#define NUMREGBYTES (GDB_MAX_REGS << 2)
#define NUMCRITREGBYTES (32 << 2)
#define _R0 0
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
deleted file mode 100644
index 83eb2f772911..000000000000
--- a/arch/arm/include/asm/kmap_types.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ARM_KMAP_TYPES_H
-#define __ARM_KMAP_TYPES_H
-
-/*
- * This is the "bare minimum". AIO seems to require this.
- */
-#define KM_TYPE_NR 16
-
-#endif
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 59655459da59..5b8dbf1b0be4 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/kprobes.h
*
* Copyright (C) 2006, 2007 Motorola Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _ARM_KPROBES_H
@@ -44,30 +36,26 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned int kprobe_status;
struct prev_kprobe prev_kprobe;
- struct pt_regs jprobe_saved_regs;
- char jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
-int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
/* optinsn template addresses */
-extern __visible kprobe_opcode_t optprobe_template_entry;
-extern __visible kprobe_opcode_t optprobe_template_val;
-extern __visible kprobe_opcode_t optprobe_template_call;
-extern __visible kprobe_opcode_t optprobe_template_end;
-extern __visible kprobe_opcode_t optprobe_template_sub_sp;
-extern __visible kprobe_opcode_t optprobe_template_add_sp;
-extern __visible kprobe_opcode_t optprobe_template_restore_begin;
-extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
-extern __visible kprobe_opcode_t optprobe_template_restore_end;
+extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_val[];
+extern __visible kprobe_opcode_t optprobe_template_call[];
+extern __visible kprobe_opcode_t optprobe_template_end[];
+extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
+extern __visible kprobe_opcode_t optprobe_template_add_sp[];
+extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
+extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
+extern __visible kprobe_opcode_t optprobe_template_restore_end[];
#define MAX_OPTIMIZED_LENGTH 4
#define MAX_OPTINSN_SIZE \
- ((unsigned long)&optprobe_template_end - \
- (unsigned long)&optprobe_template_entry)
+ ((unsigned long)optprobe_template_end - \
+ (unsigned long)optprobe_template_entry)
#define RELATIVEJUMP_SIZE 4
struct arch_optimized_insn {
diff --git a/arch/arm/include/asm/krait-l2-accessors.h b/arch/arm/include/asm/krait-l2-accessors.h
new file mode 100644
index 000000000000..a5f2cdd6445f
--- /dev/null
+++ b/arch/arm/include/asm/krait-l2-accessors.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASMARM_KRAIT_L2_ACCESSORS_H
+#define __ASMARM_KRAIT_L2_ACCESSORS_H
+
+extern void krait_set_l2_indirect_reg(u32 addr, u32 val);
+extern u32 krait_get_l2_indirect_reg(u32 addr);
+
+#endif
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
deleted file mode 100644
index ebf020b02bc8..000000000000
--- a/arch/arm/include/asm/kvm_arm.h
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_ARM_H__
-#define __ARM_KVM_ARM_H__
-
-#include <linux/const.h>
-#include <linux/types.h>
-
-/* Hyp Configuration Register (HCR) bits */
-#define HCR_TGE (1 << 27)
-#define HCR_TVM (1 << 26)
-#define HCR_TTLB (1 << 25)
-#define HCR_TPU (1 << 24)
-#define HCR_TPC (1 << 23)
-#define HCR_TSW (1 << 22)
-#define HCR_TAC (1 << 21)
-#define HCR_TIDCP (1 << 20)
-#define HCR_TSC (1 << 19)
-#define HCR_TID3 (1 << 18)
-#define HCR_TID2 (1 << 17)
-#define HCR_TID1 (1 << 16)
-#define HCR_TID0 (1 << 15)
-#define HCR_TWE (1 << 14)
-#define HCR_TWI (1 << 13)
-#define HCR_DC (1 << 12)
-#define HCR_BSU (3 << 10)
-#define HCR_BSU_IS (1 << 10)
-#define HCR_FB (1 << 9)
-#define HCR_VA (1 << 8)
-#define HCR_VI (1 << 7)
-#define HCR_VF (1 << 6)
-#define HCR_AMO (1 << 5)
-#define HCR_IMO (1 << 4)
-#define HCR_FMO (1 << 3)
-#define HCR_PTW (1 << 2)
-#define HCR_SWIO (1 << 1)
-#define HCR_VM 1
-
-/*
- * The bits we set in HCR:
- * TAC: Trap ACTLR
- * TSC: Trap SMC
- * TVM: Trap VM ops (until MMU and caches are on)
- * TSW: Trap cache operations by set/way
- * TWI: Trap WFI
- * TWE: Trap WFE
- * TIDCP: Trap L2CTLR/L2ECTLR
- * BSU_IS: Upgrade barriers to the inner shareable domain
- * FB: Force broadcast of all maintainance operations
- * AMO: Override CPSR.A and enable signaling with VA
- * IMO: Override CPSR.I and enable signaling with VI
- * FMO: Override CPSR.F and enable signaling with VF
- * SWIO: Turn set/way invalidates into set/way clean+invalidate
- */
-#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
- HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
- HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
-
-/* System Control Register (SCTLR) bits */
-#define SCTLR_TE (1 << 30)
-#define SCTLR_EE (1 << 25)
-#define SCTLR_V (1 << 13)
-
-/* Hyp System Control Register (HSCTLR) bits */
-#define HSCTLR_TE (1 << 30)
-#define HSCTLR_EE (1 << 25)
-#define HSCTLR_FI (1 << 21)
-#define HSCTLR_WXN (1 << 19)
-#define HSCTLR_I (1 << 12)
-#define HSCTLR_C (1 << 2)
-#define HSCTLR_A (1 << 1)
-#define HSCTLR_M 1
-#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
- HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
-
-/* TTBCR and HTCR Registers bits */
-#define TTBCR_EAE (1 << 31)
-#define TTBCR_IMP (1 << 30)
-#define TTBCR_SH1 (3 << 28)
-#define TTBCR_ORGN1 (3 << 26)
-#define TTBCR_IRGN1 (3 << 24)
-#define TTBCR_EPD1 (1 << 23)
-#define TTBCR_A1 (1 << 22)
-#define TTBCR_T1SZ (7 << 16)
-#define TTBCR_SH0 (3 << 12)
-#define TTBCR_ORGN0 (3 << 10)
-#define TTBCR_IRGN0 (3 << 8)
-#define TTBCR_EPD0 (1 << 7)
-#define TTBCR_T0SZ (7 << 0)
-#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
-
-/* Hyp System Trap Register */
-#define HSTR_T(x) (1 << x)
-#define HSTR_TTEE (1 << 16)
-#define HSTR_TJDBX (1 << 17)
-
-/* Hyp Coprocessor Trap Register */
-#define HCPTR_TCP(x) (1 << x)
-#define HCPTR_TCP_MASK (0x3fff)
-#define HCPTR_TASE (1 << 15)
-#define HCPTR_TTA (1 << 20)
-#define HCPTR_TCPAC (1 << 31)
-
-/* Hyp Debug Configuration Register bits */
-#define HDCR_TDRA (1 << 11)
-#define HDCR_TDOSA (1 << 10)
-#define HDCR_TDA (1 << 9)
-#define HDCR_TDE (1 << 8)
-#define HDCR_HPME (1 << 7)
-#define HDCR_TPM (1 << 6)
-#define HDCR_TPMCR (1 << 5)
-#define HDCR_HPMN_MASK (0x1F)
-
-/*
- * The architecture supports 40-bit IPA as input to the 2nd stage translations
- * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
- * space.
- */
-#define KVM_PHYS_SHIFT (40)
-#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL))
-#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
-
-/* Virtualization Translation Control Register (VTCR) bits */
-#define VTCR_SH0 (3 << 12)
-#define VTCR_ORGN0 (3 << 10)
-#define VTCR_IRGN0 (3 << 8)
-#define VTCR_SL0 (3 << 6)
-#define VTCR_S (1 << 4)
-#define VTCR_T0SZ (0xf)
-#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
- VTCR_S | VTCR_T0SZ)
-#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
-#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
-#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
-#define KVM_VTCR_SL0 VTCR_SL_L1
-/* stage-2 input address range defined as 2^(32-T0SZ) */
-#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
-#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
-#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
-
-/* Virtualization Translation Table Base Register (VTTBR) bits */
-#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
-#define VTTBR_X (14 - KVM_T0SZ)
-#else
-#define VTTBR_X (5 - KVM_T0SZ)
-#endif
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
-#define VTTBR_VMID_SHIFT _AC(48, ULL)
-#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
-
-/* Hyp Syndrome Register (HSR) bits */
-#define HSR_EC_SHIFT (26)
-#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
-#define HSR_IL (_AC(1, UL) << 25)
-#define HSR_ISS (HSR_IL - 1)
-#define HSR_ISV_SHIFT (24)
-#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
-#define HSR_SRT_SHIFT (16)
-#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
-#define HSR_FSC (0x3f)
-#define HSR_FSC_TYPE (0x3c)
-#define HSR_SSE (1 << 21)
-#define HSR_WNR (1 << 6)
-#define HSR_CV_SHIFT (24)
-#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
-#define HSR_COND_SHIFT (20)
-#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT)
-
-#define FSC_FAULT (0x04)
-#define FSC_ACCESS (0x08)
-#define FSC_PERM (0x0c)
-#define FSC_SEA (0x10)
-#define FSC_SEA_TTW0 (0x14)
-#define FSC_SEA_TTW1 (0x15)
-#define FSC_SEA_TTW2 (0x16)
-#define FSC_SEA_TTW3 (0x17)
-#define FSC_SECC (0x18)
-#define FSC_SECC_TTW0 (0x1c)
-#define FSC_SECC_TTW1 (0x1d)
-#define FSC_SECC_TTW2 (0x1e)
-#define FSC_SECC_TTW3 (0x1f)
-
-/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
-#define HPFAR_MASK (~0xf)
-
-#define HSR_EC_UNKNOWN (0x00)
-#define HSR_EC_WFI (0x01)
-#define HSR_EC_CP15_32 (0x03)
-#define HSR_EC_CP15_64 (0x04)
-#define HSR_EC_CP14_MR (0x05)
-#define HSR_EC_CP14_LS (0x06)
-#define HSR_EC_CP_0_13 (0x07)
-#define HSR_EC_CP10_ID (0x08)
-#define HSR_EC_JAZELLE (0x09)
-#define HSR_EC_BXJ (0x0A)
-#define HSR_EC_CP14_64 (0x0C)
-#define HSR_EC_SVC_HYP (0x11)
-#define HSR_EC_HVC (0x12)
-#define HSR_EC_SMC (0x13)
-#define HSR_EC_IABT (0x20)
-#define HSR_EC_IABT_HYP (0x21)
-#define HSR_EC_DABT (0x24)
-#define HSR_EC_DABT_HYP (0x25)
-#define HSR_EC_MAX (0x3f)
-
-#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
-
-#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1)
-
-#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
-#define HSR_DABT_CM (_AC(1, UL) << 8)
-#define HSR_DABT_EA (_AC(1, UL) << 9)
-
-#define kvm_arm_exception_type \
- {0, "RESET" }, \
- {1, "UNDEFINED" }, \
- {2, "SOFTWARE" }, \
- {3, "PREF_ABORT" }, \
- {4, "DATA_ABORT" }, \
- {5, "IRQ" }, \
- {6, "FIQ" }, \
- {7, "HVC" }
-
-#define HSRECN(x) { HSR_EC_##x, #x }
-
-#define kvm_arm_exception_class \
- HSRECN(UNKNOWN), HSRECN(WFI), HSRECN(CP15_32), HSRECN(CP15_64), \
- HSRECN(CP14_MR), HSRECN(CP14_LS), HSRECN(CP_0_13), HSRECN(CP10_ID), \
- HSRECN(JAZELLE), HSRECN(BXJ), HSRECN(CP14_64), HSRECN(SVC_HYP), \
- HSRECN(HVC), HSRECN(SMC), HSRECN(IABT), HSRECN(IABT_HYP), \
- HSRECN(DABT), HSRECN(DABT_HYP)
-
-
-#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
deleted file mode 100644
index 14d68a4d826f..000000000000
--- a/arch/arm/include/asm/kvm_asm.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_ASM_H__
-#define __ARM_KVM_ASM_H__
-
-#include <asm/virt.h>
-
-#define ARM_EXIT_WITH_ABORT_BIT 31
-#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
-#define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
-
-#define ARM_EXCEPTION_RESET 0
-#define ARM_EXCEPTION_UNDEFINED 1
-#define ARM_EXCEPTION_SOFTWARE 2
-#define ARM_EXCEPTION_PREF_ABORT 3
-#define ARM_EXCEPTION_DATA_ABORT 4
-#define ARM_EXCEPTION_IRQ 5
-#define ARM_EXCEPTION_FIQ 6
-#define ARM_EXCEPTION_HVC 7
-#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
-/*
- * The rr_lo_hi macro swaps a pair of registers depending on
- * current endianness. It is used in conjunction with ldrd and strd
- * instructions that load/store a 64-bit value from/to memory to/from
- * a pair of registers which are used with the mrrc and mcrr instructions.
- * If used with the ldrd/strd instructions, the a1 parameter is the first
- * source/destination register and the a2 parameter is the second
- * source/destination register. Note that the ldrd/strd instructions
- * already swap the bytes within the words correctly according to the
- * endianness setting, but the order of the registers need to be effectively
- * swapped when used with the mrrc/mcrr instructions.
- */
-#ifdef CONFIG_CPU_ENDIAN_BE8
-#define rr_lo_hi(a1, a2) a2, a1
-#else
-#define rr_lo_hi(a1, a2) a1, a2
-#endif
-
-#define kvm_ksym_ref(kva) (kva)
-
-#ifndef __ASSEMBLY__
-struct kvm;
-struct kvm_vcpu;
-
-extern char __kvm_hyp_init[];
-extern char __kvm_hyp_init_end[];
-
-extern char __kvm_hyp_vector[];
-
-extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
-
-extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
-
-extern void __init_stage2_translation(void);
-
-extern u64 __vgic_v3_get_ich_vtr_el2(void);
-extern u64 __vgic_v3_read_vmcr(void);
-extern void __vgic_v3_write_vmcr(u32 vmcr);
-extern void __vgic_v3_init_lrs(void);
-
-#endif
-
-#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
deleted file mode 100644
index e74ab0fbab79..000000000000
--- a/arch/arm/include/asm/kvm_coproc.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2012 Rusty Russell IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_COPROC_H__
-#define __ARM_KVM_COPROC_H__
-#include <linux/kvm_host.h>
-
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_coproc_target_table {
- unsigned target;
- const struct coproc_reg *table;
- size_t num;
-};
-void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
-
-int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-void kvm_coproc_table_init(void);
-
-struct kvm_one_reg;
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-#endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
deleted file mode 100644
index 9a8a45aaf19a..000000000000
--- a/arch/arm/include/asm/kvm_emulate.h
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_EMULATE_H__
-#define __ARM_KVM_EMULATE_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
-#include <asm/kvm_arm.h>
-#include <asm/cputype.h>
-
-unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
-
-static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
- u8 reg_num)
-{
- return *vcpu_reg(vcpu, reg_num);
-}
-
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
- unsigned long val)
-{
- *vcpu_reg(vcpu, reg_num) = val;
-}
-
-bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
-void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
-void kvm_inject_undefined(struct kvm_vcpu *vcpu);
-void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
-
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
-{
- return kvm_condition_valid32(vcpu);
-}
-
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
- kvm_skip_instr32(vcpu, is_wide_instr);
-}
-
-static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr = HCR_GUEST_MASK;
-}
-
-static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.hcr;
-}
-
-static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
-{
- vcpu->arch.hcr = hcr;
-}
-
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
-{
- return 1;
-}
-
-static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
-{
- return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
-}
-
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
-{
- return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
-}
-
-static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
-{
- *vcpu_cpsr(vcpu) |= PSR_T_BIT;
-}
-
-static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
- return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
-}
-
-static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
- return cpsr_mode > USR_MODE;;
-}
-
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hsr;
-}
-
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
-{
- u32 hsr = kvm_vcpu_get_hsr(vcpu);
-
- if (hsr & HSR_CV)
- return (hsr & HSR_COND) >> HSR_COND_SHIFT;
-
- return -1;
-}
-
-static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hxfar;
-}
-
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
-{
- return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
-}
-
-static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
-}
-
-static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
-}
-
-static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
-}
-
-static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
-{
- return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
-}
-
-static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
-}
-
-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
-}
-
-static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
-}
-
-/* Get Access Size from a data abort */
-static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
-{
- switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
- case 0:
- return 1;
- case 1:
- return 2;
- case 2:
- return 4;
- default:
- kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
- return -EFAULT;
- }
-}
-
-/* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
-}
-
-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
-}
-
-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
-}
-
-static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
-}
-
-static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
-}
-
-static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
-}
-
-static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
-{
- return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
-}
-
-static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
-{
- *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-}
-
-static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
-{
- return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
-}
-
-static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return be16_to_cpu(data & 0xffff);
- default:
- return be32_to_cpu(data);
- }
- } else {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return le16_to_cpu(data & 0xffff);
- default:
- return le32_to_cpu(data);
- }
- }
-}
-
-static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_be16(data & 0xffff);
- default:
- return cpu_to_be32(data);
- }
- } else {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_le16(data & 0xffff);
- default:
- return cpu_to_le32(data);
- }
- }
-}
-
-#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
deleted file mode 100644
index 127e2dd2e21c..000000000000
--- a/arch/arm/include/asm/kvm_host.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_HOST_H__
-#define __ARM_KVM_HOST_H__
-
-#include <linux/types.h>
-#include <linux/kvm_types.h>
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
-#include <asm/fpstate.h>
-#include <kvm/arm_arch_timer.h>
-
-#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_HAVE_ONE_REG
-#define KVM_HALT_POLL_NS_DEFAULT 500000
-
-#define KVM_VCPU_MAX_FEATURES 2
-
-#include <kvm/arm_vgic.h>
-
-
-#ifdef CONFIG_ARM_GIC_V3
-#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#else
-#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
-#endif
-
-#define KVM_REQ_SLEEP \
- KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
-
-u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
-int __attribute_const__ kvm_target_cpu(void);
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_arch {
- /* VTTBR value associated with below pgd and vmid */
- u64 vttbr;
-
- /* The last vcpu id that ran on each physical CPU */
- int __percpu *last_vcpu_ran;
-
- /*
- * Anything that is not used directly from assembly code goes
- * here.
- */
-
- /* The VMID generation used for the virt. memory system */
- u64 vmid_gen;
- u32 vmid;
-
- /* Stage-2 page table */
- pgd_t *pgd;
-
- /* Interrupt controller */
- struct vgic_dist vgic;
- int max_vcpus;
-};
-
-#define KVM_NR_MEM_OBJS 40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
- int nobjs;
- void *objects[KVM_NR_MEM_OBJS];
-};
-
-struct kvm_vcpu_fault_info {
- u32 hsr; /* Hyp Syndrome Register */
- u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
- u32 hpfar; /* Hyp IPA Fault Address Register */
-};
-
-/*
- * 0 is reserved as an invalid value.
- * Order should be kept in sync with the save/restore code.
- */
-enum vcpu_sysreg {
- __INVALID_SYSREG__,
- c0_MPIDR, /* MultiProcessor ID Register */
- c0_CSSELR, /* Cache Size Selection Register */
- c1_SCTLR, /* System Control Register */
- c1_ACTLR, /* Auxiliary Control Register */
- c1_CPACR, /* Coprocessor Access Control */
- c2_TTBR0, /* Translation Table Base Register 0 */
- c2_TTBR0_high, /* TTBR0 top 32 bits */
- c2_TTBR1, /* Translation Table Base Register 1 */
- c2_TTBR1_high, /* TTBR1 top 32 bits */
- c2_TTBCR, /* Translation Table Base Control R. */
- c3_DACR, /* Domain Access Control Register */
- c5_DFSR, /* Data Fault Status Register */
- c5_IFSR, /* Instruction Fault Status Register */
- c5_ADFSR, /* Auxilary Data Fault Status R */
- c5_AIFSR, /* Auxilary Instrunction Fault Status R */
- c6_DFAR, /* Data Fault Address Register */
- c6_IFAR, /* Instruction Fault Address Register */
- c7_PAR, /* Physical Address Register */
- c7_PAR_high, /* PAR top 32 bits */
- c9_L2CTLR, /* Cortex A15/A7 L2 Control Register */
- c10_PRRR, /* Primary Region Remap Register */
- c10_NMRR, /* Normal Memory Remap Register */
- c12_VBAR, /* Vector Base Address Register */
- c13_CID, /* Context ID Register */
- c13_TID_URW, /* Thread ID, User R/W */
- c13_TID_URO, /* Thread ID, User R/O */
- c13_TID_PRIV, /* Thread ID, Privileged */
- c14_CNTKCTL, /* Timer Control Register (PL1) */
- c10_AMAIR0, /* Auxilary Memory Attribute Indirection Reg0 */
- c10_AMAIR1, /* Auxilary Memory Attribute Indirection Reg1 */
- NR_CP15_REGS /* Number of regs (incl. invalid) */
-};
-
-struct kvm_cpu_context {
- struct kvm_regs gp_regs;
- struct vfp_hard_struct vfp;
- u32 cp15[NR_CP15_REGS];
-};
-
-typedef struct kvm_cpu_context kvm_cpu_context_t;
-
-struct kvm_vcpu_arch {
- struct kvm_cpu_context ctxt;
-
- int target; /* Processor target */
- DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
-
- /* The CPU type we expose to the VM */
- u32 midr;
-
- /* HYP trapping configuration */
- u32 hcr;
-
- /* Interrupt related fields */
- u32 irq_lines; /* IRQ and FIQ levels */
-
- /* Exception Information */
- struct kvm_vcpu_fault_info fault;
-
- /* Host FP context */
- kvm_cpu_context_t *host_cpu_context;
-
- /* VGIC state */
- struct vgic_cpu vgic_cpu;
- struct arch_timer_cpu timer_cpu;
-
- /*
- * Anything that is not used directly from assembly code goes
- * here.
- */
-
- /* vcpu power-off state */
- bool power_off;
-
- /* Don't run the guest (internal implementation need) */
- bool pause;
-
- /* IO related fields */
- struct kvm_decode mmio_decode;
-
- /* Cache some mmu pages needed inside spinlock regions */
- struct kvm_mmu_memory_cache mmu_page_cache;
-
- /* Detect first run of a vcpu */
- bool has_run_once;
-};
-
-struct kvm_vm_stat {
- ulong remote_tlb_flush;
-};
-
-struct kvm_vcpu_stat {
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
- u64 hvc_exit_stat;
- u64 wfe_exit_stat;
- u64 wfi_exit_stat;
- u64 mmio_exit_user;
- u64 mmio_exit_kernel;
- u64 exits;
-};
-
-#define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r]
-
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-unsigned long kvm_call_hyp(void *hypfn, ...);
-void force_vm_exit(const cpumask_t *mask);
-
-#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
-int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-
-/* We do not have shadow page tables, hence the empty hooks */
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
- unsigned long address)
-{
-}
-
-struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
-void kvm_arm_halt_guest(struct kvm *kvm);
-void kvm_arm_resume_guest(struct kvm *kvm);
-
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
- int exception_index);
-
-static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
- unsigned long hyp_stack_ptr,
- unsigned long vector_ptr)
-{
- /*
- * Call initialization code, and switch to the full blown HYP
- * code. The init code doesn't need to preserve these
- * registers as r0-r3 are already callee saved according to
- * the AAPCS.
- * Note that we slightly misuse the prototype by casting the
- * stack pointer to a void *.
-
- * The PGDs are always passed as the third argument, in order
- * to be passed into r2-r3 to the init code (yes, this is
- * compliant with the PCS!).
- */
-
- kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
-}
-
-static inline void __cpu_init_stage2(void)
-{
- kvm_call_hyp(__init_stage2_translation);
-}
-
-static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
-{
- return 0;
-}
-
-int kvm_perf_init(void);
-int kvm_perf_teardown(void);
-
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
-
-struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-
-static inline void kvm_arch_hardware_unsetup(void) {}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
-
-static inline void kvm_arm_init_debug(void) {}
-static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
-
-int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr);
-int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr);
-int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr);
-
-#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
deleted file mode 100644
index 14b5903f0224..000000000000
--- a/arch/arm/include/asm/kvm_hyp.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM_KVM_HYP_H__
-#define __ARM_KVM_HYP_H__
-
-#include <linux/compiler.h>
-#include <linux/kvm_host.h>
-#include <asm/cp15.h>
-#include <asm/kvm_mmu.h>
-#include <asm/vfp.h>
-
-#define __hyp_text __section(.hyp.text) notrace
-
-#define __ACCESS_VFP(CRn) \
- "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
-
-#define write_special(v, r) \
- asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
-#define read_special(r) ({ \
- u32 __val; \
- asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
- __val; \
-})
-
-#define TTBR0 __ACCESS_CP15_64(0, c2)
-#define TTBR1 __ACCESS_CP15_64(1, c2)
-#define VTTBR __ACCESS_CP15_64(6, c2)
-#define PAR __ACCESS_CP15_64(0, c7)
-#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
-#define CNTVOFF __ACCESS_CP15_64(4, c14)
-
-#define MIDR __ACCESS_CP15(c0, 0, c0, 0)
-#define CSSELR __ACCESS_CP15(c0, 2, c0, 0)
-#define VPIDR __ACCESS_CP15(c0, 4, c0, 0)
-#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5)
-#define SCTLR __ACCESS_CP15(c1, 0, c0, 0)
-#define CPACR __ACCESS_CP15(c1, 0, c0, 2)
-#define HCR __ACCESS_CP15(c1, 4, c1, 0)
-#define HDCR __ACCESS_CP15(c1, 4, c1, 1)
-#define HCPTR __ACCESS_CP15(c1, 4, c1, 2)
-#define HSTR __ACCESS_CP15(c1, 4, c1, 3)
-#define TTBCR __ACCESS_CP15(c2, 0, c0, 2)
-#define HTCR __ACCESS_CP15(c2, 4, c0, 2)
-#define VTCR __ACCESS_CP15(c2, 4, c1, 2)
-#define DACR __ACCESS_CP15(c3, 0, c0, 0)
-#define DFSR __ACCESS_CP15(c5, 0, c0, 0)
-#define IFSR __ACCESS_CP15(c5, 0, c0, 1)
-#define ADFSR __ACCESS_CP15(c5, 0, c1, 0)
-#define AIFSR __ACCESS_CP15(c5, 0, c1, 1)
-#define HSR __ACCESS_CP15(c5, 4, c2, 0)
-#define DFAR __ACCESS_CP15(c6, 0, c0, 0)
-#define IFAR __ACCESS_CP15(c6, 0, c0, 2)
-#define HDFAR __ACCESS_CP15(c6, 4, c0, 0)
-#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
-#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
-#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
-#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
-#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
-#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
-#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
-#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
-#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
-#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0)
-#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1)
-#define VBAR __ACCESS_CP15(c12, 0, c0, 0)
-#define CID __ACCESS_CP15(c13, 0, c0, 1)
-#define TID_URW __ACCESS_CP15(c13, 0, c0, 2)
-#define TID_URO __ACCESS_CP15(c13, 0, c0, 3)
-#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
-#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
-#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
-#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
-#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
-
-#define VFP_FPEXC __ACCESS_VFP(FPEXC)
-
-/* AArch64 compatibility macros, only for the timer so far */
-#define read_sysreg_el0(r) read_sysreg(r##_el0)
-#define write_sysreg_el0(v, r) write_sysreg(v, r##_el0)
-
-#define cntv_ctl_el0 CNTV_CTL
-#define cntv_cval_el0 CNTV_CVAL
-#define cntvoff_el2 CNTVOFF
-#define cnthctl_el2 CNTHCTL
-
-void __timer_save_state(struct kvm_vcpu *vcpu);
-void __timer_restore_state(struct kvm_vcpu *vcpu);
-
-void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
-
-void __sysreg_save_state(struct kvm_cpu_context *ctxt);
-void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
-
-void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-
-asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
-asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
-static inline bool __vfp_enabled(void)
-{
- return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
-}
-
-void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
-void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
-
-asmlinkage int __guest_enter(struct kvm_vcpu *vcpu,
- struct kvm_cpu_context *host);
-asmlinkage int __hyp_do_panic(const char *, int, u32);
-
-#endif /* __ARM_KVM_HYP_H__ */
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
deleted file mode 100644
index f3a7de71f515..000000000000
--- a/arch/arm/include/asm/kvm_mmio.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_MMIO_H__
-#define __ARM_KVM_MMIO_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-
-struct kvm_decode {
- unsigned long rt;
- bool sign_extend;
-};
-
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
-
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
- phys_addr_t fault_ipa);
-
-#endif /* __ARM_KVM_MMIO_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
deleted file mode 100644
index fa6f2174276b..000000000000
--- a/arch/arm/include/asm/kvm_mmu.h
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_MMU_H__
-#define __ARM_KVM_MMU_H__
-
-#include <asm/memory.h>
-#include <asm/page.h>
-
-/*
- * We directly use the kernel VA for the HYP, as we can directly share
- * the mapping (HTTBR "covers" TTBR1).
- */
-#define kern_hyp_va(kva) (kva)
-
-/*
- * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
- */
-#define KVM_MMU_CACHE_MIN_PAGES 2
-
-#ifndef __ASSEMBLY__
-
-#include <linux/highmem.h>
-#include <asm/cacheflush.h>
-#include <asm/pgalloc.h>
-#include <asm/stage2_pgtable.h>
-
-int create_hyp_mappings(void *from, void *to, pgprot_t prot);
-int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
-void free_hyp_pgds(void);
-
-void stage2_unmap_vm(struct kvm *kvm);
-int kvm_alloc_stage2_pgd(struct kvm *kvm);
-void kvm_free_stage2_pgd(struct kvm *kvm);
-int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
- phys_addr_t pa, unsigned long size, bool writable);
-
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
-
-phys_addr_t kvm_mmu_get_httbr(void);
-phys_addr_t kvm_get_idmap_vector(void);
-int kvm_mmu_init(void);
-void kvm_clear_hyp_idmap(void);
-
-static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
-{
- *pmd = new_pmd;
- dsb(ishst);
-}
-
-static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
-{
- *pte = new_pte;
- dsb(ishst);
-}
-
-static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
-{
- pte_val(pte) |= L_PTE_S2_RDWR;
- return pte;
-}
-
-static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
-{
- pmd_val(pmd) |= L_PMD_S2_RDWR;
- return pmd;
-}
-
-static inline void kvm_set_s2pte_readonly(pte_t *pte)
-{
- pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
-}
-
-static inline bool kvm_s2pte_readonly(pte_t *pte)
-{
- return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
-}
-
-static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
-{
- pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
-}
-
-static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
-{
- return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
-}
-
-static inline bool kvm_page_empty(void *ptr)
-{
- struct page *ptr_page = virt_to_page(ptr);
- return page_count(ptr_page) == 1;
-}
-
-#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
-#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
-#define kvm_pud_table_empty(kvm, pudp) false
-
-#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
-#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#define hyp_pud_table_empty(pudp) false
-
-struct kvm;
-
-#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
-
-static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
-{
- return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
-}
-
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
- kvm_pfn_t pfn,
- unsigned long size)
-{
- /*
- * If we are going to insert an instruction page and the icache is
- * either VIPT or PIPT, there is a potential problem where the host
- * (or another VM) may have used the same page as this guest, and we
- * read incorrect data from the icache. If we're using a PIPT cache,
- * we can invalidate just that page, but if we are using a VIPT cache
- * we need to invalidate the entire icache - damn shame - as written
- * in the ARM ARM (DDI 0406C.b - Page B3-1393).
- *
- * VIVT caches are tagged using both the ASID and the VMID and don't
- * need any kind of flushing (DDI 0406C.b - Page B3-1392).
- *
- * We need to do this through a kernel mapping (using the
- * user-space mapping has proved to be the wrong
- * solution). For that, we need to kmap one page at a time,
- * and iterate over the range.
- */
-
- VM_BUG_ON(size & ~PAGE_MASK);
-
- while (size) {
- void *va = kmap_atomic_pfn(pfn);
-
- kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
- if (icache_is_pipt())
- __cpuc_coherent_user_range((unsigned long)va,
- (unsigned long)va + PAGE_SIZE);
-
- size -= PAGE_SIZE;
- pfn++;
-
- kunmap_atomic(va);
- }
-
- if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
- /* any kind of VIPT cache */
- __flush_icache_all();
- }
-}
-
-static inline void __kvm_flush_dcache_pte(pte_t pte)
-{
- void *va = kmap_atomic(pte_page(pte));
-
- kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
- kunmap_atomic(va);
-}
-
-static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
-{
- unsigned long size = PMD_SIZE;
- kvm_pfn_t pfn = pmd_pfn(pmd);
-
- while (size) {
- void *va = kmap_atomic_pfn(pfn);
-
- kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
- pfn++;
- size -= PAGE_SIZE;
-
- kunmap_atomic(va);
- }
-}
-
-static inline void __kvm_flush_dcache_pud(pud_t pud)
-{
-}
-
-#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
-
-void kvm_set_way_flush(struct kvm_vcpu *vcpu);
-void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
-
-static inline bool __kvm_cpu_uses_extended_idmap(void)
-{
- return false;
-}
-
-static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
- pgd_t *hyp_pgd,
- pgd_t *merged_hyp_pgd,
- unsigned long hyp_idmap_start) { }
-
-static inline unsigned int kvm_get_vmid_bits(void)
-{
- return 8;
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
deleted file mode 100644
index 6bda945d31fa..000000000000
--- a/arch/arm/include/asm/kvm_psci.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2012 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM_KVM_PSCI_H__
-#define __ARM_KVM_PSCI_H__
-
-#define KVM_ARM_PSCI_0_1 1
-#define KVM_ARM_PSCI_0_2 2
-
-int kvm_psci_version(struct kvm_vcpu *vcpu);
-int kvm_psci_call(struct kvm_vcpu *vcpu);
-
-#endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/limits.h b/arch/arm/include/asm/limits.h
deleted file mode 100644
index 08d8c6600804..000000000000
--- a/arch/arm/include/asm/limits.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __ASM_PIPE_H
-#define __ASM_PIPE_H
-
-#ifndef PAGE_SIZE
-#include <asm/page.h>
-#endif
-
-#define PIPE_BUF PAGE_SIZE
-
-#endif
-
diff --git a/arch/arm/include/asm/linkage.h b/arch/arm/include/asm/linkage.h
index 5a25632b1bc0..c4670694ada7 100644
--- a/arch/arm/include/asm/linkage.h
+++ b/arch/arm/include/asm/linkage.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 5c1ad11aa392..2b18a258204d 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/arch.h
*
* Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/types.h>
@@ -59,9 +56,6 @@ struct machine_desc {
void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
- void (*handle_irq)(struct pt_regs *);
-#endif
void (*restart)(enum reboot_mode, const char *);
};
@@ -84,7 +78,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
#define MACHINE_START(_type,_name) \
static const struct machine_desc __mach_desc_##_type \
__used \
- __attribute__((__section__(".arch.info.init"))) = { \
+ __section(".arch.info.init") = { \
.nr = MACH_TYPE_##_type, \
.name = _name,
@@ -94,7 +88,7 @@ static const struct machine_desc __mach_desc_##_type \
#define DT_MACHINE_START(_name, _namestr) \
static const struct machine_desc __mach_desc_##_name \
__used \
- __attribute__((__section__(".arch.info.init"))) = { \
+ __section(".arch.info.init") = { \
.nr = ~0, \
.name = _namestr,
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index 9e614a18e680..5ec11d7f0d04 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/dma.h
*
* Copyright (C) 1998-2000 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This header file describes the interface between the generic DMA handler
* (dma.c) and the architecture-specific DMA backends (dma-*.c)
*/
@@ -47,8 +44,3 @@ struct dma_struct {
* isa_dma_add - add an ISA-style DMA channel
*/
extern int isa_dma_add(unsigned int, dma_t *dma);
-
-/*
- * Add the ISA DMA controller. Always takes channels 0-7.
- */
-extern void isa_init_dma(void);
diff --git a/arch/arm/include/asm/mach/flash.h b/arch/arm/include/asm/mach/flash.h
index bada3f845a97..c9cbfdefc938 100644
--- a/arch/arm/include/asm/mach/flash.h
+++ b/arch/arm/include/asm/mach/flash.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/flash.h
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASMARM_MACH_FLASH_H
#define ASMARM_MACH_FLASH_H
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index de4634b51456..dfe832a3bfc7 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/irq.h
*
* Copyright (C) 1995-2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_MACH_IRQ_H
#define __ASM_ARM_MACH_IRQ_H
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 9b7c328fb207..2b8970d8e5a2 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/map.h
*
* Copyright (C) 1999-2000 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Page table mapping constructs and function prototypes
*/
#ifndef __ASM_MACH_MAP_H
@@ -30,6 +27,7 @@ enum {
MT_HIGH_VECTORS,
MT_MEMORY_RWX,
MT_MEMORY_RW,
+ MT_MEMORY_RO,
MT_ROM,
MT_MEMORY_RWX_NONCACHED,
MT_MEMORY_RW_DTCM,
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 233b4b50eff3..ea9bd08895b7 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/pci.h
*
* Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_MACH_PCI_H
@@ -20,10 +17,8 @@ struct pci_host_bridge;
struct device;
struct hw_pci {
- struct msi_controller *msi_ctrl;
struct pci_ops *ops;
int nr_controllers;
- unsigned int io_optional:1;
void **private_data;
int (*setup)(int nr, struct pci_sys_data *);
int (*scan)(int nr, struct pci_host_bridge *);
@@ -31,11 +26,6 @@ struct hw_pci {
void (*postinit)(void);
u8 (*swizzle)(struct pci_dev *dev, u8 *pin);
int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
- resource_size_t (*align_resource)(struct pci_dev *dev,
- const struct resource *res,
- resource_size_t start,
- resource_size_t size,
- resource_size_t align);
};
/*
diff --git a/arch/arm/include/asm/mach/sharpsl_param.h b/arch/arm/include/asm/mach/sharpsl_param.h
index 7a24ecf04220..700a377c20bf 100644
--- a/arch/arm/include/asm/mach/sharpsl_param.h
+++ b/arch/arm/include/asm/mach/sharpsl_param.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Hardware parameter area specific to Sharp SL series devices
*
* Copyright (c) 2005 Richard Purdie
*
* Based on Sharp's 2.4 kernel patches
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
struct sharpsl_param_info {
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 0f79e4dec7f9..5f522916ec99 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -1,19 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/time.h
*
* Copyright (C) 2004 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_MACH_TIME_H
#define __ASM_ARM_MACH_TIME_H
-extern void timer_tick(void);
-
typedef void (*clock_access_fn)(struct timespec64 *);
-extern int register_persistent_clock(clock_access_fn read_boot,
- clock_access_fn read_persistent);
+extern int register_persistent_clock(clock_access_fn read_persistent);
#endif
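The reworked register_persistent_clock() above now takes a single clock_access_fn for the persistent (suspend-surviving) clock. A minimal sketch of a platform wiring one up; my_read_persistent and my_persistent_clock_init are illustrative names rather than kernel code, and a real platform would read an RTC instead of returning zero:

#include <linux/init.h>
#include <linux/time64.h>
#include <asm/mach/time.h>

/* Illustrative reader: report a fixed time instead of querying an RTC. */
static void my_read_persistent(struct timespec64 *ts)
{
        ts->tv_sec  = 0;
        ts->tv_nsec = 0;
}

static int __init my_persistent_clock_init(void)
{
        return register_persistent_clock(my_read_persistent);
}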
diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h
index e8567bb99dfc..58922879a600 100644
--- a/arch/arm/include/asm/mc146818rtc.h
+++ b/arch/arm/include/asm/mc146818rtc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Machine dependent access functions for RTC registers.
*/
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index acd4983d9b1f..755c97de348c 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mcpm.h
*
* Created by: Nicolas Pitre, April 2012
* Copyright: (C) 2012-2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef MCPM_H
diff --git a/arch/arm/include/asm/mcs_spinlock.h b/arch/arm/include/asm/mcs_spinlock.h
index f652ad65840a..529d2cf4d06f 100644
--- a/arch/arm/include/asm/mcs_spinlock.h
+++ b/arch/arm/include/asm/mcs_spinlock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MCS_LOCK_H
#define __ASM_MCS_LOCK_H
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
index bf47a6c110a2..b10fd358ccc5 100644
--- a/arch/arm/include/asm/memblock.h
+++ b/arch/arm/include/asm/memblock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_MEMBLOCK_H
#define _ASM_ARM_MEMBLOCK_H
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 1f54e4e98c1e..7c2fa7dcec6d 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -1,18 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/memory.h
*
* Copyright (C) 2000-2002 Russell King
* modification for nommu, Hyok S. Choi, 2004
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Note: this file should not be included by non-asm/.h files
+ * Note: this file should not be included explicitly, include <asm/page.h>
+ * to get access to these definitions.
*/
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H
+#ifndef _ASMARM_PAGE_H
+#error "Do not include <asm/memory.h> directly"
+#endif
+
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
@@ -21,15 +23,16 @@
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
+#include <asm/kasan_def.h>
/*
- * Allow for constants defined here to be used from assembly code
- * by prepending the UL suffix only with actual C code compilation.
+ * PAGE_OFFSET: the virtual address of the start of lowmem, memory above
+ * the virtual address range for userspace.
+ * KERNEL_OFFSET: the virtual address of the start of the kernel image.
+ * We may further offset this with TEXT_OFFSET in practice.
*/
-#define UL(x) _AC(x, UL)
-
-/* PAGE_OFFSET - the virtual address of the start of the kernel image */
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+#define KERNEL_OFFSET (PAGE_OFFSET)
#ifdef CONFIG_MMU
@@ -37,7 +40,11 @@
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
+#ifndef CONFIG_KASAN
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
+#else
+#define TASK_SIZE (KASAN_SHADOW_START)
+#endif
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
@@ -76,6 +83,10 @@
*/
#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
+#define FDT_FIXED_BASE UL(0xff800000)
+#define FDT_FIXED_SIZE (2 * SECTION_SIZE)
+#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
+
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* Allow 16MB-aligned ioremap pages
@@ -88,6 +99,7 @@
#else /* CONFIG_MMU */
#ifndef __ASSEMBLY__
+extern unsigned long setup_vectors_base(void);
extern unsigned long vectors_base;
#define VECTORS_BASE vectors_base
#endif
@@ -115,6 +127,7 @@ extern unsigned long vectors_base;
#define MODULES_VADDR PAGE_OFFSET
#define XIP_VIRT_ADDR(physaddr) (physaddr)
+#define FDT_VIRT_BASE(physbase) ((void *)(physbase))
#endif /* !CONFIG_MMU */
@@ -135,12 +148,6 @@ extern unsigned long vectors_base;
#endif
/*
- * Convert a page to/from a physical address
- */
-#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
-#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
-
-/*
* PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
* memory. This is used for XIP and NoMMU kernels, and on platforms that don't
* have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
@@ -148,22 +155,15 @@ extern unsigned long vectors_base;
*/
#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-#ifdef CONFIG_XIP_KERNEL
+#ifndef __ASSEMBLY__
+
/*
- * When referencing data in RAM from the XIP region in a relative manner
- * with the MMU off, we need the relative offset between the two physical
- * addresses. The macro below achieves this, which is:
- * __pa(v_data) - __xip_pa(v_text)
+ * Physical start and end address of the kernel sections. These addresses are
+ * 2MB-aligned to match the section mappings placed over the kernel. We use
+ * u64 so that LPAE mappings beyond the 32bit limit will work out as well.
*/
-#define PHYS_RELATIVE(v_data, v_text) \
- (((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \
- ((v_text) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + \
- CONFIG_XIP_PHYS_ADDR))
-#else
-#define PHYS_RELATIVE(v_data, v_text) ((v_data) - (v_text))
-#endif
-
-#ifndef __ASSEMBLY__
+extern u64 kernel_sec_start;
+extern u64 kernel_sec_end;
/*
* Physical vs virtual RAM address space conversion. These are
@@ -181,6 +181,7 @@ extern unsigned long vectors_base;
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
+#define __PV_BITS_23_16 0x810000
#define __PV_BITS_7_0 0x81
extern unsigned long __pv_phys_pfn_offset;
@@ -191,43 +192,65 @@ extern const void *__pv_table_begin, *__pv_table_end;
#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
-#define __pv_stub(from,to,instr,type) \
+#ifndef CONFIG_THUMB2_KERNEL
+#define __pv_stub(from,to,instr) \
__asm__("@ __pv_stub\n" \
"1: " instr " %0, %1, %2\n" \
+ "2: " instr " %0, %0, %3\n" \
" .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
+ " .long 1b - ., 2b - .\n" \
" .popsection\n" \
: "=r" (to) \
- : "r" (from), "I" (type))
+ : "r" (from), "I" (__PV_BITS_31_24), \
+ "I"(__PV_BITS_23_16))
-#define __pv_stub_mov_hi(t) \
- __asm__ volatile("@ __pv_stub_mov\n" \
- "1: mov %R0, %1\n" \
+#define __pv_add_carry_stub(x, y) \
+ __asm__("@ __pv_add_carry_stub\n" \
+ "0: movw %R0, #0\n" \
+ " adds %Q0, %1, %R0, lsl #20\n" \
+ "1: mov %R0, %2\n" \
+ " adc %R0, %R0, #0\n" \
" .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
+ " .long 0b - ., 1b - .\n" \
" .popsection\n" \
- : "=r" (t) \
- : "I" (__PV_BITS_7_0))
+ : "=&r" (y) \
+ : "r" (x), "I" (__PV_BITS_7_0) \
+ : "cc")
+
+#else
+#define __pv_stub(from,to,instr) \
+ __asm__("@ __pv_stub\n" \
+ "0: movw %0, #0\n" \
+ " lsl %0, #21\n" \
+ " " instr " %0, %1, %0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 0b - .\n" \
+ " .popsection\n" \
+ : "=&r" (to) \
+ : "r" (from))
#define __pv_add_carry_stub(x, y) \
- __asm__ volatile("@ __pv_add_carry_stub\n" \
- "1: adds %Q0, %1, %2\n" \
+ __asm__("@ __pv_add_carry_stub\n" \
+ "0: movw %R0, #0\n" \
+ " lsls %R0, #21\n" \
+ " adds %Q0, %1, %R0\n" \
+ "1: mvn %R0, #0\n" \
" adc %R0, %R0, #0\n" \
" .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
+ " .long 0b - ., 1b - .\n" \
" .popsection\n" \
- : "+r" (y) \
- : "r" (x), "I" (__PV_BITS_31_24) \
+ : "=&r" (y) \
+ : "r" (x) \
: "cc")
+#endif
static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
phys_addr_t t;
if (sizeof(phys_addr_t) == 4) {
- __pv_stub(x, t, "add", __PV_BITS_31_24);
+ __pv_stub(x, t, "add");
} else {
- __pv_stub_mov_hi(t);
__pv_add_carry_stub(x, t);
}
return t;
@@ -243,7 +266,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
* assembler expression receives 32 bit argument
* in place where 'r' 32 bit operand is expected.
*/
- __pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
+ __pv_stub((unsigned long) x, t, "sub");
return t;
}
@@ -264,10 +287,12 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#endif
-#define virt_to_pfn(kaddr) \
- ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
- PHYS_PFN_OFFSET)
-
+static inline unsigned long virt_to_pfn(const void *p)
+{
+ unsigned long kaddr = (unsigned long)p;
+ return (((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) +
+ PHYS_PFN_OFFSET);
+}
#define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x))
#ifdef CONFIG_DEBUG_VIRTUAL
@@ -346,19 +371,6 @@ static inline unsigned long __virt_to_idmap(unsigned long x)
#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
/*
- * Virtual <-> DMA view memory address translations
- * Again, these are *only* valid on the kernel direct mapped RAM
- * memory. Use of these is *deprecated* (and that doesn't mean
- * use the __ prefixed forms instead.) See dma-mapping.h.
- */
-#ifndef __virt_to_bus
-#define __virt_to_bus __virt_to_phys
-#define __bus_to_virt __phys_to_virt
-#define __pfn_to_bus(x) __pfn_to_phys(x)
-#define __bus_to_pfn(x) __phys_to_pfn(x)
-#endif
-
-/*
* Conversion between a struct page and a physical address.
*
* page_to_pfn(page) convert a struct page * to a PFN number
@@ -375,6 +387,4 @@ static inline unsigned long __virt_to_idmap(unsigned long x)
#endif
-#include <asm-generic/memory_model.h>
-
#endif
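The __pv_stub sequences above are patched at boot so their immediates encode the platform's physical-to-virtual delta; with runtime patching out of the picture, the same conversions reduce to a fixed linear offset between PAGE_OFFSET and PHYS_OFFSET. A minimal unpatched sketch of that relationship (the example_* helpers are illustrative only):

#include <asm/page.h>   /* pulls in the definitions above via <asm/memory.h> */

static inline phys_addr_t example_virt_to_phys(unsigned long x)
{
        return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

static inline unsigned long example_phys_to_virt(phys_addr_t x)
{
        return (unsigned long)(x - PHYS_OFFSET) + PAGE_OFFSET;
}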
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
new file mode 100644
index 000000000000..2189e507c8e0
--- /dev/null
+++ b/arch/arm/include/asm/mman.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <asm/system_info.h>
+#include <uapi/asm/mman.h>
+
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return cpu_architecture() >= CPU_ARCH_ARMv6;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+
+#endif /* __ASM_MMAN_H__ */
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index a5b47421059d..e049723840d3 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM_MMU_H
#define __ARM_MMU_H
@@ -9,11 +10,15 @@ typedef struct {
#else
int switch_pending;
#endif
- unsigned int vmalloc_seq;
+ atomic_t vmalloc_seq;
unsigned long sigpage;
#ifdef CONFIG_VDSO
unsigned long vdso;
#endif
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
} mm_context_t;
#ifdef CONFIG_CPU_HAS_ASID
@@ -33,6 +38,10 @@ typedef struct {
*/
typedef struct {
unsigned long end_brk;
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
} mm_context_t;
#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 7f303295ef19..db2cb06aa8cf 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mmu_context.h
*
* Copyright (C) 1996 Russell King.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Changelog:
* 27-06-1996 RMK Created
*/
@@ -26,9 +23,21 @@
void __check_vmalloc_seq(struct mm_struct *mm);
+#ifdef CONFIG_MMU
+static inline void check_vmalloc_seq(struct mm_struct *mm)
+{
+ if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
+ unlikely(atomic_read(&mm->context.vmalloc_seq) !=
+ atomic_read(&init_mm.context.vmalloc_seq)))
+ __check_vmalloc_seq(mm);
+}
+#endif
+
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+
+#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -53,8 +62,7 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
{
- if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
- __check_vmalloc_seq(mm);
+ check_vmalloc_seq(mm);
if (irqs_disabled())
/*
@@ -95,33 +103,11 @@ static inline void finish_arch_post_lock_switch(void)
#endif /* CONFIG_MMU */
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
- return 0;
-}
-
-
#endif /* CONFIG_CPU_HAS_ASID */
-#define destroy_context(mm) do { } while(0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm: describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-/*
* This is the actual mm switch as far as the scheduler
* is concerned. No registers are touched. We avoid
* calling the CPU specific function when the mm hasn't
@@ -152,6 +138,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}
-#define deactivate_mm(tsk,mm) do { } while (0)
+#ifdef CONFIG_VMAP_STACK
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ if (mm != &init_mm)
+ check_vmalloc_seq(mm);
+}
+#define enter_lazy_tlb enter_lazy_tlb
+#endif
+
+#include <asm-generic/mmu_context.h>
#endif
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index ed2319663a1e..07c51a34f77d 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -1,31 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_MODULE_H
#define _ASM_ARM_MODULE_H
#include <asm-generic/module.h>
-
-struct unwind_table;
+#include <asm/unwind.h>
#ifdef CONFIG_ARM_UNWIND
-enum {
- ARM_SEC_INIT,
- ARM_SEC_DEVINIT,
- ARM_SEC_CORE,
- ARM_SEC_EXIT,
- ARM_SEC_DEVEXIT,
- ARM_SEC_HOT,
- ARM_SEC_UNLIKELY,
- ARM_SEC_MAX,
-};
+#define ELF_SECTION_UNWIND 0x70000001
#endif
+#define PLT_ENT_STRIDE L1_CACHE_BYTES
+#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
+#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
+
+struct plt_entries {
+ u32 ldr[PLT_ENT_COUNT];
+ u32 lit[PLT_ENT_COUNT];
+};
+
struct mod_plt_sec {
struct elf32_shdr *plt;
+ struct plt_entries *plt_ent;
int plt_count;
};
struct mod_arch_specific {
#ifdef CONFIG_ARM_UNWIND
- struct unwind_table *unwind[ARM_SEC_MAX];
+ struct list_head unwind_list;
+ struct unwind_table *init_table;
#endif
#ifdef CONFIG_ARM_MODULE_PLTS
struct mod_plt_sec core;
@@ -33,30 +35,23 @@ struct mod_arch_specific {
#endif
};
+struct module;
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);
-
-/*
- * Add the ARM architecture version to the version magic string
- */
-#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
-
-/* Add __virt_to_phys patching state as well */
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+#ifdef CONFIG_ARM_MODULE_PLTS
+bool in_module_plt(unsigned long loc);
#else
-#define MODULE_ARCH_VERMAGIC_P2V ""
+static inline bool in_module_plt(unsigned long loc) { return false; }
#endif
-/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
#ifdef CONFIG_THUMB2_KERNEL
-#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
-#else
-#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+#define HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
+static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
+{
+ if (ELF_ST_TYPE(sym->st_info) == STT_FUNC)
+ return sym->st_value & ~1;
+
+ return sym->st_value;
+}
#endif
-#define MODULE_ARCH_VERMAGIC \
- MODULE_ARCH_VERMAGIC_ARMVSN \
- MODULE_ARCH_VERMAGIC_ARMTHUMB \
- MODULE_ARCH_VERMAGIC_P2V
-
#endif /* _ASM_ARM_MODULE_H */
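get_module_plt() above returns the address of a PLT veneer (one of the plt_entries slots) for branch targets that a module's direct B/BL cannot reach. A hedged sketch of how a relocation pass might use it; branch_in_range() and fixup_call_target() are illustrative helpers, not kernel functions:

#include <linux/elf.h>
#include <linux/kconfig.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <asm/module.h>

/* ARM B/BL reaches roughly +/-32 MiB from the PC (which reads 8 bytes ahead). */
static bool branch_in_range(unsigned long loc, Elf32_Addr val)
{
        long offset = (long)val - (long)(loc + 8);

        return offset >= -(long)SZ_32M && offset < (long)SZ_32M;
}

static Elf32_Addr fixup_call_target(struct module *mod, unsigned long loc,
                                    Elf32_Addr val)
{
        if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && !branch_in_range(loc, val))
                val = get_module_plt(mod, loc, val);

        return val;
}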
diff --git a/arch/arm/include/asm/module.lds.h b/arch/arm/include/asm/module.lds.h
new file mode 100644
index 000000000000..0e7cb4e314b4
--- /dev/null
+++ b/arch/arm/include/asm/module.lds.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef CONFIG_ARM_MODULE_PLTS
+SECTIONS {
+ .plt : { BYTE(0) }
+ .init.plt : { BYTE(0) }
+}
+#endif
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index c3247cc2fe08..5e088c83d3d8 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM_MPU_H
#define __ARM_MPU_H
-#ifdef CONFIG_ARM_MPU
-
/* MPUIR layout */
#define MPUIR_nU 1
#define MPUIR_DREGION 8
@@ -13,64 +12,122 @@
/* ID_MMFR0 data relevant to MPU */
#define MMFR0_PMSA (0xF << 4)
#define MMFR0_PMSAv7 (3 << 4)
+#define MMFR0_PMSAv8 (4 << 4)
/* MPU D/I Size Register fields */
-#define MPU_RSR_SZ 1
-#define MPU_RSR_EN 0
+#define PMSAv7_RSR_SZ 1
+#define PMSAv7_RSR_EN 0
+#define PMSAv7_RSR_SD 8
+
+/* Number of subregions (SD) */
+#define PMSAv7_NR_SUBREGS 8
+#define PMSAv7_MIN_SUBREG_SIZE 256
/* The D/I RSR value for an enabled region spanning the whole of memory */
-#define MPU_RSR_ALL_MEM 63
+#define PMSAv7_RSR_ALL_MEM 63
/* Individual bits in the DR/IR ACR */
-#define MPU_ACR_XN (1 << 12)
-#define MPU_ACR_SHARED (1 << 2)
+#define PMSAv7_ACR_XN (1 << 12)
+#define PMSAv7_ACR_SHARED (1 << 2)
/* C, B and TEX[2:0] bits only have semantic meanings when grouped */
-#define MPU_RGN_CACHEABLE 0xB
-#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
-#define MPU_RGN_STRONGLY_ORDERED 0
+#define PMSAv7_RGN_CACHEABLE 0xB
+#define PMSAv7_RGN_SHARED_CACHEABLE (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
+#define PMSAv7_RGN_STRONGLY_ORDERED 0
/* Main region should only be shared for SMP */
#ifdef CONFIG_SMP
-#define MPU_RGN_NORMAL (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
+#define PMSAv7_RGN_NORMAL (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
#else
-#define MPU_RGN_NORMAL MPU_RGN_CACHEABLE
+#define PMSAv7_RGN_NORMAL PMSAv7_RGN_CACHEABLE
#endif
/* Access permission bits of ACR (only define those that we use)*/
-#define MPU_AP_PL1RW_PL0RW (0x3 << 8)
-#define MPU_AP_PL1RW_PL0R0 (0x2 << 8)
-#define MPU_AP_PL1RW_PL0NA (0x1 << 8)
+#define PMSAv7_AP_PL1RO_PL0NA (0x5 << 8)
+#define PMSAv7_AP_PL1RW_PL0RW (0x3 << 8)
+#define PMSAv7_AP_PL1RW_PL0R0 (0x2 << 8)
+#define PMSAv7_AP_PL1RW_PL0NA (0x1 << 8)
+
+#define PMSAv8_BAR_XN 1
+
+#define PMSAv8_LAR_EN 1
+#define PMSAv8_LAR_IDX(n) (((n) & 0x7) << 1)
+
+
+#define PMSAv8_AP_PL1RW_PL0NA (0 << 1)
+#define PMSAv8_AP_PL1RW_PL0RW (1 << 1)
+#define PMSAv8_AP_PL1RO_PL0RO (3 << 1)
+
+#ifdef CONFIG_SMP
+#define PMSAv8_RGN_SHARED (3 << 3) // inner sharable
+#else
+#define PMSAv8_RGN_SHARED (0 << 3)
+#endif
+
+#define PMSAv8_RGN_DEVICE_nGnRnE 0
+#define PMSAv8_RGN_NORMAL 1
+
+#define PMSAv8_MAIR(attr, mt) ((attr) << ((mt) * 8))
+
+#ifdef CONFIG_CPU_V7M
+#define PMSAv8_MINALIGN 32
+#else
+#define PMSAv8_MINALIGN 64
+#endif
/* For minimal static MPU region configurations */
-#define MPU_PROBE_REGION 0
-#define MPU_BG_REGION 1
-#define MPU_RAM_REGION 2
-#define MPU_VECTORS_REGION 3
+#define PMSAv7_PROBE_REGION 0
+#define PMSAv7_BG_REGION 1
+#define PMSAv7_RAM_REGION 2
+#define PMSAv7_ROM_REGION 3
+
+/* Fixed for PMSAv8 only */
+#define PMSAv8_XIP_REGION 0
+#define PMSAv8_KERNEL_REGION 1
/* Maximum number of regions Linux is interested in */
-#define MPU_MAX_REGIONS 16
+#define MPU_MAX_REGIONS 16
-#define MPU_DATA_SIDE 0
-#define MPU_INSTR_SIDE 1
+#define PMSAv7_DATA_SIDE 0
+#define PMSAv7_INSTR_SIDE 1
#ifndef __ASSEMBLY__
struct mpu_rgn {
/* Assume same attributes for d/i-side */
- u32 drbar;
- u32 drsr;
- u32 dracr;
+ union {
+ u32 drbar; /* PMSAv7 */
+ u32 prbar; /* PMSAv8 */
+ };
+ union {
+ u32 drsr; /* PMSAv7 */
+ u32 prlar; /* PMSAv8 */
+ };
+ union {
+ u32 dracr; /* PMSAv7 */
+ u32 unused; /* not used in PMSAv8 */
+ };
};
struct mpu_rgn_info {
- u32 mpuir;
+ unsigned int used;
struct mpu_rgn rgns[MPU_MAX_REGIONS];
};
extern struct mpu_rgn_info mpu_rgn_info;
-#endif /* __ASSEMBLY__ */
+#ifdef CONFIG_ARM_MPU
+extern void __init pmsav7_adjust_lowmem_bounds(void);
+extern void __init pmsav8_adjust_lowmem_bounds(void);
-#endif /* CONFIG_ARM_MPU */
+extern void __init pmsav7_setup(void);
+extern void __init pmsav8_setup(void);
+#else
+static inline void pmsav7_adjust_lowmem_bounds(void) {};
+static inline void pmsav8_adjust_lowmem_bounds(void) {};
+static inline void pmsav7_setup(void) {};
+static inline void pmsav8_setup(void) {};
+#endif
+
+#endif /* __ASSEMBLY__ */
#endif
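With the register names now prefixed by PMSA generation, a static PMSAv7 region descriptor can be assembled from the macros above. A minimal sketch; example_ram_rgn and its values are illustrative and do not correspond to the fixed region numbers listed:

#include <asm/mpu.h>

/* An enabled region spanning all of memory: normal attributes, kernel
 * read/write, no user access. Illustrative only. */
static struct mpu_rgn example_ram_rgn = {
        .drbar = 0,                             /* region base address */
        .drsr  = PMSAv7_RSR_ALL_MEM,            /* enabled, whole of memory */
        .dracr = PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL,
};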
diff --git a/arch/arm/include/asm/mtd-xip.h b/arch/arm/include/asm/mtd-xip.h
index d79d66d2cf71..dfcef0152e3d 100644
--- a/arch/arm/include/asm/mtd-xip.h
+++ b/arch/arm/include/asm/mtd-xip.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MTD primitives for XIP support. Architecture specific functions
*
@@ -6,10 +7,6 @@
* Author: Nicolas Pitre
* Created: Nov 2, 2004
* Copyright: (C) 2004 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ARM_MTD_XIP_H__
diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h
index 8f730fe70093..aac10ba33ee2 100644
--- a/arch/arm/include/asm/neon.h
+++ b/arch/arm/include/asm/neon.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/include/asm/neon.h
*
* Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <asm/hwcap.h>
diff --git a/arch/arm/include/asm/nwflash.h b/arch/arm/include/asm/nwflash.h
index 04e5a557a884..66b7e68c9b58 100644
--- a/arch/arm/include/asm/nwflash.h
+++ b/arch/arm/include/asm/nwflash.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FLASH_H
#define _FLASH_H
-#define FLASH_MINOR 160 /* MAJOR is 10 - miscdevice */
#define CMD_WRITE_DISABLE 0
#define CMD_WRITE_ENABLE 0x28
#define CMD_WRITE_BASE64K_ENABLE 0x47
diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h
index bc3a9174417c..b6f4b35024b8 100644
--- a/arch/arm/include/asm/opcodes-sec.h
+++ b/arch/arm/include/asm/opcodes-sec.h
@@ -1,12 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2012 ARM Limited
*/
diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h
index efcfdf92d9d5..0b58da81b78e 100644
--- a/arch/arm/include/asm/opcodes-virt.h
+++ b/arch/arm/include/asm/opcodes-virt.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* opcodes-virt.h: Opcode definitions for the ARM virtualization extensions
* Copyright (C) 2012 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __ASM_ARM_OPCODES_VIRT_H
#define __ASM_ARM_OPCODES_VIRT_H
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index e796c598513b..38e3eabff5c3 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/opcodes.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_OPCODES_H
@@ -113,12 +110,17 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x)
#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x)
#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x)
-#ifndef CONFIG_CPU_ENDIAN_BE32
+#ifdef CONFIG_CPU_ENDIAN_BE32
+#ifndef __ASSEMBLY__
/*
* On BE32 systems, using 32-bit accesses to store Thumb instructions will not
* work in all cases, due to alignment constraints. For now, a correct
- * version is not provided for BE32.
+ * version is not provided for BE32, but the prototype needs to be there
+ * to compile patch.c.
*/
+extern __u32 __opcode_to_mem_thumb32(__u32);
+#endif
+#else
#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x)
#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x)
#endif
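__opcode_to_mem_thumb32() relies on swahw32 semantics: the two 16-bit halves of a canonical 32-bit Thumb-2 opcode are swapped to produce the in-memory halfword order; as the comment above notes, BE32 systems cannot rely on 32-bit stores for this at all. An illustrative equivalent of that swap (example_swahw32 is not a kernel helper):

#include <linux/types.h>

/* Swap the halfwords of a 32-bit value: 0xAAAABBBB becomes 0xBBBBAAAA. */
static inline u32 example_swahw32(u32 x)
{
        return (x << 16) | (x >> 16);
}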
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index c2bf24f40177..3364637755e8 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/outercache.h
*
* Copyright (C) 2010 ARM Ltd.
* Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_OUTERCACHE_H
diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
index 8f2c47bec375..7c2c72323d17 100644
--- a/arch/arm/include/asm/page-nommu.h
+++ b/arch/arm/include/asm/page-nommu.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/page-nommu.h
*
* Copyright (C) 2004 Hyok S. Choi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PAGE_NOMMU_H
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 4355f0ec44d6..ef11b721230e 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -1,19 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/page.h
*
* Copyright (C) 1995-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+#include <vdso/page.h>
#ifndef __ASSEMBLY__
@@ -116,6 +110,28 @@ struct cpu_user_fns {
unsigned long vaddr, struct vm_area_struct *vma);
};
+void fa_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void fa_clear_user_highpage(struct page *page, unsigned long vaddr);
+void feroceon_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4wb_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr);
+void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+
#ifdef MULTI_USER
extern struct cpu_user_fns cpu_user;
@@ -150,6 +166,9 @@ extern void copy_page(void *to, const void *from);
#include <asm/pgtable-3level-types.h>
#else
#include <asm/pgtable-2level-types.h>
+#ifdef CONFIG_VMAP_STACK
+#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
+#endif
#endif
#endif /* CONFIG_MMU */
@@ -158,16 +177,16 @@ typedef struct page *pgtable_t;
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
extern int pfn_valid(unsigned long);
+#define pfn_valid pfn_valid
#endif
-#include <asm/memory.h>
-
#endif /* !__ASSEMBLY__ */
-#define VM_DATA_DEFAULT_FLAGS \
- (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#include <asm/memory.h>
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
#include <asm-generic/getorder.h>
+#include <asm-generic/memory_model.h>
#endif
diff --git a/arch/arm/include/asm/paravirt.h b/arch/arm/include/asm/paravirt.h
index 8435ff591386..95d5b0d625cd 100644
--- a/arch/arm/include/asm/paravirt.h
+++ b/arch/arm/include/asm/paravirt.h
@@ -1,19 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_PARAVIRT_H
#define _ASM_ARM_PARAVIRT_H
#ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
-struct pv_time_ops {
- unsigned long long (*steal_clock)(int cpu);
-};
-extern struct pv_time_ops pv_time_ops;
+u64 dummy_steal_clock(int cpu);
+
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
static inline u64 paravirt_steal_clock(int cpu)
{
- return pv_time_ops.steal_clock(cpu);
+ return static_call(pv_steal_clock)(cpu);
}
#endif
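With the pv_time_ops table gone, a hypervisor backend publishes its steal-clock handler by updating the static call rather than filling in a function pointer. A minimal sketch; my_steal_clock and my_pv_time_init are illustrative, and a real backend would typically also enable the paravirt_steal_enabled key:

#include <linux/init.h>
#include <linux/static_call.h>
#include <linux/types.h>
#include <asm/paravirt.h>

/* Illustrative handler: report no stolen time for any CPU. */
static u64 my_steal_clock(int cpu)
{
        return 0;
}

static int __init my_pv_time_init(void)
{
        static_call_update(pv_steal_clock, my_steal_clock);
        return 0;
}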
diff --git a/arch/arm/include/asm/paravirt_api_clock.h b/arch/arm/include/asm/paravirt_api_clock.h
new file mode 100644
index 000000000000..65ac7cee0dad
--- /dev/null
+++ b/arch/arm/include/asm/paravirt_api_clock.h
@@ -0,0 +1 @@
+#include <asm/paravirt.h>
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 396c92bcc0cf..5916b88d4c94 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_PCI_H
#define ASMARM_PCI_H
@@ -9,10 +10,7 @@ extern unsigned long pcibios_min_io;
extern unsigned long pcibios_min_mem;
#define PCIBIOS_MIN_MEM pcibios_min_mem
-static inline int pcibios_assign_all_busses(void)
-{
- return pci_has_flag(PCI_REASSIGN_ALL_RSRC);
-}
+#define pcibios_assign_all_busses() pci_has_flag(PCI_REASSIGN_ALL_BUS)
#ifdef CONFIG_PCI_DOMAINS
static inline int pci_proc_domain(struct pci_bus *bus)
@@ -21,20 +19,10 @@ static inline int pci_proc_domain(struct pci_bus *bus)
}
#endif /* CONFIG_PCI_DOMAINS */
-/*
- * The PCI address space does equal the physical memory address space.
- * The networking and block device layers use this boolean for bounce
- * buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS (1)
-
#define HAVE_PCI_MMAP
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return channel ? 15 : 14;
-}
+extern void pcibios_report_status(unsigned int status_mask, int warn);
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index a89b4076cde4..7545c87c251f 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -1,33 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2012 Calxeda, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
+#include <asm/insn.h>
+
+register unsigned long current_stack_pointer asm ("sp");
+
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
*/
-#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+#ifdef CONFIG_SMP
static inline void set_my_cpu_offset(unsigned long off)
{
+ extern unsigned int smp_on_up;
+
+ if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
+ return;
+
/* Set TPIDRPRW */
asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
-static inline unsigned long __my_cpu_offset(void)
+static __always_inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
@@ -36,8 +34,28 @@ static inline unsigned long __my_cpu_offset(void)
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
- : "Q" (*(const unsigned long *)current_stack_pointer));
+ asm("0: mrc p15, 0, %0, c13, c0, 4 \n\t"
+#ifdef CONFIG_CPU_V6
+ "1: \n\t"
+ " .subsection 1 \n\t"
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+ !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ "2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) " \n\t"
+ " b 1b \n\t"
+#else
+ "2: ldr %0, 3f \n\t"
+ " ldr %0, [%0] \n\t"
+ " b 1b \n\t"
+ "3: .long __per_cpu_offset \n\t"
+#endif
+ " .previous \n\t"
+ " .pushsection \".alt.smp.init\", \"a\" \n\t"
+ " .long 0b - . \n\t"
+ " b . + (2b - 0b) \n\t"
+ " .popsection \n\t"
+#endif
+ : "=r" (off)
+ : "Q" (*(const unsigned long *)current_stack_pointer));
return off;
}
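The offset held in TPIDRPRW (or, on SMP-on-UP V6, loaded from __per_cpu_offset) is what the generic per-cpu accessors add to a variable's link-time address. A conceptual sketch of that final step, assuming the SMP definition of __my_cpu_offset() above; example_this_cpu_ptr is illustrative, the real accessors come from asm-generic/percpu.h:

/* Relocate a per-CPU variable's address to the copy belonging to the
 * CPU whose offset is currently installed. Illustrative only. */
#define example_this_cpu_ptr(ptr) \
        ((typeof(ptr))((unsigned long)(ptr) + __my_cpu_offset()))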
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 4f9dec489931..c08f16f2e243 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -1,27 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/include/asm/perf_event.h
*
* Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __ARM_PERF_EVENT_H__
#define __ARM_PERF_EVENT_H__
-#ifdef CONFIG_PERF_EVENTS
-struct pt_regs;
-extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-extern unsigned long perf_misc_flags(struct pt_regs *regs);
-#define perf_misc_flags(regs) perf_misc_flags(regs)
-#endif
-
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->ARM_pc = (__ip); \
- (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
+ frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
(regs)->ARM_sp = current_stack_pointer; \
(regs)->ARM_cpsr = SVC_MODE; \
}
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index b2902a5cd780..a17f01235c29 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgalloc.h
*
* Copyright (C) 2000-2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H
@@ -18,25 +15,13 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-#define check_pgt_cache() do { } while (0)
-
#ifdef CONFIG_MMU
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
#ifdef CONFIG_ARM_LPAE
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- return (pmd_t *)get_zeroed_page(GFP_KERNEL);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
- BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- free_page((unsigned long)pmd);
-}
+#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
@@ -44,21 +29,24 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
#else /* !CONFIG_ARM_LPAE */
+#define PGD_SIZE (PAGE_SIZE << 2)
/*
* Since we have only two-level page tables, these are trivial
*/
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd) do { } while (0)
+#ifdef CONFIG_KASAN
+/* The KASan core unconditionally calls pud_populate() on all architectures */
+#define pud_populate(mm,pmd,pte) do { } while (0)
+#else
#define pud_populate(mm,pmd,pte) BUG()
-
+#endif
#endif /* CONFIG_ARM_LPAE */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
-
static inline void clean_pte_table(pte_t *pte)
{
clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
@@ -80,54 +68,42 @@ static inline void clean_pte_table(pte_t *pte)
* | h/w pt 1 |
* +------------+
*/
+
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#define __HAVE_ARCH_PGD_FREE
+#include <asm-generic/pgalloc.h>
+
static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one_kernel(struct mm_struct *mm)
{
- pte_t *pte;
+ pte_t *pte = __pte_alloc_one_kernel(mm);
- pte = (pte_t *)__get_free_page(PGALLOC_GFP);
if (pte)
clean_pte_table(pte);
return pte;
}
+#ifdef CONFIG_HIGHPTE
+#define PGTABLE_HIGHMEM __GFP_HIGHMEM
+#else
+#define PGTABLE_HIGHMEM 0
+#endif
+
static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
-#ifdef CONFIG_HIGHPTE
- pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
-#else
- pte = alloc_pages(PGALLOC_GFP, 0);
-#endif
+ pte = __pte_alloc_one(mm, GFP_PGTABLE_USER | PGTABLE_HIGHMEM);
if (!pte)
return NULL;
if (!PageHighMem(pte))
clean_pte_table(page_address(pte));
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- return NULL;
- }
return pte;
}
-/*
- * Free one PTE table.
- */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- if (pte)
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- __free_page(pte);
-}
-
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
pmdval_t prot)
{
@@ -167,7 +143,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
__pmd_populate(pmdp, page_to_phys(ptep), prot);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 3f82e9da7cec..556937e1790e 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-2level-hwdef.h
*
* Copyright (C) 1995-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_PGTABLE_2LEVEL_HWDEF_H
#define _ASM_PGTABLE_2LEVEL_HWDEF_H
diff --git a/arch/arm/include/asm/pgtable-2level-types.h b/arch/arm/include/asm/pgtable-2level-types.h
index 66cb5b0e89c5..650e793f4142 100644
--- a/arch/arm/include/asm/pgtable-2level-types.h
+++ b/arch/arm/include/asm/pgtable-2level-types.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-2level-types.h
*
* Copyright (C) 1995-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PGTABLE_2LEVEL_TYPES_H
#define _ASM_PGTABLE_2LEVEL_TYPES_H
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 92fd2c8a9af0..6b5392e20f41 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -1,16 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-2level.h
*
* Copyright (C) 1995-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_PGTABLE_2LEVEL_H
#define _ASM_PGTABLE_2LEVEL_H
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
/*
* Hardware-wise, we have a two level page table structure, where the first
@@ -78,6 +75,8 @@
#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
@@ -127,6 +126,9 @@
#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
#define L_PTE_NONE (_AT(pteval_t, 1) << 11)
+/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
+#define L_PTE_SWP_EXCLUSIVE L_PTE_RDONLY
+
/*
* These are the memory types, defined to be compatible with
* pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
@@ -180,19 +182,39 @@
* the pud: the pud entry is never bad, always exists, and can't be set or
* cleared.
*/
-#define pud_none(pud) (0)
-#define pud_bad(pud) (0)
-#define pud_present(pud) (1)
-#define pud_clear(pudp) do { } while (0)
-#define set_pud(pud,pudp) do { } while (0)
+static inline int pud_none(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return 1;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+}
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
return (pmd_t *)pud;
}
+#define pmd_offset pmd_offset
+
+#define pmd_pfn(pmd) (__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-#define pmd_large(pmd) (pmd_val(pmd) & 2)
-#define pmd_bad(pmd) (pmd_val(pmd) & 2)
+#define pmd_leaf(pmd) (pmd_val(pmd) & PMD_TYPE_SECT)
+#define pmd_bad(pmd) pmd_leaf(pmd)
#define pmd_present(pmd) (pmd_val(pmd))
#define copy_pmd(pmdpd,pmdps) \
@@ -213,15 +235,12 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define pmd_addr_end(addr,end) (end)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
-#define pte_special(pte) (0)
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
* We don't have huge page support for short descriptors, for the moment
* define empty stubs for use by pin_page_for_write.
*/
#define pmd_hugewillfault(pmd) (0)
-#define pmd_thp_or_huge(pmd) (0)
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 4cd664abfcd3..944fc9955528 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-3level-hwdef.h
*
* Copyright (C) 2011 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PGTABLE_3LEVEL_HWDEF_H
#define _ASM_PGTABLE_3LEVEL_HWDEF_H
@@ -26,6 +14,7 @@
* + Level 1/2 descriptor
* - common
*/
+#define PUD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
@@ -86,6 +75,7 @@
#define PHYS_MASK_SHIFT (40)
#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1)
+#ifndef CONFIG_CPU_TTBR0_PAN
/*
* TTBR0/TTBR1 split (PAGE_OFFSET):
* 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
@@ -105,5 +95,35 @@
#endif
#define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16)
+#else
+/*
+ * With CONFIG_CPU_TTBR0_PAN enabled, TTBR1 is only used during
+ * uaccess-disabled regions, when TTBR0 is disabled.
+ */
+#define TTBR1_OFFSET 0 /* pointing to swapper_pg_dir */
+#define TTBR1_SIZE 0 /* TTBR1 size controlled via TTBCR.T0SZ */
+#endif
+
+/*
+ * TTBCR register bits.
+ *
+ * The ORGN0 and IRGN0 bits enable different forms of caching when
+ * walking the translation table. Clearing these bits (which is claimed
+ * to be the reset default) means "normal memory, [outer|inner]
+ * non-cacheable".
+ */
+#define TTBCR_EAE (1 << 31)
+#define TTBCR_IMP (1 << 30)
+#define TTBCR_SH1_MASK (3 << 28)
+#define TTBCR_ORGN1_MASK (3 << 26)
+#define TTBCR_IRGN1_MASK (3 << 24)
+#define TTBCR_EPD1 (1 << 23)
+#define TTBCR_A1 (1 << 22)
+#define TTBCR_T1SZ_MASK (7 << 16)
+#define TTBCR_SH0_MASK (3 << 12)
+#define TTBCR_ORGN0_MASK (3 << 10)
+#define TTBCR_IRGN0_MASK (3 << 8)
+#define TTBCR_EPD0 (1 << 7)
+#define TTBCR_T0SZ_MASK (7 << 0)
#endif
diff --git a/arch/arm/include/asm/pgtable-3level-types.h b/arch/arm/include/asm/pgtable-3level-types.h
index 921aa30259c4..d0f587a2129d 100644
--- a/arch/arm/include/asm/pgtable-3level-types.h
+++ b/arch/arm/include/asm/pgtable-3level-types.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-3level-types.h
*
* Copyright (C) 2011 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PGTABLE_3LEVEL_TYPES_H
#define _ASM_PGTABLE_3LEVEL_TYPES_H
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 2a029bceaf2f..7b71a3d414b7 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-3level.h
*
* Copyright (C) 2011 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PGTABLE_3LEVEL_H
#define _ASM_PGTABLE_3LEVEL_H
@@ -37,6 +25,8 @@
#define PTE_HWTABLE_OFF (0)
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
*/
@@ -86,6 +76,9 @@
#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
#define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
+/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
+#define L_PTE_SWP_EXCLUSIVE (_AT(pteval_t, 1) << 7)
+
#define L_PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
#define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
#define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
@@ -116,36 +109,16 @@
*/
#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
-/*
- * 2nd stage PTE definitions for LPAE.
- */
-#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
-#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
-#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
-#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
-#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
-
-#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
-
-#define L_PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[1] */
-#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
-
-/*
- * Hyp-mode PL2 PTE definitions for LPAE.
- */
-#define L_PTE_HYP L_PTE_USER
-
#ifndef __ASSEMBLY__
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!(pud_val(pud) & 2))
+#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud) (pud_val(pud))
#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
-#define pmd_large(pmd) pmd_sect(pmd)
+#define pmd_leaf(pmd) pmd_sect(pmd)
#define pud_clear(pudp) \
do { \
@@ -159,19 +132,12 @@
flush_pmd_entry(pudp); \
} while (0)
-static inline pmd_t *pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
-/* Find an entry in the second-level page table.. */
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
-
-#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
+#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT))
#define copy_pmd(pmdpd,pmdps) \
do { \
@@ -219,16 +185,11 @@ static inline pte_t pte_mkspecial(pte_t pte)
pte_val(pte) |= L_PTE_SPECIAL;
return pte;
}
-#define __HAVE_ARCH_PTE_SPECIAL
-#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
-#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
-#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
-#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
@@ -239,7 +200,7 @@ static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
-PMD_BIT_FUNC(mkwrite, &= ~L_PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkwrite_novma, &= ~L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkclean, &= ~L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
@@ -248,10 +209,12 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
-#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+/* No hardware dirty/accessed bits -- generic_pmdp_establish() fits */
+#define pmdp_establish generic_pmdp_establish
/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
}
diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h
index 8426229ba292..d60548ccd13e 100644
--- a/arch/arm/include/asm/pgtable-hwdef.h
+++ b/arch/arm/include/asm/pgtable-hwdef.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-hwdef.h
*
* Copyright (C) 1995-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PGTABLE_HWDEF_H
#define _ASMARM_PGTABLE_HWDEF_H
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index a0d726a47c8a..61480d096054 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-nommu.h
*
* Copyright (C) 1995-2002 Russell King
* Copyright (C) 2004 Hyok S. Choi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PGTABLE_NOMMU_H
#define _ASMARM_PGTABLE_NOMMU_H
@@ -24,9 +21,6 @@
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp)
-#define kern_addr_valid(addr) (1)
-#define pmd_offset(a, b) ((void *)0)
-/* FIXME */
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
@@ -45,27 +39,14 @@
#define swapper_pg_dir ((pgd_t *) 0)
-#define __swp_type(x) (0)
-#define __swp_offset(x) (0)
-#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
typedef pte_t *pte_addr_t;
/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) (virt_to_page(0))
-
-/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) (prot)
#define pgprot_writecombine(prot) (prot)
-#define pgprot_dmacoherent(prot) (prot)
#define pgprot_device(prot) (prot)
@@ -75,11 +56,6 @@ typedef pte_t *pte_addr_t;
extern unsigned int kobjsize(const void *objp);
/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
*/
@@ -88,8 +64,6 @@ extern unsigned int kobjsize(const void *objp);
#define FIRST_USER_ADDRESS 0UL
-#include <asm-generic/pgtable.h>
-
#else
/*
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 1c462381c225..86378eec7757 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable.h
*
* Copyright (C) 1995-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H
@@ -13,16 +10,23 @@
#include <linux/const.h>
#include <asm/proc-fns.h>
-#ifndef CONFIG_MMU
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+#endif
-#include <asm-generic/4level-fixup.h>
+#include <asm-generic/pgtable-nopud.h>
+
+#ifndef CONFIG_MMU
#include <asm/pgtable-nommu.h>
#else
-#define __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nopud.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
@@ -83,9 +87,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;
-extern pgprot_t pgprot_hyp_device;
-extern pgprot_t pgprot_s2;
-extern pgprot_t pgprot_s2_device;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
@@ -98,12 +99,6 @@ extern pgprot_t pgprot_s2_device;
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
-#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
-#define PAGE_HYP_EXEC _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
-#define PAGE_HYP_RO _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
-#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
-#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)
#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
@@ -125,6 +120,9 @@ extern pgprot_t pgprot_s2_device;
#define pgprot_stronglyordered(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+#define pgprot_device(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)
+
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
@@ -147,42 +145,15 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
* 2) If we could do execute protection, then read is implied
* 3) write implies read permissions
*/
-#define __P000 __PAGE_NONE
-#define __P001 __PAGE_READONLY
-#define __P010 __PAGE_COPY
-#define __P011 __PAGE_COPY
-#define __P100 __PAGE_READONLY_EXEC
-#define __P101 __PAGE_READONLY_EXEC
-#define __P110 __PAGE_COPY_EXEC
-#define __P111 __PAGE_COPY_EXEC
-
-#define __S000 __PAGE_NONE
-#define __S001 __PAGE_READONLY
-#define __S010 __PAGE_SHARED
-#define __S011 __PAGE_SHARED
-#define __S100 __PAGE_READONLY_EXEC
-#define __S101 __PAGE_READONLY_EXEC
-#define __S110 __PAGE_SHARED_EXEC
-#define __S111 __PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr) (empty_zero_page)
-
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+#define pgdp_get(pgdp) READ_ONCE(*pgdp)
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
+#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
#define pmd_none(pmd) (!pmd_val(pmd))
@@ -193,26 +164,10 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-#ifndef CONFIG_HIGHPTE
-#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
-#define __pte_unmap(pte) do { } while (0)
-#else
-#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd)))
-#define __pte_unmap(pte) kunmap_atomic(pte)
-#endif
-
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr))
-
-#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
-#define pte_unmap(pte) __pte_unmap(pte)
-
#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
-#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
@@ -232,6 +187,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_valid_user(pte) \
(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
+ pteval_t needed = mask;
+
+ if (write)
+ mask |= L_PTE_RDONLY;
+
+ return (pte_val(pte) & mask) == needed;
+}
+#define pte_access_permitted pte_access_permitted
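To make the mask/needed trick in pte_access_permitted() concrete, here is a small stand-alone sketch; the L_PTE_* values are placeholders loosely modelled on the 2-level definitions, and access_permitted() is a hypothetical mirror of the helper above, not kernel code.

/*
 * Illustrative sketch (not part of the patch): when a write is requested,
 * L_PTE_RDONLY is added to the mask but not to 'needed', so a read-only
 * user PTE fails the comparison while still passing the read-only check.
 */
#include <stdbool.h>
#include <stdio.h>

#define L_PTE_PRESENT (1UL << 0)	/* placeholder value */
#define L_PTE_RDONLY  (1UL << 7)	/* placeholder value */
#define L_PTE_USER    (1UL << 8)	/* placeholder value */

static bool access_permitted(unsigned long pteval, bool write)
{
	unsigned long mask = L_PTE_PRESENT | L_PTE_USER;
	unsigned long needed = mask;

	if (write)
		mask |= L_PTE_RDONLY;	/* RDONLY must be clear for a write */

	return (pteval & mask) == needed;
}

int main(void)
{
	unsigned long ro_user_pte = L_PTE_PRESENT | L_PTE_USER | L_PTE_RDONLY;

	printf("read allowed:  %d\n", access_permitted(ro_user_pte, false)); /* 1 */
	printf("write allowed: %d\n", access_permitted(ro_user_pte, true));  /* 0 */
	return 0;
}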
+
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
@@ -240,19 +207,11 @@ static inline void __sync_icache_dcache(pte_t pteval)
extern void __sync_icache_dcache(pte_t pteval);
#endif
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- unsigned long ext = 0;
-
- if (addr < TASK_SIZE && pte_valid_user(pteval)) {
- if (!pte_special(pteval))
- __sync_icache_dcache(pteval);
- ext |= PTE_EXT_NG;
- }
+#define PFN_PTE_SHIFT PAGE_SHIFT
- set_pte_ext(ptep, pteval, ext);
-}
+void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval, unsigned int nr);
+#define set_ptes set_ptes
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
@@ -271,7 +230,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
{
return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}
@@ -315,27 +274,47 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
}
/*
- * Encode and decode a swap entry. Swap entries are stored in the Linux
- * page tables as follows:
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <--------------- offset ------------------------> < type -> 0 0
+ * <------------------- offset ------------------> E < type -> 0 0
+ *
+ * E is the exclusive marker that is not stored in swap entries.
*
- * This gives us up to 31 swap files and 128GB per swap file. Note that
+ * This gives us up to 31 swap files and 64GB per swap file. Note that
* the offset field is always non-zero.
*/
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
-#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+#define __swp_entry(type, offset) ((swp_entry_t) { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
+ ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+#define __swp_entry_to_pte(swp) __pte((swp).val)
+
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+ return pte_isset(pte, L_PTE_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_SWP_EXCLUSIVE));
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_SWP_EXCLUSIVE));
+}
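The swap-PTE layout documented above can be checked with a small stand-alone sketch; the SWP_* constants mirror __SWP_TYPE_SHIFT, __SWP_TYPE_BITS and the bit-7 exclusive marker from this hunk, and the type/offset values are made up for illustration.

/*
 * Illustrative sketch (not part of the patch): bits 1:0 zero, type in
 * bits 6:2, the exclusive marker E in bit 7, offset from bit 8 upwards.
 */
#include <stdio.h>

#define SWP_TYPE_SHIFT   2
#define SWP_TYPE_BITS    5
#define SWP_TYPE_MASK    ((1UL << SWP_TYPE_BITS) - 1)
#define SWP_EXCLUSIVE    (1UL << 7)	/* the "E" bit borrowed from bit 7 */
#define SWP_OFFSET_SHIFT (SWP_TYPE_BITS + SWP_TYPE_SHIFT + 1)

static unsigned long swp_entry(unsigned long type, unsigned long offset)
{
	return ((type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT) |
	       (offset << SWP_OFFSET_SHIFT);
}

int main(void)
{
	unsigned long val = swp_entry(3, 0x1234) | SWP_EXCLUSIVE;

	printf("raw value: 0x%lx\n", val);
	printf("type:      %lu\n", (val >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK); /* 3 */
	printf("offset:    0x%lx\n", val >> SWP_OFFSET_SHIFT);               /* 0x1234 */
	printf("exclusive: %d\n", !!(val & SWP_EXCLUSIVE));                  /* 1 */
	return 0;
}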
/*
* It is an error for the kernel to have more swap files than we can
@@ -344,20 +323,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-/* FIXME: this is not correct */
-#define kern_addr_valid(addr) (1)
-
-#include <asm-generic/pgtable.h>
-
/*
* We provide our own arch_get_unmapped_area to cope with VIPT caches.
*/
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#define pgtable_cache_init() do { } while (0)
-
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
index 1e5b9bb92270..ebbd9ec95d21 100644
--- a/arch/arm/include/asm/probes.h
+++ b/arch/arm/include/asm/probes.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/probes.h
*
@@ -5,15 +6,6 @@
* which contains the following notice...
*
* Copyright (C) 2006, 2007 Motorola Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _ASM_PROBES_H
@@ -51,7 +43,6 @@ struct arch_probes_insn {
 * We assume one instruction can consume at most 64 bytes of stack, which is
 * 'push {r0-r15}'. Instructions that consume more or unknown stack space, like
 * 'str r0, [sp, #-80]' and 'str r0, [sp, r1]', should be prohibited from probing.
- * Both kprobe and jprobe use this macro.
*/
#define MAX_STACK_SIZE 64
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index f2e1af45bd6f..b4986a23d852 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/proc-fns.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_PROCFNS_H
#define __ASM_PROCFNS_H
@@ -23,7 +20,7 @@ struct mm_struct;
/*
* Don't change this structure - ASM code relies on it.
*/
-extern struct processor {
+struct processor {
/* MISC
* get data abort address/flags
*/
@@ -37,6 +34,10 @@ extern struct processor {
*/
void (*_proc_init)(void);
/*
+ * Check for processor bugs
+ */
+ void (*check_bugs)(void);
+ /*
* Disable any processor specifics
*/
void (*_proc_fin)(void);
@@ -75,9 +76,13 @@ extern struct processor {
unsigned int suspend_size;
void (*do_suspend)(void *);
void (*do_resume)(void *);
-} processor;
+};
#ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
@@ -94,22 +99,53 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);
#else
-#define cpu_proc_init processor._proc_init
-#define cpu_proc_fin processor._proc_fin
-#define cpu_reset processor.reset
-#define cpu_do_idle processor._do_idle
-#define cpu_dcache_clean_area processor.dcache_clean_area
-#define cpu_set_pte_ext processor.set_pte_ext
-#define cpu_do_switch_mm processor.switch_mm
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend processor.do_suspend
-#define cpu_do_resume processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised. We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f) cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ unsigned int cpu = smp_processor_id();
+ *cpu_vtable[cpu] = *p;
+ WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+ cpu_vtable[0]->dcache_clean_area);
+ WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+ cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f) processor.f
+#define PROC_TABLE(f) processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ processor = *p;
+}
#endif
-extern void cpu_resume(void);
+#define cpu_proc_init PROC_VTABLE(_proc_init)
+#define cpu_check_bugs PROC_VTABLE(check_bugs)
+#define cpu_proc_fin PROC_VTABLE(_proc_fin)
+#define cpu_reset PROC_VTABLE(reset)
+#define cpu_do_idle PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend PROC_VTABLE(do_suspend)
+#define cpu_do_resume PROC_VTABLE(do_resume)
+#endif
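For readers following the vtable rework, a hypothetical call site (example_do_idle() is not part of the patch) shows what the PROC_VTABLE() indirection amounts to under the BIG_LITTLE + HARDEN_BRANCH_PREDICTOR branch above:

/*
 * Hypothetical call site, for illustration only, assuming the
 * CONFIG_BIG_LITTLE + CONFIG_HARDEN_BRANCH_PREDICTOR branch above.
 */
static inline int example_do_idle(void)
{
	/* cpu_do_idle() is PROC_VTABLE(_do_idle), which expands to: */
	return cpu_vtable[smp_processor_id()]->_do_idle();
}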
-#include <asm/memory.h>
+extern void cpu_resume(void);
#ifdef CONFIG_MMU
@@ -142,6 +178,18 @@ extern void cpu_resume(void);
})
#endif
+static inline unsigned int cpu_get_ttbcr(void)
+{
+ unsigned int ttbcr;
+ asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
+ return ttbcr;
+}
+
+static inline void cpu_set_ttbcr(unsigned int ttbcr)
+{
+ asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr) : "memory");
+}
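The accessors above combine naturally with the TTBCR_* masks added in pgtable-3level-hwdef.h earlier in this diff; the following read-modify-write helper is a hypothetical sketch of such usage, not taken from the patch.

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/pgtable-hwdef.h>
#include <asm/proc-fns.h>

/* Hypothetical helper, for illustration only (assumes an LPAE kernel). */
static inline void ttbcr_set_epd0(bool disable)
{
	unsigned int ttbcr = cpu_get_ttbcr();

	if (disable)
		ttbcr |= TTBCR_EPD0;	/* fault instead of walking TTBR0 */
	else
		ttbcr &= ~TTBCR_EPD0;

	cpu_set_ttbcr(ttbcr);
	isb();				/* make the new TTBCR value visible */
}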
+
#else /*!CONFIG_MMU */
#define cpu_switch_mm(pgd,mm) { }
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index c3d5fc124a05..326864f79d18 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -1,28 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/processor.h
*
* Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
#ifdef __KERNEL__
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>
+#include <asm/vdso/processor.h>
#ifdef __KERNEL__
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -45,17 +37,36 @@ struct thread_struct {
struct debug_info debug;
};
-#define INIT_THREAD { }
+/*
+ * Everything usercopied to/from thread_struct is statically-sized, so
+ * no hardened usercopy whitelist is needed.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+ unsigned long *size)
+{
+ *offset = *size = 0;
+}
-#ifdef CONFIG_MMU
-#define nommu_start_thread(regs) do { } while (0)
-#else
-#define nommu_start_thread(regs) regs->ARM_r10 = current->mm->start_data
-#endif
+#define INIT_THREAD { }
#define start_thread(regs,pc,sp) \
({ \
+ unsigned long r7, r8, r9; \
+ \
+ if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) { \
+ r7 = regs->ARM_r7; \
+ r8 = regs->ARM_r8; \
+ r9 = regs->ARM_r9; \
+ } \
memset(regs->uregs, 0, sizeof(regs->uregs)); \
+ if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && \
+ current->personality & FDPIC_FUNCPTRS) { \
+ regs->ARM_r7 = r7; \
+ regs->ARM_r8 = r8; \
+ regs->ARM_r9 = r9; \
+ regs->ARM_r10 = current->mm->start_data; \
+ } else if (!IS_ENABLED(CONFIG_MMU)) \
+ regs->ARM_r10 = current->mm->start_data; \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
else \
@@ -65,22 +76,12 @@ struct thread_struct {
regs->ARM_cpsr |= PSR_ENDSTATE; \
regs->ARM_pc = pc & ~1; /* pc */ \
regs->ARM_sp = sp; /* sp */ \
- nommu_start_thread(regs); \
})
/* Forward declaration, a strange C thing */
struct task_struct;
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-unsigned long get_wchan(struct task_struct *p);
-
-#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
-#define cpu_relax() smp_mb()
-#else
-#define cpu_relax() barrier()
-#endif
+unsigned long __get_wchan(struct task_struct *p);
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
@@ -92,7 +93,8 @@ unsigned long get_wchan(struct task_struct *p);
#define __ALT_SMP_ASM(smp, up) \
"9998: " smp "\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
- " .long 9998b\n" \
+ " .align 2\n" \
+ " .long 9998b - .\n" \
" " up "\n" \
" .popsection\n"
#else
@@ -119,16 +121,14 @@ static inline void prefetchw(const void *ptr)
__asm__ __volatile__(
".arch_extension mp\n"
__ALT_SMP_ASM(
- WASM(pldw) "\t%a0",
- WASM(pld) "\t%a0"
+ "pldw\t%a0",
+ "pld\t%a0"
)
:: "p" (ptr));
}
#endif
#endif
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
#endif
#endif /* __ASM_ARM_PROCESSOR_H */
diff --git a/arch/arm/include/asm/procinfo.h b/arch/arm/include/asm/procinfo.h
index ca52e584ef74..42df316fb8ba 100644
--- a/arch/arm/include/asm/procinfo.h
+++ b/arch/arm/include/asm/procinfo.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/procinfo.h
*
* Copyright (C) 1996-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_PROCINFO_H
#define __ASM_PROCINFO_H
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index cd94ef2ef283..402e3f34c7ed 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -1,24 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/prom.h
*
* Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __ASMARM_PROM_H
#define __ASMARM_PROM_H
#ifdef CONFIG_OF
-extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
+extern const struct machine_desc *setup_machine_fdt(void *dt_virt);
extern void __init arm_dt_init_cpu_maps(void);
#else /* CONFIG_OF */
-static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+static inline const struct machine_desc *setup_machine_fdt(void *dt_virt)
{
return NULL;
}
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index e1b825dfab23..536e155328fa 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -1,12 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2012 ARM Limited
*/
diff --git a/arch/arm/include/asm/ptdump.h b/arch/arm/include/asm/ptdump.h
new file mode 100644
index 000000000000..46a4575146ee
--- /dev/null
+++ b/arch/arm/include/asm/ptdump.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 ARM Ltd. */
+#ifndef __ASM_PTDUMP_H
+#define __ASM_PTDUMP_H
+
+#ifdef CONFIG_ARM_PTDUMP_CORE
+
+#include <linux/mm_types.h>
+#include <linux/seq_file.h>
+
+struct addr_marker {
+ unsigned long start_address;
+ char *name;
+};
+
+struct ptdump_info {
+ struct mm_struct *mm;
+ const struct addr_marker *markers;
+ unsigned long base_addr;
+};
+
+void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
+#ifdef CONFIG_ARM_PTDUMP_DEBUGFS
+#define EFI_RUNTIME_MAP_END SZ_1G
+void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
+#else
+static inline void ptdump_debugfs_register(struct ptdump_info *info,
+ const char *name) { }
+#endif /* CONFIG_ARM_PTDUMP_DEBUGFS */
+
+void ptdump_check_wx(void);
+
+#endif /* CONFIG_ARM_PTDUMP_CORE */
+
+#ifdef CONFIG_ARM_DEBUG_WX
+#define arm_debug_checkwx() ptdump_check_wx()
+#else
+#define arm_debug_checkwx() do { } while (0)
+#endif
+
+#endif /* __ASM_PTDUMP_H */
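Since ptdump.h is new in this series, a hedged sketch of a caller is shown below; the marker addresses, the debugfs name and the { -1UL, NULL } terminator convention are assumptions for illustration, and CONFIG_ARM_PTDUMP_DEBUGFS is presumed enabled.

/* Illustrative sketch (not part of the patch): registering a page-table dump. */
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/ptdump.h>

static const struct addr_marker example_markers[] = {
	{ MODULES_VADDR,	"Modules" },
	{ PAGE_OFFSET,		"Kernel mapping" },
	{ -1UL,			NULL },		/* assumed terminator convention */
};

static struct ptdump_info example_ptdump_info = {
	.mm		= &init_mm,
	.markers	= example_markers,
	.base_addr	= 0,
};

static int __init example_ptdump_init(void)
{
	ptdump_debugfs_register(&example_ptdump_info, "example_page_tables");
	return 0;
}
device_initcall(example_ptdump_init);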
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index e9c9a117bd25..6eb311fb2da0 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/ptrace.h
*
* Copyright (C) 1996-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_PTRACE_H
#define __ASM_ARM_PTRACE_H
@@ -13,6 +10,7 @@
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
#include <linux/types.h>
struct pt_regs {
@@ -22,7 +20,7 @@ struct pt_regs {
struct svc_pt_regs {
struct pt_regs regs;
u32 dacr;
- u32 addr_limit;
+ u32 ttbcr;
};
#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
@@ -39,8 +37,8 @@ struct svc_pt_regs {
#ifndef CONFIG_CPU_V7M
#define isa_mode(regs) \
- ((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
- (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
+ (FIELD_GET(PSR_J_BIT, (regs)->ARM_cpsr) << 1 | \
+ FIELD_GET(PSR_T_BIT, (regs)->ARM_cpsr))
#else
#define isa_mode(regs) 1 /* Thumb */
#endif
@@ -126,8 +124,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
/*
* kprobe-based event tracer support
*/
-#include <linux/stddef.h>
-#include <linux/types.h>
+#include <linux/compiler.h>
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))
extern int regs_query_register_offset(const char *name);
@@ -168,5 +165,38 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
})
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->ARM_r0 = rc;
+}
+
+/*
+ * Update ITSTATE after normal execution of an IT block instruction.
+ *
+ * The 8 IT state bits are split into two parts in CPSR:
+ * ITSTATE<1:0> are in CPSR<26:25>
+ * ITSTATE<7:2> are in CPSR<15:10>
+ */
+static inline unsigned long it_advance(unsigned long cpsr)
+{
+ if ((cpsr & 0x06000400) == 0) {
+ /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
+ cpsr &= ~PSR_IT_MASK;
+ } else {
+ /* We need to shift left ITSTATE<4:0> */
+ const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
+ unsigned long it = cpsr & mask;
+ it <<= 1;
+ it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
+ it &= mask;
+ cpsr &= ~mask;
+ cpsr |= it;
+ }
+ return cpsr;
+}
+
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm/include/asm/seccomp.h b/arch/arm/include/asm/seccomp.h
new file mode 100644
index 000000000000..e9ad0f37d2ba
--- /dev/null
+++ b/arch/arm/include/asm/seccomp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm-generic/seccomp.h>
+
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_ARM
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "arm"
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h
index 803bbf2b20b8..700b8bcdf9bd 100644
--- a/arch/arm/include/asm/sections.h
+++ b/arch/arm/include/asm/sections.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_SECTIONS_H
#define _ASM_ARM_SECTIONS_H
@@ -5,4 +6,21 @@
extern char _exiprom[];
+extern char __idmap_text_start[];
+extern char __idmap_text_end[];
+extern char __entry_text_start[];
+extern char __entry_text_end[];
+
+static inline bool in_entry_text(unsigned long addr)
+{
+ return memory_contains(__entry_text_start, __entry_text_end,
+ (void *)addr, 1);
+}
+
+static inline bool in_idmap_text(unsigned long addr)
+{
+ void *a = (void *)addr;
+ return memory_contains(__idmap_text_start, __idmap_text_end, a, 1);
+}
+
#endif /* _ASM_ARM_SECTIONS_H */
diff --git a/arch/arm/include/asm/secure_cntvoff.h b/arch/arm/include/asm/secure_cntvoff.h
new file mode 100644
index 000000000000..1f93aee1f630
--- /dev/null
+++ b/arch/arm/include/asm/secure_cntvoff.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASMARM_ARCH_CNTVOFF_H
+#define __ASMARM_ARCH_CNTVOFF_H
+
+extern void secure_cntvoff_init(void);
+
+#endif
diff --git a/arch/arm/include/asm/semihost.h b/arch/arm/include/asm/semihost.h
new file mode 100644
index 000000000000..f365787e7c23
--- /dev/null
+++ b/arch/arm/include/asm/semihost.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Adapted for ARM and earlycon:
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Rob Herring <robh@kernel.org>
+ */
+
+#ifndef _ARM_SEMIHOST_H_
+#define _ARM_SEMIHOST_H_
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEMIHOST_SWI "0xab"
+#else
+#define SEMIHOST_SWI "0x123456"
+#endif
+
+struct uart_port;
+
+static inline void smh_putc(struct uart_port *port, unsigned char c)
+{
+ asm volatile("mov r1, %0\n"
+ "mov r0, #3\n"
+ "svc " SEMIHOST_SWI "\n"
+ : : "r" (&c) : "r0", "r1", "memory");
+}
+
+#endif /* _ARM_SEMIHOST_H_ */
diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h
index 5aa4315abe91..0211b9c5b14d 100644
--- a/arch/arm/include/asm/set_memory.h
+++ b/arch/arm/include/asm/set_memory.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 1999-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_SET_MEMORY_H
@@ -14,6 +11,7 @@ int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_valid(unsigned long addr, int numpages, int enable);
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
@@ -21,12 +19,4 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void set_kernel_text_rw(void);
-void set_kernel_text_ro(void);
-#else
-static inline void set_kernel_text_rw(void) { }
-static inline void set_kernel_text_ro(void) { }
-#endif
-
#endif
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 3613d7e9fc40..cc106f946c69 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -1,28 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/asm/setup.h
*
* Copyright (C) 1997-1999 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Structure passed to kernel to tell it about the
- * hardware it's running on. See Documentation/arm/Setup
+ * hardware it's running on. See Documentation/arch/arm/setup.rst
* for more info.
*/
#ifndef __ASMARM_SETUP_H
#define __ASMARM_SETUP_H
+#include <linux/screen_info.h>
#include <uapi/asm/setup.h>
-#define __tag __used __attribute__((__section__(".taglist.init")))
+#define __tag __used __section(".taglist.init")
#define __tagtable(tag, fn) \
static const struct tagtable __tagtable_##fn __tag = { tag, fn }
extern int arm_add_memory(u64 start, u64 size);
-extern void early_print(const char *str, ...);
+extern __printf(1, 2) void early_print(const char *str, ...);
extern void dump_machine_table(void);
#ifdef CONFIG_ATAGS_PROC
@@ -31,4 +29,15 @@ extern void save_atags(const struct tag *tags);
static inline void save_atags(const struct tag *tags) { }
#endif
+struct machine_desc;
+void init_default_cache_policy(unsigned long);
+void paging_init(const struct machine_desc *desc);
+void early_mm_init(const struct machine_desc *);
+void adjust_lowmem_bounds(void);
+void setup_dma_zone(const struct machine_desc *desc);
+
+#ifdef CONFIG_VGA_CONSOLE
+extern struct screen_info vgacon_screen_info;
+#endif
+
#endif
diff --git a/arch/arm/include/asm/shmparam.h b/arch/arm/include/asm/shmparam.h
index a5223b3a9bf9..367a9dac6150 100644
--- a/arch/arm/include/asm/shmparam.h
+++ b/arch/arm/include/asm/shmparam.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASMARM_SHMPARAM_H
#define _ASMARM_SHMPARAM_H
diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
index c0eb412aff04..8b84092d1518 100644
--- a/arch/arm/include/asm/signal.h
+++ b/arch/arm/include/asm/signal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASMARM_SIGNAL_H
#define _ASMARM_SIGNAL_H
@@ -16,7 +17,14 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
+#define __ARCH_UAPI_SA_FLAGS (SA_THIRTYTWO | SA_RESTORER)
+
#define __ARCH_HAS_SA_RESTORER
#include <asm/sigcontext.h>
+
+void do_rseq_syscall(struct pt_regs *regs);
+int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
+ int syscall);
+
#endif
diff --git a/arch/arm/include/asm/simd.h b/arch/arm/include/asm/simd.h
new file mode 100644
index 000000000000..8549fa8b7253
--- /dev/null
+++ b/arch/arm/include/asm/simd.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SIMD_H
+#define _ASM_SIMD_H
+
+#include <linux/cleanup.h>
+#include <linux/compiler_attributes.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+#include <asm/neon.h>
+
+static __must_check inline bool may_use_simd(void)
+{
+ return IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && !in_hardirq()
+ && !irqs_disabled();
+}
+
+DEFINE_LOCK_GUARD_0(ksimd, kernel_neon_begin(), kernel_neon_end())
+
+#define scoped_ksimd() scoped_guard(ksimd)
+
+#endif /* _ASM_SIMD_H */
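The guard declared above is intended to bracket NEON code; the following usage sketch is hypothetical (do_neon_xor() and do_scalar_xor() do not exist in the tree) and only demonstrates the intended pattern.

/*
 * Illustrative sketch (not part of the patch): bracketing a NEON code
 * path with the helpers declared above.
 */
#include <asm/simd.h>

extern void do_neon_xor(unsigned long *dst, const unsigned long *src, int words);	/* hypothetical */
extern void do_scalar_xor(unsigned long *dst, const unsigned long *src, int words);	/* hypothetical */

void example_xor(unsigned long *dst, const unsigned long *src, int words)
{
	if (may_use_simd()) {
		/* kernel_neon_begin()/kernel_neon_end() wrap the scoped block */
		scoped_ksimd()
			do_neon_xor(dst, src, words);
	} else {
		do_scalar_xor(dst, src, words);
	}
}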
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 3d6dc8b460e4..8c05a7f374d8 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/smp.h
*
* Copyright (C) 2004-2005 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_SMP_H
#define __ASM_ARM_SMP_H
@@ -28,11 +25,6 @@ struct seq_file;
extern void show_ipi_list(struct seq_file *, int);
/*
- * Called from assembly code, this handles an IPI.
- */
-asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
-
-/*
* Called from C code, this handles an IPI.
*/
void handle_IPI(int ipinr, struct pt_regs *regs);
@@ -42,17 +34,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs);
*/
extern void smp_init_cpus(void);
-
/*
- * Provide a function to raise an IPI cross call on CPUs in callmap.
+ * Register IPI interrupts with the arch SMP code
*/
-extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
/*
* Called from platform specific assembly code, this is the
* secondary CPU entry point.
*/
-asmlinkage void secondary_start_kernel(void);
+asmlinkage void secondary_start_kernel(struct task_struct *task);
/*
@@ -60,20 +51,20 @@ asmlinkage void secondary_start_kernel(void);
*/
struct secondary_data {
union {
- unsigned long mpu_rgn_szr;
+ struct mpu_rgn_info *mpu_rgn_info;
u64 pgdir;
};
unsigned long swapper_pg_dir;
void *stack;
+ struct task_struct *task;
};
extern struct secondary_data secondary_data;
-extern volatile int pen_release;
extern void secondary_startup(void);
extern void secondary_startup_arm(void);
extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
+static inline void __cpu_die(unsigned int cpu) { }
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -117,7 +108,7 @@ struct of_cpu_method {
#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpu_method __cpu_method_of_table_##name \
- __used __section(__cpu_method_of_table) \
+ __used __section("__cpu_method_of_table") \
= { .method = _method, .ops = _ops }
/*
* set platform specific SMP operations
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index f9080717fc88..f2c36acf9886 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ARM specific SMP header, this contains our implementation
* details.
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index bfe163c40024..b818e5d0cd78 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_ARCH_SCU_H
#define __ASMARM_ARCH_SCU_H
@@ -7,6 +8,7 @@
#ifndef __ASSEMBLER__
+#include <linux/errno.h>
#include <asm/cputype.h>
static inline bool scu_a9_has_base(void)
@@ -26,6 +28,8 @@ static inline unsigned long scu_a9_get_base(void)
#ifdef CONFIG_HAVE_ARM_SCU
unsigned int scu_get_core_count(void __iomem *);
int scu_power_mode(void __iomem *, unsigned int);
+int scu_cpu_power_enable(void __iomem *, unsigned int);
+int scu_get_cpu_power_mode(void __iomem *scu_base, unsigned int logical_cpu);
#else
static inline unsigned int scu_get_core_count(void __iomem *scu_base)
{
@@ -35,6 +39,16 @@ static inline int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
return -EINVAL;
}
+static inline int scu_cpu_power_enable(void __iomem *scu_base,
+ unsigned int mode)
+{
+ return -EINVAL;
+}
+static inline int scu_get_cpu_power_mode(void __iomem *scu_base,
+ unsigned int logical_cpu)
+{
+ return -EINVAL;
+}
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_ARM_SCU)
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index 7b2899c2f7fc..c729d2113a24 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_SMP_TWD_H
#define __ASMARM_SMP_TWD_H
@@ -18,20 +19,4 @@
#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
-#include <linux/ioport.h>
-
-struct twd_local_timer {
- struct resource res[2];
-};
-
-#define DEFINE_TWD_LOCAL_TIMER(name,base,irq) \
-struct twd_local_timer name __initdata = { \
- .res = { \
- DEFINE_RES_MEM(base, 0x10), \
- DEFINE_RES_IRQ(irq), \
- }, \
-};
-
-int twd_local_timer_register(struct twd_local_timer *);
-
#endif
diff --git a/arch/arm/include/asm/sparsemem.h b/arch/arm/include/asm/sparsemem.h
index 73e5e8513751..421e3415338a 100644
--- a/arch/arm/include/asm/sparsemem.h
+++ b/arch/arm/include/asm/sparsemem.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_SPARSEMEM_H
#define ASMARM_SPARSEMEM_H
-#include <asm/memory.h>
+#include <asm/page.h>
/*
* Two definitions are required for sparsemem:
diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h
new file mode 100644
index 000000000000..d9c28b3b6b62
--- /dev/null
+++ b/arch/arm/include/asm/spectre.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+enum {
+ SPECTRE_UNAFFECTED,
+ SPECTRE_MITIGATED,
+ SPECTRE_VULNERABLE,
+};
+
+enum {
+ __SPECTRE_V2_METHOD_BPIALL,
+ __SPECTRE_V2_METHOD_ICIALLU,
+ __SPECTRE_V2_METHOD_SMC,
+ __SPECTRE_V2_METHOD_HVC,
+ __SPECTRE_V2_METHOD_LOOP8,
+};
+
+enum {
+ SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
+ SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
+ SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
+ SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
+ SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
+};
+
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+void spectre_v2_update_state(unsigned int state, unsigned int methods);
+#else
+static inline void spectre_v2_update_state(unsigned int state,
+ unsigned int methods)
+{}
+#endif
+
+int spectre_bhb_update_vectors(unsigned int method);
+
+void cpu_v7_ca8_ibe(void);
+void cpu_v7_ca15_ibe(void);
+void cpu_v7_bugs_init(void);
+
+#endif
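A hedged sketch of how a caller might report through the interface above; the function name and the choice of BPIALL are illustrative assumptions, not taken from the patch.

/*
 * Illustrative sketch (not part of the patch): a mitigation path
 * reporting its outcome through spectre_v2_update_state().
 */
#include <linux/types.h>
#include <linux/bits.h>
#include <asm/spectre.h>

static void example_report_spectre_v2(bool hardened)
{
	if (hardened)
		spectre_v2_update_state(SPECTRE_MITIGATED,
					SPECTRE_V2_METHOD_BPIALL);
	else
		spectre_v2_update_state(SPECTRE_VULNERABLE, 0);
}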
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4bec45442072..f610a773f2be 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
@@ -21,7 +22,7 @@
 * assembler to insert an extra (16-bit) IT instruction, depending on the
* presence or absence of neighbouring conditional instructions.
*
- * To avoid this unpredictableness, an approprite IT is inserted explicitly:
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
@@ -52,24 +53,6 @@ static inline void dsb_sev(void)
* memory.
*/
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- u16 owner = READ_ONCE(lock->tickets.owner);
-
- for (;;) {
- arch_spinlock_t tmp = READ_ONCE(*lock);
-
- if (tmp.tickets.owner == tmp.tickets.next ||
- tmp.tickets.owner != owner)
- break;
-
- wfe();
- }
- smp_acquire__after_ctrl_dep();
-}
-
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -89,7 +72,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
while (lockval.tickets.next != lockval.tickets.owner) {
wfe();
- lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+ lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
}
smp_mb();
@@ -209,9 +192,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
dsb_sev();
}
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
-
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
@@ -230,11 +210,12 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
prefetchw(&rw->lock);
__asm__ __volatile__(
+" .syntax unified\n"
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
WFE("mi")
-" rsbpls %0, %1, #0\n"
+" rsbspl %0, %1, #0\n"
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
@@ -289,14 +270,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
}
}
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 47663fcb10ad..5404a2a96bf3 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -1,8 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+# error "Please do not include this file directly."
#endif
#define TICKET_SHIFT 16
diff --git a/arch/arm/include/asm/stackprotector.h b/arch/arm/include/asm/stackprotector.h
index de003327be97..0bd4979759f1 100644
--- a/arch/arm/include/asm/stackprotector.h
+++ b/arch/arm/include/asm/stackprotector.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* GCC stack protector support.
*
@@ -5,15 +6,16 @@
* the stack frame and verifying that it hasn't been overwritten when
* returning from the function. The pattern is called stack canary
* and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on ARM. This unfortunately means that on SMP
- * we cannot have a different canary value per task.
+ * "__stack_chk_guard" on ARM. This prevents SMP systems from using a
+ * different value for each task unless we enable a GCC plugin that
+ * replaces these symbol references with references to each task's own
+ * value.
*/
#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H 1
-#include <linux/random.h>
-#include <linux/version.h>
+#include <asm/thread_info.h>
extern unsigned long __stack_chk_guard;
@@ -25,14 +27,12 @@ extern unsigned long __stack_chk_guard;
*/
static __always_inline void boot_init_stack_canary(void)
{
- unsigned long canary;
-
- /* Try to get a semi random initial value. */
- get_random_bytes(&canary, sizeof(canary));
- canary ^= LINUX_VERSION_CODE;
+ unsigned long canary = get_random_canary();
current->stack_canary = canary;
+#ifndef CONFIG_STACKPROTECTOR_PER_TASK
__stack_chk_guard = current->stack_canary;
+#endif
}
#endif /* _ASM_STACKPROTECTOR_H */
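
For context on the canary mechanism described in the comment above, here is a conceptual sketch of what -fstack-protector arranges around a protected function (illustration only; the actual checks are emitted by GCC, and the local copy lives in a compiler-chosen stack slot):

extern unsigned long __stack_chk_guard;
extern void __stack_chk_fail(void);

void example_protected(void)
{
	unsigned long canary = __stack_chk_guard;	/* prologue: stash guard */
	char buf[64];

	/* ... body that might overflow buf ... */
	buf[0] = 0;

	if (canary != __stack_chk_guard)		/* epilogue: verify */
		__stack_chk_fail();			/* corrupted: abort */
}

With CONFIG_STACKPROTECTOR_PER_TASK the GCC plugin redirects the __stack_chk_guard references to each task's own value, which is why boot_init_stack_canary() only copies the canary into the global in the non-per-task case.
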
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index 7722201ead19..ba2f771cca23 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -1,7 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H
+#include <linux/llist.h>
#include <asm/ptrace.h>
+#include <asm/sections.h>
struct stackframe {
/*
@@ -12,8 +15,25 @@ struct stackframe {
unsigned long sp;
unsigned long lr;
unsigned long pc;
+
+ /* address of the LR value on the stack */
+ unsigned long *lr_addr;
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+ struct task_struct *tsk;
+#endif
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
+ bool ex_frame;
+#endif
};
+static inline bool on_thread_stack(void)
+{
+ unsigned long delta = current_stack_pointer ^ (unsigned long)current->stack;
+
+ return delta < THREAD_SIZE;
+}
+
static __always_inline
void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
{
@@ -21,10 +41,21 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
frame->sp = regs->ARM_sp;
frame->lr = regs->ARM_lr;
frame->pc = regs->ARM_pc;
+#ifdef CONFIG_KRETPROBES
+ frame->kr_cur = NULL;
+ frame->tsk = current;
+#endif
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
+ frame->ex_frame = in_entry_text(frame->pc);
+#endif
}
extern int unwind_frame(struct stackframe *frame);
extern void walk_stackframe(struct stackframe *frame,
- int (*fn)(struct stackframe *, void *), void *data);
+ bool (*fn)(void *, unsigned long), void *data);
+extern void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ unsigned long top);
+extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
#endif /* __ASM_STACKTRACE_H */
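
The XOR test in on_thread_stack() above works because task stacks are THREAD_SIZE-aligned: an address inside the stack shares every bit above the offset with the stack base, so the XOR leaves nothing but the offset. A standalone check with hypothetical addresses, assuming 8 KiB stacks:

#include <assert.h>

int main(void)
{
	unsigned long thread_size = 0x2000UL;	/* 8 KiB (order-1) stacks */
	unsigned long base   = 0xc0812000UL;	/* hypothetical, 8 KiB aligned */
	unsigned long sp_in  = 0xc08133f0UL;	/* lies in [base, base + 8K) */
	unsigned long sp_out = 0xc0900ff0UL;	/* some unrelated address */

	assert((sp_in  ^ base) == 0x13f0UL);	/* offset only: on the stack */
	assert((sp_out ^ base) >= thread_size);	/* high bits differ: not */
	return 0;
}
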
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
deleted file mode 100644
index 460d616bb2d6..000000000000
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2016 - ARM Ltd
- *
- * stage2 page table helpers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM_S2_PGTABLE_H_
-#define __ARM_S2_PGTABLE_H_
-
-#define stage2_pgd_none(pgd) pgd_none(pgd)
-#define stage2_pgd_clear(pgd) pgd_clear(pgd)
-#define stage2_pgd_present(pgd) pgd_present(pgd)
-#define stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud)
-#define stage2_pud_offset(pgd, address) pud_offset(pgd, address)
-#define stage2_pud_free(pud) pud_free(NULL, pud)
-
-#define stage2_pud_none(pud) pud_none(pud)
-#define stage2_pud_clear(pud) pud_clear(pud)
-#define stage2_pud_present(pud) pud_present(pud)
-#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd)
-#define stage2_pmd_offset(pud, address) pmd_offset(pud, address)
-#define stage2_pmd_free(pmd) pmd_free(NULL, pmd)
-
-#define stage2_pud_huge(pud) pud_huge(pud)
-
-/* Open coded p*d_addr_end that can deal with 64bit addresses */
-static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
-
- return (boundary - 1 < end - 1) ? boundary : end;
-}
-
-#define stage2_pud_addr_end(addr, end) (end)
-
-static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;
-
- return (boundary - 1 < end - 1) ? boundary : end;
-}
-
-#define stage2_pgd_index(addr) pgd_index(addr)
-
-#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep)
-#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#define stage2_pud_table_empty(pudp) false
-
-#endif /* __ARM_S2_PGTABLE_H_ */
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index cf4f3aad0fc1..6c607c68f3ad 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -1,9 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_STRING_H
#define __ASM_ARM_STRING_H
/*
* We don't do inline string functions, since the
* optimised inline asm versions are not small.
+ *
+ * The __underscore versions of some functions are for KASan to be able
+ * to replace them with instrumented versions.
*/
#define __HAVE_ARCH_STRRCHR
@@ -14,28 +18,51 @@ extern char * strchr(const char * s, int c);
#define __HAVE_ARCH_MEMCPY
extern void * memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMMOVE
extern void * memmove(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMCHR
extern void * memchr(const void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMSET
extern void * memset(void *, int, __kernel_size_t);
+extern void *__memset(void *s, int c, __kernel_size_t n);
-extern void __memzero(void *ptr, __kernel_size_t n);
-
-#define memset(p,v,n) \
- ({ \
- void *__p = (p); size_t __n = n; \
- if ((__n) != 0) { \
- if (__builtin_constant_p((v)) && (v) == 0) \
- __memzero((__p),(__n)); \
- else \
- memset((__p),(v),(__n)); \
- } \
- (__p); \
- })
+#define __HAVE_ARCH_MEMSET32
+extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
+static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
+{
+ return __memset32(p, v, n * 4);
+}
+
+#define __HAVE_ARCH_MEMSET64
+extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi);
+static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
+{
+ return __memset64(p, v, n * 8, v >> 32);
+}
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * must use non-instrumented versions of the mem*
+ * functions named __memcpy() etc. All such kernel code has
+ * been tagged with KASAN_SANITIZE_file.o = n, which means
+ * that the address sanitization argument isn't passed to the
+ * compiler, and __SANITIZE_ADDRESS__ is not set. As a result
+ * these defines kick in.
+ */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
#endif
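
A short usage sketch for the new fixed-width fillers declared above; the buffer names and values are invented, and note that the inline wrappers convert the element count into a byte count for the out-of-line helpers:

#include <linux/string.h>
#include <linux/types.h>

static void fill_example(u32 *pixels, size_t npixels,
			 u64 *qwords, size_t nqwords)
{
	/* npixels 32-bit stores and nqwords 64-bit stores, respectively */
	memset32(pixels, 0x00ff00ffU, npixels);
	memset64(qwords, 0x0123456789abcdefULL, nqwords);
}
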
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
index 6c7182f32cef..be81b9ca2ea1 100644
--- a/arch/arm/include/asm/suspend.h
+++ b/arch/arm/include/asm/suspend.h
@@ -1,13 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_SUSPEND_H
#define __ASM_ARM_SUSPEND_H
+#include <linux/types.h>
+
struct sleep_save_sp {
u32 *save_ptr_stash;
u32 save_ptr_stash_phys;
};
extern void cpu_resume(void);
+extern void cpu_resume_no_hyp(void);
extern void cpu_resume_arm(void);
extern int cpu_suspend(unsigned long, int (*)(unsigned long));
+extern void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr);
#endif
diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h
index 537fc9b91889..c6051823048b 100644
--- a/arch/arm/include/asm/swab.h
+++ b/arch/arm/include/asm/swab.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/byteorder.h
*
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index 12ebfcc1d539..9372348516ce 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_SWITCH_TO_H
#define __ASM_ARM_SWITCH_TO_H
#include <linux/thread_info.h>
+#include <asm/smp_plat.h>
/*
* For v7 SMP cores running a preemptible kernel we may be pre-empted
@@ -9,7 +11,7 @@
* to ensure that the maintenance completes in case we migrate to another
* CPU.
*/
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define __complete_pending_tlbi() dsb(ish)
#else
#define __complete_pending_tlbi()
@@ -25,6 +27,8 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
__complete_pending_tlbi(); \
+ if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || is_smp()) \
+ __this_cpu_write(__entry_task, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 9732b8e11e63..f46b3c570f92 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SYNC_BITOPS_H__
#define __ASM_SYNC_BITOPS_H__
@@ -13,14 +14,35 @@
* ops which are SMP safe even on a UP kernel.
*/
+/*
+ * Unordered
+ */
+
#define sync_set_bit(nr, p) _set_bit(nr, p)
#define sync_clear_bit(nr, p) _clear_bit(nr, p)
#define sync_change_bit(nr, p) _change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define sync_cmpxchg cmpxchg
+/*
+ * Fully ordered
+ */
+
+int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_set_bit(nr, p) _sync_test_and_set_bit(nr, p)
+
+int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p)
+
+int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_change_bit(nr, p) _sync_test_and_change_bit(nr, p)
+
+#define arch_sync_cmpxchg(ptr, old, new) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __smp_mb__before_atomic(); \
+ __ret = arch_cmpxchg_relaxed((ptr), (old), (new)); \
+ __smp_mb__after_atomic(); \
+ __ret; \
+})
#endif
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index e86c985b8c7a..18b102a30741 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Access to user system call parameters and results
*
@@ -21,7 +22,24 @@ extern const unsigned long sys_call_table[];
static inline int syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
- return task_thread_info(task)->syscall;
+ if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))
+ return task_thread_info(task)->abi_syscall;
+
+ if (task_thread_info(task)->abi_syscall == -1)
+ return -1;
+
+ return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK;
+}
+
+static inline bool __in_oabi_syscall(struct task_struct *task)
+{
+ return IS_ENABLED(CONFIG_OABI_COMPAT) &&
+ (task_thread_info(task)->abi_syscall & __NR_OABI_SYSCALL_BASE);
+}
+
+static inline bool in_oabi_syscall(void)
+{
+ return __in_oabi_syscall(current);
}
static inline void syscall_rollback(struct task_struct *task,
@@ -50,60 +68,56 @@ static inline void syscall_set_return_value(struct task_struct *task,
regs->ARM_r0 = (long) error ? error : val;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ if (nr == -1) {
+ task_thread_info(task)->abi_syscall = -1;
+ /*
+ * When the syscall number is set to -1, the syscall will be
+ * skipped. In this case the syscall return value has to be
+ * set explicitly, otherwise the first syscall argument is
+ * returned as the syscall return value.
+ */
+ syscall_set_return_value(task, regs, -ENOSYS, 0);
+ return;
+ }
+ if ((IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))) {
+ task_thread_info(task)->abi_syscall = nr;
+ return;
+ }
+ task_thread_info(task)->abi_syscall =
+ (task_thread_info(task)->abi_syscall & ~__NR_SYSCALL_MASK) |
+ (nr & __NR_SYSCALL_MASK);
+}
+
#define SYSCALL_MAX_ARGS 7
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
- if (n == 0)
- return;
-
- if (i + n > SYSCALL_MAX_ARGS) {
- unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
- unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
- pr_warn("%s called with max args %d, handling only %d\n",
- __func__, i + n, SYSCALL_MAX_ARGS);
- memset(args_bad, 0, n_bad * sizeof(args[0]));
- n = SYSCALL_MAX_ARGS - i;
- }
-
- if (i == 0) {
- args[0] = regs->ARM_ORIG_r0;
- args++;
- i++;
- n--;
- }
+ args[0] = regs->ARM_ORIG_r0;
+ args++;
- memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+ memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- if (n == 0)
- return;
-
- if (i + n > SYSCALL_MAX_ARGS) {
- pr_warn("%s called with max args %d, handling only %d\n",
- __func__, i + n, SYSCALL_MAX_ARGS);
- n = SYSCALL_MAX_ARGS - i;
- }
-
- if (i == 0) {
- regs->ARM_ORIG_r0 = args[0];
- args++;
- i++;
- n--;
- }
-
- memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+ memcpy(&regs->ARM_r0, args, 6 * sizeof(args[0]));
+ /*
+ * Also copy the first argument into ARM_ORIG_r0
+ * so that syscall_get_arguments() would return it
+ * instead of the previous value.
+ */
+ regs->ARM_ORIG_r0 = regs->ARM_r0;
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
/* ARM tasks don't change audit architectures on the fly. */
return AUDIT_ARCH_ARM;
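
Restated outside the diff, the argument layout used by the rewritten helpers is fixed: args[0] is taken from ARM_ORIG_r0 (the original r0, since r0 itself is reused for the return value) and args[1..5] come from r1..r5. A hedged sketch with an invented helper name:

#include <linux/string.h>
#include <asm/ptrace.h>

/* Illustration only: mirrors syscall_get_arguments() above. */
static void example_collect_args(struct pt_regs *regs, unsigned long args[6])
{
	args[0] = regs->ARM_ORIG_r0;				/* original first argument */
	memcpy(&args[1], &regs->ARM_r1, 5 * sizeof(args[0]));	/* r1..r5 */
}
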
diff --git a/arch/arm/include/asm/syscalls.h b/arch/arm/include/asm/syscalls.h
new file mode 100644
index 000000000000..5912e7cffa6a
--- /dev/null
+++ b/arch/arm/include/asm/syscalls.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_SYSCALLS_H
+#define __ASM_SYSCALLS_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+struct pt_regs;
+asmlinkage int sys_sigreturn(struct pt_regs *regs);
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
+asmlinkage long sys_arm_fadvise64_64(int fd, int advice,
+ loff_t offset, loff_t len);
+
+struct oldabi_stat64;
+asmlinkage long sys_oabi_stat64(const char __user * filename,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_lstat64(const char __user * filename,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_fstat64(unsigned long fd,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_fstatat64(int dfd,
+ const char __user *filename,
+ struct oldabi_stat64 __user *statbuf,
+ int flag);
+asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
+ unsigned long arg);
+struct oabi_epoll_event;
+asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
+ struct oabi_epoll_event __user *event);
+struct oabi_sembuf;
+struct old_timespec32;
+asmlinkage long sys_oabi_semtimedop(int semid,
+ struct oabi_sembuf __user *tsops,
+ unsigned nsops,
+ const struct old_timespec32 __user *timeout);
+asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops,
+ unsigned nsops);
+asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
+ void __user *ptr, long fifth);
+struct sockaddr;
+asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen);
+asmlinkage long sys_oabi_connect(int fd, struct sockaddr __user *addr, int addrlen);
+asmlinkage long sys_oabi_sendto(int fd, void __user *buff,
+ size_t len, unsigned flags,
+ struct sockaddr __user *addr,
+ int addrlen);
+struct user_msghdr;
+asmlinkage long sys_oabi_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args);
+
+#endif
diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h
index 3860cbd401ec..ef7fdb588b5f 100644
--- a/arch/arm/include/asm/system_info.h
+++ b/arch/arm/include/asm/system_info.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_SYSTEM_INFO_H
#define __ASM_ARM_SYSTEM_INFO_H
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 8c4a89f5ce7d..98b37340376b 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_SYSTEM_MISC_H
#define __ASM_ARM_SYSTEM_MISC_H
@@ -7,13 +8,27 @@
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/reboot.h>
+#include <linux/percpu.h>
extern void cpu_init(void);
void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
extern void (*arm_pm_idle)(void);
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+typedef void (*harden_branch_predictor_fn_t)(void);
+DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+static inline void harden_branch_predictor(void)
+{
+ harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
+ smp_processor_id());
+ if (fn)
+ fn();
+}
+#else
+#define harden_branch_predictor() do { } while (0)
+#endif
+
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
@@ -22,11 +37,6 @@ extern void (*arm_pm_idle)(void);
extern unsigned int user_debug;
-static inline int handle_guest_sea(phys_addr_t addr, unsigned int esr)
-{
- return -1;
-}
-
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_ARM_SYSTEM_MISC_H */
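
A hypothetical sketch of how a CPU-specific Spectre-v2 mitigation could be wired into the new per-CPU hook; the function names here are invented (the real registration happens in the CPU bugs code), but the BPIALL write is the architectural branch-predictor invalidate:

#include <asm/system_misc.h>

static void example_bpiall(void)
{
	/* BPIALL: invalidate the entire branch predictor array */
	asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (0) : "memory");
}

static void example_install(unsigned int cpu)
{
	per_cpu(harden_branch_predictor_fn, cpu) = example_bpiall;
}
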
diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h
index 8578d726ad78..e1f7dca86a22 100644
--- a/arch/arm/include/asm/tcm.h
+++ b/arch/arm/include/asm/tcm.h
@@ -1,33 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Copyright (C) 2008-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
*
* Author: Rickard Andersson <rickard.andersson@stericsson.com>
* Author: Linus Walleij <linus.walleij@stericsson.com>
- *
*/
#ifndef __ASMARM_TCM_H
#define __ASMARM_TCM_H
-#ifndef CONFIG_HAVE_TCM
-#error "You should not be including tcm.h unless you have a TCM!"
-#endif
+#ifdef CONFIG_HAVE_TCM
#include <linux/compiler.h>
/* Tag variables with this */
-#define __tcmdata __section(.tcm.data)
+#define __tcmdata __section(".tcm.data")
/* Tag constants with this */
-#define __tcmconst __section(.tcm.rodata)
+#define __tcmconst __section(".tcm.rodata")
/* Tag functions inside TCM called from outside TCM with this */
-#define __tcmfunc __attribute__((long_call)) __section(.tcm.text) noinline
+#define __tcmfunc __attribute__((long_call)) __section(".tcm.text") noinline
/* Tag function inside TCM called from inside TCM with this */
-#define __tcmlocalfunc __section(.tcm.text)
+#define __tcmlocalfunc __section(".tcm.text")
void *tcm_alloc(size_t len);
void tcm_free(void *addr, size_t len);
bool tcm_dtcm_present(void);
bool tcm_itcm_present(void);
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+static inline void tcm_init(void)
+{
+}
+#endif
#endif
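
A minimal usage sketch for the TCM annotations above, assuming CONFIG_HAVE_TCM and a platform that actually provides ITCM/DTCM; the names are invented:

#include <asm/tcm.h>

static int hit_count __tcmdata;	/* placed in .tcm.data (DTCM) */

/* placed in .tcm.text (ITCM); long_call lets normal kernel text reach it */
int __tcmfunc tcm_hit(void)
{
	return ++hit_count;
}

Run-time buffers can likewise be carved out of DTCM with tcm_alloc()/tcm_free().
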
diff --git a/arch/arm/include/asm/patch.h b/arch/arm/include/asm/text-patching.h
index 77e054c2f6cd..0b48247c4600 100644
--- a/arch/arm/include/asm/patch.h
+++ b/arch/arm/include/asm/text-patching.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARM_KERNEL_PATCH_H
#define _ARM_KERNEL_PATCH_H
diff --git a/arch/arm/include/asm/therm.h b/arch/arm/include/asm/therm.h
index f002f0197d78..17b0bc9b5e9b 100644
--- a/arch/arm/include/asm/therm.h
+++ b/arch/arm/include/asm/therm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/therm.h: Definitions for Dallas Semiconductor
* DS1620 thermometer driver (as used in the Rebel.com NetWinder)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 776757d1604a..943ffcf069d2 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/thread_info.h
*
* Copyright (C) 2002 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_THREAD_INFO_H
#define __ASM_ARM_THREAD_INFO_H
@@ -16,17 +13,34 @@
#include <asm/fpstate.h>
#include <asm/page.h>
+#ifdef CONFIG_KASAN
+/*
+ * KASan uses a lot of extra stack space so the thread size order needs to
+ * be increased.
+ */
+#define THREAD_SIZE_ORDER 2
+#else
#define THREAD_SIZE_ORDER 1
+#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP (THREAD_SIZE - 8)
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
+#define OVERFLOW_STACK_SIZE SZ_4K
+
#ifndef __ASSEMBLY__
struct task_struct;
-#include <asm/types.h>
+DECLARE_PER_CPU(struct task_struct *, __entry_task);
-typedef unsigned long mm_segment_t;
+#include <asm/types.h>
+#include <asm/traps.h>
struct cpu_context_save {
__u32 r4;
@@ -49,17 +63,11 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
- mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
__u32 cpu_domain; /* cpu domain */
struct cpu_context_save cpu_context; /* cpu context */
- __u32 syscall; /* syscall number */
- __u8 used_cp[16]; /* thread used copro */
+ __u32 abi_syscall; /* ABI type and syscall nr */
unsigned long tp_value[2]; /* TLS registers */
-#ifdef CONFIG_CRUNCH
- struct crunch_state crunchstate;
-#endif
union fp_state fpstate __attribute__((aligned(8)));
union vfp_state vfpstate;
#ifdef CONFIG_ARM_THUMBEE
@@ -69,29 +77,13 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
- .task = &tsk, \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
-/*
- * how to get the current stack pointer in C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
-/*
- * how to get the thread information struct from C
- */
-static inline struct thread_info *current_thread_info(void) __attribute_const__;
-
-static inline struct thread_info *current_thread_info(void)
+static inline struct task_struct *thread_task(struct thread_info* ti)
{
- return (struct thread_info *)
- (current_stack_pointer & ~(THREAD_SIZE - 1));
+ return (struct task_struct *)ti;
}
#define thread_saved_pc(tsk) \
@@ -107,47 +99,60 @@ static inline struct thread_info *current_thread_info(void)
((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
#endif
-extern void crunch_task_disable(struct thread_info *);
-extern void crunch_task_copy(struct thread_info *, void *);
-extern void crunch_task_restore(struct thread_info *, void *);
-extern void crunch_task_release(struct thread_info *);
-
extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
extern void iwmmxt_task_restore(struct thread_info *, void *);
extern void iwmmxt_task_release(struct thread_info *);
extern void iwmmxt_task_switch(struct thread_info *);
+extern int iwmmxt_undef_handler(struct pt_regs *, u32);
+
+static inline void register_iwmmxt_undef_handler(void)
+{
+ static struct undef_hook iwmmxt_undef_hook = {
+ .instr_mask = 0x0c000e00,
+ .instr_val = 0x0c000000,
+ .cpsr_mask = MODE_MASK | PSR_T_BIT,
+ .cpsr_val = USR_MODE,
+ .fn = iwmmxt_undef_handler,
+ };
+
+ register_undef_hook(&iwmmxt_undef_hook);
+}
+
extern void vfp_sync_hwstate(struct thread_info *);
extern void vfp_flush_hwstate(struct thread_info *);
struct user_vfp;
struct user_vfp_exc;
-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
- struct user_vfp_exc __user *);
-extern int vfp_restore_user_hwstate(struct user_vfp __user *,
- struct user_vfp_exc __user *);
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
+extern int vfp_restore_user_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
#endif
/*
* thread information flags:
* TIF_USEDFPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
+ *
+ * Any bit in the range of 0..15 will cause do_work_pending() to be invoked.
*/
#define TIF_SIGPENDING 0 /* signal pending */
#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_UPROBE 3 /* breakpointed or singlestepping */
-#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
-#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
-#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */
-#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK 20
+#define TIF_RESTORE_SIGMASK 19
+#define TIF_SYSCALL_TRACE 20 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 21 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 22 /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP 23 /* seccomp syscall filtering active */
+
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -157,6 +162,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
/* Checks for any syscall work in entry-common.S */
@@ -167,7 +173,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
* Change these and you break ASM code in entry-common.S
*/
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NOTIFY_SIGNAL)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
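
Worked out for the usual 4 KiB page size, the constants above give:

/*
 * Assuming PAGE_SIZE == 4 KiB:
 *   without KASAN: THREAD_SIZE_ORDER = 1  ->  THREAD_SIZE = 4K << 1 =  8 KiB
 *   with KASAN:    THREAD_SIZE_ORDER = 2  ->  THREAD_SIZE = 4K << 2 = 16 KiB
 * THREAD_START_SP = THREAD_SIZE - 8 leaves 8 bytes of headroom at the top
 * of the stack, and OVERFLOW_STACK_SIZE reserves a separate 4 KiB stack
 * used when a stack overflow is caught (with CONFIG_VMAP_STACK).
 */
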
diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h
index 1dc980675894..1c1542e2ed63 100644
--- a/arch/arm/include/asm/thread_notify.h
+++ b/arch/arm/include/asm/thread_notify.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/thread_notify.h
*
* Copyright (C) 2006 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASMARM_THREAD_NOTIFY_H
#define ASMARM_THREAD_NOTIFY_H
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index f6fcc67ef06e..6d1337c169cd 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/timex.h
*
* Copyright (C) 1997,1998 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Architecture Specific TIME specifications
*/
#ifndef _ASMARM_TIMEX_H
@@ -14,5 +11,6 @@
typedef unsigned long cycles_t;
#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
+#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
#endif
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 3f2eb76243e3..ea4fbe7b17f6 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/tlb.h
*
* Copyright (C) 2002 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Experimentation shows that on a StrongARM, it appears to be faster
* to use the "invalidate whole tlb" rather than "invalidate single
* tlb" for this.
@@ -29,260 +26,34 @@
#else /* !CONFIG_MMU */
-#include <linux/swap.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-
-#define MMU_GATHER_BUNDLE 8
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-static inline void __tlb_remove_table(void *_table)
-{
- free_page_and_swap_cache((struct page *)_table);
-}
-
-struct mmu_table_batch {
- struct rcu_head rcu;
- unsigned int nr;
- void *tables[0];
-};
-
-#define MAX_TABLE_BATCH \
- ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry)
-#else
-#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry)
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-
-/*
- * TLB handling. This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
- struct mm_struct *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- struct mmu_table_batch *batch;
- unsigned int need_flush;
-#endif
- unsigned int fullmm;
- struct vm_area_struct *vma;
- unsigned long start, end;
- unsigned long range_start;
- unsigned long range_end;
- unsigned int nr;
- unsigned int max;
- struct page **pages;
- struct page *local[MMU_GATHER_BUNDLE];
-};
-
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-/*
- * This is unnecessarily complex. There's three ways the TLB shootdown
- * code is used:
- * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
- * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- * tlb->vma will be non-NULL.
- * 2. Unmapping all vmas. See exit_mmap().
- * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- * tlb->vma will be non-NULL. Additionally, page tables will be freed.
- * 3. Unmapping argument pages. See shift_arg_pages().
- * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- * tlb->vma will be NULL.
- */
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
- if (tlb->fullmm || !tlb->vma)
- flush_tlb_mm(tlb->mm);
- else if (tlb->range_end > 0) {
- flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
- tlb->range_start = TASK_SIZE;
- tlb->range_end = 0;
- }
-}
-
-static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
-{
- if (!tlb->fullmm) {
- if (addr < tlb->range_start)
- tlb->range_start = addr;
- if (addr + PAGE_SIZE > tlb->range_end)
- tlb->range_end = addr + PAGE_SIZE;
- }
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
- unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
- if (addr) {
- tlb->pages = (void *)addr;
- tlb->max = PAGE_SIZE / sizeof(struct page *);
- }
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- tlb_flush(tlb);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb_table_flush(tlb);
-#endif
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- free_pages_and_swap_cache(tlb->pages, tlb->nr);
- tlb->nr = 0;
- if (tlb->pages == tlb->local)
- __tlb_alloc_page(tlb);
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
-
-static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->fullmm = !(start | (end+1));
- tlb->start = start;
- tlb->end = end;
- tlb->vma = NULL;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
- tlb->nr = 0;
- __tlb_alloc_page(tlb);
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb->batch = NULL;
-#endif
-}
-
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-
- if (tlb->pages != tlb->local)
- free_pages((unsigned long)tlb->pages, 0);
-}
-
-/*
- * Memorize the range for the TLB flush.
- */
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
-{
- tlb_add_flush(tlb, addr);
-}
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- tlb_remove_tlb_entry(tlb, ptep, address)
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush. When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
- if (!tlb->fullmm) {
- flush_cache_range(vma, vma->vm_start, vma->vm_end);
- tlb->vma = vma;
- tlb->range_start = TASK_SIZE;
- tlb->range_end = 0;
- }
-}
+#include <asm-generic/tlb.h>
static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
- if (!tlb->fullmm)
- tlb_flush(tlb);
-}
-
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- tlb->pages[tlb->nr++] = page;
- VM_WARN_ON(tlb->nr > tlb->max);
- if (tlb->nr == tlb->max)
- return true;
- return false;
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- if (__tlb_remove_page(tlb, page))
- tlb_flush_mmu(tlb);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
+__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
- return __tlb_remove_page(tlb, page);
-}
+ struct ptdesc *ptdesc = page_ptdesc(pte);
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return tlb_remove_page(tlb, page);
-}
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
- unsigned long addr)
-{
- pgtable_page_dtor(pte);
-
-#ifdef CONFIG_ARM_LPAE
- tlb_add_flush(tlb, addr);
-#else
+#ifndef CONFIG_ARM_LPAE
/*
* With the classic ARM MMU, a pte page has two corresponding pmd
* entries, each covering 1MB.
*/
- addr &= PMD_MASK;
- tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
- tlb_add_flush(tlb, addr + SZ_1M);
+ addr = (addr & PMD_MASK) + SZ_1M;
+ __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif
- tlb_remove_entry(tlb, pte);
-}
-
-static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
- unsigned long addr)
-{
-#ifdef CONFIG_ARM_LPAE
- tlb_add_flush(tlb, addr);
- tlb_remove_entry(tlb, virt_to_page(pmdp));
-#endif
+ tlb_remove_ptdesc(tlb, ptdesc);
}
static inline void
-tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
- tlb_add_flush(tlb, addr);
-}
-
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm) do { } while (0)
+#ifdef CONFIG_ARM_LPAE
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
-{
+ tlb_remove_ptdesc(tlb, ptdesc);
+#endif
}
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 1897b5196fb5..38c6e4a2a0b6 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/tlbflush.h
*
* Copyright (C) 1999-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H
@@ -256,7 +253,7 @@ extern struct cpu_tlb_fns cpu_tlb;
* space.
* - mm - mm_struct describing address space
*
- * flush_tlb_range(mm,start,end)
+ * flush_tlb_range(vma,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
@@ -264,18 +261,11 @@ extern struct cpu_tlb_fns cpu_tlb;
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
- * flush_tlb_page(vaddr,vma)
+ * flush_tlb_page(vma, uaddr)
*
* Invalidate the specified page in the specified address range.
+ * - vma - vm_area_struct describing address range
* - vaddr - virtual address (may not be aligned)
- * - vma - vma_struct describing address range
- *
- * flush_kern_tlb_page(kaddr)
- *
- * Invalidate the TLB entry for the specified page. The address
- * will be in the kernels virtual memory space. Current uses
- * only require the D-TLB to be invalidated.
- * - kaddr - Kernel virtual memory address
*/
/*
@@ -629,18 +619,22 @@ extern void flush_bp_all(void);
* If PG_dcache_clean is not set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
* back to the page. On ARMv6 and later, the cache coherency is handled via
- * the set_pte_at() function.
+ * the set_ptes() function.
*/
#if __LINUX_ARM_ARCH__ < 6
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep);
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr);
#else
-static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+ unsigned int nr)
{
}
#endif
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
#endif
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 5f833f7adba1..3dcd0f71a0da 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_TLS_H
#define __ASMARM_TLS_H
@@ -11,44 +12,59 @@
.macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
- mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
- mcr p15, 0, \tpuser, c13, c0, 2 @ and the user r/w register
+ @ TLS register update is deferred until return to user space
+ mcr p15, 0, \tpuser, c13, c0, 2 @ set the user r/w register
str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
.endm
.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
- ldr \tmp1, =elf_hwcap
- ldr \tmp1, [\tmp1, #0]
+#ifdef CONFIG_SMP
+ALT_SMP(nop)
+ALT_UP_B(.L0_\@)
+ .subsection 1
+#endif
+.L0_\@:
+ ldr_va \tmp1, elf_hwcap
mov \tmp2, #0xffff0fff
tst \tmp1, #HWCAP_TLS @ hardware TLS available?
streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
- mrcne p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
- mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
- mcrne p15, 0, \tpuser, c13, c0, 2 @ set user r/w register
- strne \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
+ beq .L2_\@
+ mcr p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
+#ifdef CONFIG_SMP
+ b .L1_\@
+ .previous
+#endif
+.L1_\@: switch_tls_v6k \base, \tp, \tpuser, \tmp1, \tmp2
+.L2_\@:
.endm
.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
mov \tmp1, #0xffff0fff
str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0
.endm
+#else
+#include <asm/smp_plat.h>
#endif
#ifdef CONFIG_TLS_REG_EMUL
#define tls_emu 1
#define has_tls_reg 1
+#define defer_tls_reg_update 0
#define switch_tls switch_tls_none
#elif defined(CONFIG_CPU_V6)
#define tls_emu 0
#define has_tls_reg (elf_hwcap & HWCAP_TLS)
+#define defer_tls_reg_update is_smp()
#define switch_tls switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu 0
#define has_tls_reg 1
+#define defer_tls_reg_update 1
#define switch_tls switch_tls_v6k
#else
#define tls_emu 0
#define has_tls_reg 0
+#define defer_tls_reg_update 0
#define switch_tls switch_tls_software
#endif
@@ -77,10 +93,10 @@ static inline void set_tls(unsigned long val)
barrier();
if (!tls_emu) {
- if (has_tls_reg) {
+ if (has_tls_reg && !defer_tls_reg_update) {
asm("mcr p15, 0, %0, c13, c0, 3"
: : "r" (val));
- } else {
+ } else if (!has_tls_reg) {
#ifdef CONFIG_KUSER_HELPERS
/*
* User space must never try to access this
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 370f7a732900..ad36b6570067 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -1,28 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_TOPOLOGY_H
#define _ASM_ARM_TOPOLOGY_H
#ifdef CONFIG_ARM_CPU_TOPOLOGY
#include <linux/cpumask.h>
+#include <linux/arch_topology.h>
+
+/* big.LITTLE switcher is incompatible with frequency invariance */
+#ifndef CONFIG_BL_SWITCHER
+/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_set_freq_scale topology_set_freq_scale
+#define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant
+#define arch_scale_freq_ref topology_get_freq_ref
+#endif
-struct cputopo_arm {
- int thread_id;
- int core_id;
- int socket_id;
- cpumask_t thread_sibling;
- cpumask_t core_sibling;
-};
-
-extern struct cputopo_arm cpu_topology[NR_CPUS];
+/* Replace task scheduler's default cpu-invariant accounting */
+#define arch_scale_cpu_capacity topology_get_cpu_scale
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
-#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
-#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+/* Enable topology flag updates */
+#define arch_update_cpu_topology topology_update_cpu_topology
-void init_cpu_topology(void);
-void store_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(int cpu);
+/* Replace task scheduler's default HW pressure API */
+#define arch_scale_hw_pressure topology_get_hw_pressure
+#define arch_update_hw_pressure topology_update_hw_pressure
#else
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index f555bb3664dc..2621b9fb9b19 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASMARM_TRAP_H
#define _ASMARM_TRAP_H
+#include <linux/linkage.h>
#include <linux/list.h>
struct pt_regs;
@@ -18,7 +20,6 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static inline int __in_irqentry_text(unsigned long ptr)
{
extern char __irqentry_text_start[];
@@ -27,29 +28,21 @@ static inline int __in_irqentry_text(unsigned long ptr)
return ptr >= (unsigned long)&__irqentry_text_start &&
ptr < (unsigned long)&__irqentry_text_end;
}
-#else
-static inline int __in_irqentry_text(unsigned long ptr)
-{
- return 0;
-}
-#endif
-
-static inline int in_exception_text(unsigned long ptr)
-{
- extern char __exception_text_start[];
- extern char __exception_text_end[];
- int in;
-
- in = ptr >= (unsigned long)&__exception_text_start &&
- ptr < (unsigned long)&__exception_text_end;
- return in ? : __in_irqentry_text(ptr);
-}
-
-extern void __init early_trap_init(void *);
-extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
-extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
+extern void early_trap_init(void *);
+extern void dump_backtrace_entry(unsigned long where, unsigned long from,
+ unsigned long frame, const char *loglvl);
+extern void ptrace_break(struct pt_regs *regs);
extern void *vectors_page;
+asmlinkage void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl);
+asmlinkage void do_undefinstr(struct pt_regs *regs);
+asmlinkage void handle_fiq_as_nmi(struct pt_regs *regs);
+asmlinkage void bad_mode(struct pt_regs *regs, int reason);
+asmlinkage int arm_syscall(int no, struct pt_regs *regs);
+asmlinkage void baddataabort(int code, unsigned long instr, struct pt_regs *regs);
+asmlinkage void __div0(void);
+asmlinkage void handle_bad_stack(struct pt_regs *regs);
+
#endif
diff --git a/arch/arm/include/asm/trusted_foundations.h b/arch/arm/include/asm/trusted_foundations.h
deleted file mode 100644
index 00748350cf72..000000000000
--- a/arch/arm/include/asm/trusted_foundations.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-/*
- * Support for the Trusted Foundations secure monitor.
- *
- * Trusted Foundation comes active on some ARM consumer devices (most
- * Tegra-based devices sold on the market are concerned). Such devices can only
- * perform some basic operations, like setting the CPU reset vector, through
- * SMC calls to the secure monitor. The calls are completely specific to
- * Trusted Foundations, and do *not* follow the SMC calling convention or the
- * PSCI standard.
- */
-
-#ifndef __ASM_ARM_TRUSTED_FOUNDATIONS_H
-#define __ASM_ARM_TRUSTED_FOUNDATIONS_H
-
-#include <linux/printk.h>
-#include <linux/bug.h>
-#include <linux/of.h>
-#include <linux/cpu.h>
-#include <linux/smp.h>
-
-struct trusted_foundations_platform_data {
- unsigned int version_major;
- unsigned int version_minor;
-};
-
-#if IS_ENABLED(CONFIG_TRUSTED_FOUNDATIONS)
-
-void register_trusted_foundations(struct trusted_foundations_platform_data *pd);
-void of_register_trusted_foundations(void);
-
-#else /* CONFIG_TRUSTED_FOUNDATIONS */
-
-static inline void register_trusted_foundations(
- struct trusted_foundations_platform_data *pd)
-{
- /*
- * If the system requires TF and we cannot provide it, continue booting
- * but disable features that cannot be provided.
- */
- pr_err("No support for Trusted Foundations, continuing in degraded mode.\n");
- pr_err("Secondary processors as well as CPU PM will be disabled.\n");
-#if IS_ENABLED(CONFIG_SMP)
- setup_max_cpus = 0;
-#endif
- cpu_idle_poll_ctrl(true);
-}
-
-static inline void of_register_trusted_foundations(void)
-{
- /*
- * If we find the target should enable TF but does not support it,
- * fail as the system won't be able to do much anyway
- */
- if (of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations"))
- register_trusted_foundations(NULL);
-}
-#endif /* CONFIG_TRUSTED_FOUNDATIONS */
-
-#endif
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
new file mode 100644
index 000000000000..4bccd895d954
--- /dev/null
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_UACCESS_ASM_H__
+#define __ASM_UACCESS_ASM_H__
+
+#include <asm/asm-offsets.h>
+#include <asm/domain.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+ .macro csdb
+#ifdef CONFIG_THUMB2_KERNEL
+ .inst.w 0xf3af8014
+#else
+ .inst 0xe320f014
+#endif
+ .endm
+
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcscc \tmp, \tmp, \limit
+ bcs \bad
+#ifdef CONFIG_CPU_SPECTRE
+ movcs \addr, #0
+ csdb
+#endif
+#endif
+ .endm
+
+ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+ sub \tmp, \limit, #1
+ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
+ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
+ subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
+ movlo \addr, #0 @ if (tmp < 0) addr = NULL
+ csdb
+#endif
+ .endm
+
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
+ .macro uaccess_disable, tmp, isb=1
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_ENABLE
+ mcr p15, 0, \tmp, c3, c0, 0
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+ .macro uaccess_disable, tmp, isb=1
+ /*
+ * Disable TTBR0 page table walks (EDP0 = 1), use the reserved ASID
+ * from TTBR1 (A1 = 1) and enable TTBR1 page table walks for kernel
+ * addresses by reducing TTBR0 range to 32MB (T0SZ = 7).
+ */
+ mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
+ orr \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+ orr \tmp, \tmp, #TTBCR_A1
+ mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ /*
+ * Enable TTBR0 page table walks (T0SZ = 0, EDP0 = 0) and ASID from
+ * TTBR0 (A1 = 0).
+ */
+ mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
+ bic \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+ bic \tmp, \tmp, #TTBCR_A1
+ mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+#else
+
+ .macro uaccess_disable, tmp, isb=1
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ .endm
+
+#endif
+
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
+#define DACR(x...) x
+#else
+#define DACR(x...)
+#endif
+
+#ifdef CONFIG_CPU_TTBR0_PAN
+#define PAN(x...) x
+#else
+#define PAN(x...)
+#endif
+
+ /*
+ * Save the address limit on entry to a privileged exception.
+ *
+ * If we are using the DACR for kernel access by the user accessors
+ * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
+ * back to client mode, whether or not \disable is set.
+ *
+ * If we are using SW PAN, set the DACR user domain to no access
+ * if \disable is set.
+ */
+ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
+ DACR( mrc p15, 0, \tmp0, c3, c0, 0)
+ DACR( str \tmp0, [sp, #SVC_DACR])
+ PAN( mrc p15, 0, \tmp0, c2, c0, 2)
+ PAN( str \tmp0, [sp, #SVC_TTBCR])
+ .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
+ /* kernel=client, user=no access */
+ mov \tmp2, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
+ /* kernel=client */
+ bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
+ orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .endif
+ .endm
+
+ /* Restore the user access state previously saved by uaccess_entry */
+ .macro uaccess_exit, tsk, tmp0, tmp1
+ DACR( ldr \tmp0, [sp, #SVC_DACR])
+ DACR( mcr p15, 0, \tmp0, c3, c0, 0)
+ PAN( ldr \tmp0, [sp, #SVC_TTBCR])
+ PAN( mcr p15, 0, \tmp0, c2, c0, 2)
+ .endm
+
+#undef DACR
+#undef PAN
+
+#endif /* __ASM_UACCESS_ASM_H__ */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 0bf2347495f1..d6ae80b5df36 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/uaccess.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H
@@ -11,10 +8,14 @@
/*
* User space memory access functions
*/
+#include <linux/kernel.h>
#include <linux/string.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/domain.h>
+#include <linux/unaligned.h>
#include <asm/unified.h>
+#include <asm/pgtable.h>
+#include <asm/proc-fns.h>
#include <asm/compiler.h>
#include <asm/extable.h>
@@ -25,9 +26,10 @@
* perform such accesses (eg, via list poison values) which could then
 * be exploited for privilege escalation.
*/
-static inline unsigned int uaccess_save_and_enable(void)
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
+static __always_inline unsigned int uaccess_save_and_enable(void)
{
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
unsigned int old_domain = get_domain();
/* Set the current domain access to permit user accesses */
@@ -35,19 +37,49 @@ static inline unsigned int uaccess_save_and_enable(void)
domain_val(DOMAIN_USER, DOMAIN_CLIENT));
return old_domain;
+}
+
+static __always_inline void uaccess_restore(unsigned int flags)
+{
+ /* Restore the user access mask */
+ set_domain(flags);
+}
+
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+static __always_inline unsigned int uaccess_save_and_enable(void)
+{
+ unsigned int old_ttbcr = cpu_get_ttbcr();
+
+ /*
+ * Enable TTBR0 page table walks (T0SZ = 0, EDP0 = 0) and ASID from
+ * TTBR0 (A1 = 0).
+ */
+ cpu_set_ttbcr(old_ttbcr & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK));
+ isb();
+
+ return old_ttbcr;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+ cpu_set_ttbcr(flags);
+ isb();
+}
+
#else
+
+static inline unsigned int uaccess_save_and_enable(void)
+{
return 0;
-#endif
}
static inline void uaccess_restore(unsigned int flags)
{
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- /* Restore the user access mask */
- set_domain(flags);
-#endif
}
+#endif
+
/*
* These two are intentionally not defined anywhere - if the kernel
* code generates any references to them, that's a bug.
@@ -55,35 +87,42 @@ static inline void uaccess_restore(unsigned int flags)
extern int __get_user_bad(void);
extern int __put_user_bad(void);
-/*
- * Note that this is actually 0x1,0000,0000
- */
-#define KERNEL_DS 0x00000000
-#define get_ds() (KERNEL_DS)
-
#ifdef CONFIG_MMU
-#define USER_DS TASK_SIZE
-#define get_fs() (current_thread_info()->addr_limit)
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
-static inline void set_fs(mm_segment_t fs)
+/*
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
+ * is above the current addr_limit.
+ */
+#define uaccess_mask_range_ptr(ptr, size) \
+ ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
+ size_t size)
{
- current_thread_info()->addr_limit = fs;
- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+ void __user *safe_ptr = (void __user *)ptr;
+ unsigned long tmp;
+
+ asm volatile(
+ " .syntax unified\n"
+ " sub %1, %3, #1\n"
+ " subs %1, %1, %0\n"
+ " addhs %1, %1, #1\n"
+ " subshs %1, %1, %2\n"
+ " movlo %0, #0\n"
+ : "+r" (safe_ptr), "=&r" (tmp)
+ : "r" (size), "r" (TASK_SIZE)
+ : "cc");
+
+ csdb();
+ return safe_ptr;
}
-#define segment_eq(a, b) ((a) == (b))
-
-/* We use 33-bit arithmetic here... */
-#define __range_ok(addr, size) ({ \
- unsigned long flag, roksum; \
- __chk_user_ptr(addr); \
- __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
- : "=&r" (flag), "=&r" (roksum) \
- : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
- : "cc"); \
- flag; })
-
/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
@@ -104,16 +143,6 @@ extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);
-#define __GUP_CLOBBER_1 "lr", "cc"
-#ifdef CONFIG_CPU_USE_DOMAINS
-#define __GUP_CLOBBER_2 "ip", "lr", "cc"
-#else
-#define __GUP_CLOBBER_2 "lr", "cc"
-#endif
-#define __GUP_CLOBBER_4 "lr", "cc"
-#define __GUP_CLOBBER_32t_8 "lr", "cc"
-#define __GUP_CLOBBER_8 "lr", "cc"
-
#define __get_user_x(__r2, __p, __e, __l, __s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%1", "r2") \
@@ -121,7 +150,7 @@ extern int __get_user_64t_4(void *);
"bl __get_user_" #__s \
: "=&r" (__e), "=r" (__r2) \
: "0" (__p), "r" (__l) \
- : __GUP_CLOBBER_##__s)
+ : "ip", "lr", "cc")
/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
@@ -143,7 +172,7 @@ extern int __get_user_64t_4(void *);
"bl __get_user_64t_" #__s \
: "=&r" (__e), "=r" (__r2) \
: "0" (__p), "r" (__l) \
- : __GUP_CLOBBER_##__s)
+ : "ip", "lr", "cc")
#else
#define __get_user_x_64t __get_user_x
#endif
@@ -151,12 +180,13 @@ extern int __get_user_64t_4(void *);
#define __get_user_check(x, p) \
({ \
- unsigned long __limit = current_thread_info()->addr_limit - 1; \
- register const typeof(*(p)) __user *__p asm("r0") = (p);\
- register typeof(x) __r2 asm("r2"); \
+ unsigned long __limit = TASK_SIZE - 1; \
+ register typeof(*(p)) __user *__p asm("r0") = (p); \
+ register __inttype(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
+ int __tmp_e; \
switch (sizeof(*(__p))) { \
case 1: \
if (sizeof((x)) >= 8) \
@@ -184,9 +214,10 @@ extern int __get_user_64t_4(void *);
break; \
default: __e = __get_user_bad(); break; \
} \
+ __tmp_e = __e; \
uaccess_restore(__ua_flags); \
x = (typeof(*(p))) __r2; \
- __e; \
+ __tmp_e; \
})
#define get_user(x, p) \
@@ -202,7 +233,7 @@ extern int __put_user_8(void *, unsigned long long);
#define __put_user_check(__pu_val, __ptr, __err, __s) \
({ \
- unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ unsigned long __limit = TASK_SIZE - 1; \
register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
register const void __user *__p asm("r0") = __ptr; \
register unsigned long __l asm("r1") = __limit; \
@@ -219,29 +250,22 @@ extern int __put_user_8(void *, unsigned long long);
#else /* CONFIG_MMU */
-/*
- * uClinux has only one addr space, so has simplified address limits.
- */
-#define USER_DS KERNEL_DS
-
-#define segment_eq(a, b) (1)
-#define __addr_ok(addr) ((void)(addr), 1)
-#define __range_ok(addr, size) ((void)(addr), 0)
-#define get_fs() (KERNEL_DS)
-
-static inline void set_fs(mm_segment_t fs)
-{
-}
-
#define get_user(x, p) __get_user(x, p)
#define __put_user_check __put_user_nocheck
#endif /* CONFIG_MMU */
-#define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
+#include <asm-generic/access_ok.h>
-#define user_addr_max() \
- (uaccess_kernel() ? ~0UL : get_fs())
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1, it is not worth fixing the non-
+ * verifying accessors, because we need to add verification of the
+ * address space there. Force these to use the standard get_user()
+ * version instead.
+ */
+#define __get_user(x, ptr) get_user(x, ptr)
+#else
/*
* The "__xxx" versions of the user access functions do not verify the
@@ -255,37 +279,40 @@ static inline void set_fs(mm_segment_t fs)
#define __get_user(x, ptr) \
({ \
long __gu_err = 0; \
- __get_user_err((x), (ptr), __gu_err); \
+ __get_user_err((x), (ptr), __gu_err, TUSER()); \
__gu_err; \
})
-#define __get_user_error(x, ptr, err) \
-({ \
- __get_user_err((x), (ptr), err); \
- (void) 0; \
-})
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
-#define __get_user_err(x, ptr, err) \
+#define __get_user_err(x, ptr, err, __t) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
- unsigned long __gu_val; \
+ __long_type(x) __gu_val; \
unsigned int __ua_flags; \
__chk_user_ptr(ptr); \
might_fault(); \
__ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
- case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
- case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
- case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
+ case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
+ case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
+ case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
+ case 8: __get_user_asm_dword(__gu_val, __gu_addr, err, __t); break; \
default: (__gu_val) = __get_user_bad(); \
} \
uaccess_restore(__ua_flags); \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
+#endif
#define __get_user_asm(x, addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(instr) " %1, [%2], #0\n" \
+ "1: " instr " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -301,30 +328,54 @@ do { \
: "r" (addr), "i" (-EFAULT) \
: "cc")
-#define __get_user_asm_byte(x, addr, err) \
- __get_user_asm(x, addr, err, ldrb)
+#define __get_user_asm_byte(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldrb" __t)
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __get_user_asm_half(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldrh" __t)
+
+#else
#ifndef __ARMEB__
-#define __get_user_asm_half(x, __gu_addr, err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
unsigned long __b1, __b2; \
- __get_user_asm_byte(__b1, __gu_addr, err); \
- __get_user_asm_byte(__b2, __gu_addr + 1, err); \
+ __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+ __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
(x) = __b1 | (__b2 << 8); \
})
#else
-#define __get_user_asm_half(x, __gu_addr, err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
unsigned long __b1, __b2; \
- __get_user_asm_byte(__b1, __gu_addr, err); \
- __get_user_asm_byte(__b2, __gu_addr + 1, err); \
+ __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+ __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
(x) = (__b1 << 8) | __b2; \
})
#endif
-#define __get_user_asm_word(x, addr, err) \
- __get_user_asm(x, addr, err, ldr)
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#define __get_user_asm_word(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldr" __t)
+#ifdef __ARMEB__
+#define __WORD0_OFFS 4
+#define __WORD1_OFFS 0
+#else
+#define __WORD0_OFFS 0
+#define __WORD1_OFFS 4
+#endif
+
+#define __get_user_asm_dword(x, addr, err, __t) \
+ ({ \
+ unsigned long __w0, __w1; \
+ __get_user_asm(__w0, addr + __WORD0_OFFS, err, "ldr" __t); \
+ __get_user_asm(__w1, addr + __WORD1_OFFS, err, "ldr" __t); \
+ (x) = ((u64)__w1 << 32) | (u64) __w0; \
+})
#define __put_user_switch(x, ptr, __err, __fn) \
do { \
@@ -350,6 +401,14 @@ do { \
__pu_err; \
})
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1.1, all accessors need to include
+ * verification of the address space.
+ */
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#else
#define __put_user(x, ptr) \
({ \
long __pu_err = 0; \
@@ -357,16 +416,10 @@ do { \
__pu_err; \
})
-#define __put_user_error(x, ptr, err) \
-({ \
- __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
- (void) 0; \
-})
-
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
do { \
unsigned long __pu_addr = (unsigned long)__pu_ptr; \
- __put_user_nocheck_##__size(x, __pu_addr, __err); \
+ __put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
} while (0)
#define __put_user_nocheck_1 __put_user_asm_byte
@@ -374,9 +427,11 @@ do { \
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword
+#endif /* !CONFIG_CPU_SPECTRE */
+
#define __put_user_asm(x, __pu_addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(instr) " %1, [%2], #0\n" \
+ "1: " instr " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -391,27 +446,36 @@ do { \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
-#define __put_user_asm_byte(x, __pu_addr, err) \
- __put_user_asm(x, __pu_addr, err, strb)
+#define __put_user_asm_byte(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "strb" __t)
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "strh" __t)
+
+#else
#ifndef __ARMEB__
-#define __put_user_asm_half(x, __pu_addr, err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
unsigned long __temp = (__force unsigned long)(x); \
- __put_user_asm_byte(__temp, __pu_addr, err); \
- __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
+ __put_user_asm_byte(__temp, __pu_addr, err, __t); \
+ __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
})
#else
-#define __put_user_asm_half(x, __pu_addr, err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
unsigned long __temp = (__force unsigned long)(x); \
- __put_user_asm_byte(__temp >> 8, __pu_addr, err); \
- __put_user_asm_byte(__temp, __pu_addr + 1, err); \
+ __put_user_asm_byte(__temp >> 8, __pu_addr, err, __t); \
+ __put_user_asm_byte(__temp, __pu_addr + 1, err, __t); \
})
#endif
-#define __put_user_asm_word(x, __pu_addr, err) \
- __put_user_asm(x, __pu_addr, err, str)
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#define __put_user_asm_word(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "str" __t)
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
@@ -421,12 +485,12 @@ do { \
#define __reg_oper1 "%R2"
#endif
-#define __put_user_asm_dword(x, __pu_addr, err) \
+#define __put_user_asm_dword(x, __pu_addr, err, __t) \
__asm__ __volatile__( \
- ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
- ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
- THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \
- THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
+ ARM( "1: str" __t " " __reg_oper1 ", [%1], #4\n" ) \
+ ARM( "2: str" __t " " __reg_oper0 ", [%1]\n" ) \
+ THUMB( "1: str" __t " " __reg_oper1 ", [%1]\n" ) \
+ THUMB( "2: str" __t " " __reg_oper0 ", [%1, #4]\n" ) \
"3:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -442,6 +506,52 @@ do { \
: "r" (x), "i" (-EFAULT) \
: "cc")
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ const type *__pk_ptr = (src); \
+ unsigned long __src = (unsigned long)(__pk_ptr); \
+ type __val; \
+ int __err = 0; \
+ switch (sizeof(type)) { \
+ case 1: __get_user_asm_byte(__val, __src, __err, ""); break; \
+ case 2: __get_user_asm_half(__val, __src, __err, ""); break; \
+ case 4: __get_user_asm_word(__val, __src, __err, ""); break; \
+ case 8: { \
+ u32 *__v32 = (u32*)&__val; \
+ __get_user_asm_word(__v32[0], __src, __err, ""); \
+ if (__err) \
+ break; \
+ __get_user_asm_word(__v32[1], __src+4, __err, ""); \
+ break; \
+ } \
+ default: __err = __get_user_bad(); break; \
+ } \
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) \
+ put_unaligned(__val, (type *)(dst)); \
+ else \
+ *(type *)(dst) = __val; /* aligned by caller */ \
+ if (__err) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ const type *__pk_ptr = (dst); \
+ unsigned long __dst = (unsigned long)__pk_ptr; \
+ int __err = 0; \
+ type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
+ ? get_unaligned((type *)(src)) \
+ : *(type *)(src); /* aligned by caller */ \
+ switch (sizeof(type)) { \
+ case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \
+ case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \
+ case 4: __put_user_asm_word(__val, __dst, __err, ""); break; \
+ case 8: __put_user_asm_dword(__val, __dst, __err, ""); break; \
+ default: __err = __put_user_bad(); break; \
+ } \
+ if (__err) \
+ goto err_label; \
+} while (0)
#ifdef CONFIG_MMU
extern unsigned long __must_check
@@ -511,7 +621,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
+ if (access_ok(to, n))
n = __clear_user(to, n);
return n;
}
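The __inttype()/__long_type() selectors introduced above pick a 64-bit temporary only when the destination actually needs one, so an 8-byte get_user() no longer truncates through an unsigned long. A minimal user-space sketch of the same selection trick (the macro and test program below are illustrative, not part of the header):

#include <stdio.h>

/* Same rule as the header's __inttype()/__long_type(): use unsigned long
 * when the value fits, unsigned long long otherwise. */
#define long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

int main(void)
{
	int word;
	long long dword;

	printf("temporary for int:       %zu bytes\n", sizeof(long_type(word)));
	printf("temporary for long long: %zu bytes\n", sizeof(long_type(dword)));
	return 0;
}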
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index 921d8274855c..4048c92d9c2b 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASMARM_UCONTEXT_H
#define _ASMARM_UCONTEXT_H
#include <asm/fpstate.h>
+#include <asm/user.h>
/*
* struct sigcontext only has room for the basic registers, but struct
@@ -41,17 +43,6 @@ struct ucontext {
*/
#define DUMMY_MAGIC 0xb0d9ed01
-#ifdef CONFIG_CRUNCH
-#define CRUNCH_MAGIC 0x5065cf03
-#define CRUNCH_STORAGE_SIZE (CRUNCH_SIZE + 8)
-
-struct crunch_sigframe {
- unsigned long magic;
- unsigned long size;
- struct crunch_state storage;
-} __attribute__((__aligned__(8)));
-#endif
-
#ifdef CONFIG_IWMMXT
/* iwmmxt_area is 0x98 bytes long, preceded by 8 bytes of signature */
#define IWMMXT_MAGIC 0x12ef842a
@@ -90,9 +81,6 @@ struct vfp_sigframe
* one of these.
*/
struct aux_sigframe {
-#ifdef CONFIG_CRUNCH
- struct crunch_sigframe crunch;
-#endif
#ifdef CONFIG_IWMMXT
struct iwmmxt_sigframe iwmmxt;
#endif
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index a91ae499614c..ce9689118dbb 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -1,27 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/asm-arm/unified.h - Unified Assembler Syntax helper macros
*
* Copyright (C) 2008 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_UNIFIED_H
#define __ASM_UNIFIED_H
-#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED)
+#if defined(__ASSEMBLY__)
.syntax unified
+#else
+__asm__(".syntax unified");
#endif
#ifdef CONFIG_CPU_V7M
@@ -34,10 +24,6 @@
#ifdef CONFIG_THUMB2_KERNEL
-#if __GNUC__ < 4
-#error Thumb-2 kernel requires gcc >= 4
-#endif
-
/* The CPSR bit describing the instruction set (Thumb) */
#define PSR_ISETSTATE PSR_T_BIT
@@ -64,77 +50,4 @@
#endif /* CONFIG_THUMB2_KERNEL */
-#ifndef CONFIG_ARM_ASM_UNIFIED
-
-/*
- * If the unified assembly syntax isn't used (in ARM mode), these
- * macros expand to an empty string
- */
-#ifdef __ASSEMBLY__
- .macro it, cond
- .endm
- .macro itt, cond
- .endm
- .macro ite, cond
- .endm
- .macro ittt, cond
- .endm
- .macro itte, cond
- .endm
- .macro itet, cond
- .endm
- .macro itee, cond
- .endm
- .macro itttt, cond
- .endm
- .macro ittte, cond
- .endm
- .macro ittet, cond
- .endm
- .macro ittee, cond
- .endm
- .macro itett, cond
- .endm
- .macro itete, cond
- .endm
- .macro iteet, cond
- .endm
- .macro iteee, cond
- .endm
-#else /* !__ASSEMBLY__ */
-__asm__(
-" .macro it, cond\n"
-" .endm\n"
-" .macro itt, cond\n"
-" .endm\n"
-" .macro ite, cond\n"
-" .endm\n"
-" .macro ittt, cond\n"
-" .endm\n"
-" .macro itte, cond\n"
-" .endm\n"
-" .macro itet, cond\n"
-" .endm\n"
-" .macro itee, cond\n"
-" .endm\n"
-" .macro itttt, cond\n"
-" .endm\n"
-" .macro ittte, cond\n"
-" .endm\n"
-" .macro ittet, cond\n"
-" .endm\n"
-" .macro ittee, cond\n"
-" .endm\n"
-" .macro itett, cond\n"
-" .endm\n"
-" .macro itete, cond\n"
-" .endm\n"
-" .macro iteet, cond\n"
-" .endm\n"
-" .macro iteee, cond\n"
-" .endm\n");
-#endif /* __ASSEMBLY__ */
-
-#endif /* CONFIG_ARM_ASM_UNIFIED */
-
#endif /* !__ASM_UNIFIED_H */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 076090d2dbf5..9fb00973c608 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/unistd.h
*
* Copyright (C) 2001-2005 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Please forward _all_ changes to this file to rmk@arm.linux.org.uk,
* no matter what the change is. Thanks!
*/
@@ -16,23 +13,23 @@
#include <uapi/asm/unistd.h>
#include <asm/unistd-nr.h>
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_OLD_MMAP
#define __ARCH_WANT_SYS_OLD_SELECT
+#define __ARCH_WANT_SYS_UTIME32
#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
-#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_TIME32
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_SOCKETCALL
@@ -45,7 +42,6 @@
* Unimplemented (or alternatively implemented) syscalls
*/
#define __IGNORE_fadvise64_64
-#define __IGNORE_migrate_pages
#ifdef __ARM_EABI__
/*
diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h
index d1c3f3a71c94..d60b09a5acfc 100644
--- a/arch/arm/include/asm/unwind.h
+++ b/arch/arm/include/asm/unwind.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/unwind.h
*
* Copyright (C) 2008 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_UNWIND_H
@@ -36,6 +24,7 @@ struct unwind_idx {
struct unwind_table {
struct list_head list;
+ struct list_head mod_list;
const struct unwind_idx *start;
const struct unwind_idx *origin;
const struct unwind_idx *stop;
@@ -48,7 +37,12 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
unsigned long text_addr,
unsigned long text_size);
extern void unwind_table_del(struct unwind_table *tab);
-extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
+extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
+
+void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr1(void);
+void __aeabi_unwind_cpp_pr2(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/uprobes.h b/arch/arm/include/asm/uprobes.h
index 9472c20b7d49..6a61b2874926 100644
--- a/arch/arm/include/asm/uprobes.h
+++ b/arch/arm/include/asm/uprobes.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Rabin Vincent <rabin at rab.in>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_UPROBES_H
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index 35917b3a97f9..167d44b550f4 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARM_USER_H
#define _ARM_USER_H
@@ -76,10 +77,6 @@ struct user{
struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
/* the FP registers. */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
/*
* User specific VFP registers. If only VFPv2 is present, registers 16 to 31
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index 1fd775c1bc5d..4512f7e1918f 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common defines for v7m cpus
*/
@@ -12,6 +13,7 @@
#define V7M_SCB_ICSR_PENDSVSET (1 << 28)
#define V7M_SCB_ICSR_PENDSVCLR (1 << 27)
#define V7M_SCB_ICSR_RETTOBASE (1 << 11)
+#define V7M_SCB_ICSR_VECTACTIVE 0x000001ff
#define V7M_SCB_VTOR 0x08
@@ -37,7 +39,7 @@
#define V7M_SCB_SHCSR_MEMFAULTENA (1 << 16)
#define V7M_xPSR_FRAMEPTRALIGN 0x00000200
-#define V7M_xPSR_EXCEPTIONNO 0x000001ff
+#define V7M_xPSR_EXCEPTIONNO V7M_SCB_ICSR_VECTACTIVE
/*
* When branching to an address that has bits [31:28] == 0xf an exception return
@@ -48,7 +50,7 @@
* (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
*/
#define EXC_RET_STACK_MASK 0x00000004
-#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
+#define EXC_RET_THREADMODE_PROCESSSTACK (3 << 2)
/* Cache related definitions */
@@ -57,6 +59,24 @@
#define V7M_SCB_CCSIDR 0x80 /* Cache size ID register */
#define V7M_SCB_CSSELR 0x84 /* Cache size selection register */
+/* Memory-mapped MPU registers for M-class */
+#define MPU_TYPE 0x90
+#define MPU_CTRL 0x94
+#define MPU_CTRL_ENABLE 1
+#define MPU_CTRL_PRIVDEFENA (1 << 2)
+
+#define PMSAv7_RNR 0x98
+#define PMSAv7_RBAR 0x9c
+#define PMSAv7_RASR 0xa0
+
+#define PMSAv8_RNR 0x98
+#define PMSAv8_RBAR 0x9c
+#define PMSAv8_RLAR 0xa0
+#define PMSAv8_RBAR_A(n) (PMSAv8_RBAR + 8*(n))
+#define PMSAv8_RLAR_A(n) (PMSAv8_RLAR + 8*(n))
+#define PMSAv8_MAIR0 0xc0
+#define PMSAv8_MAIR1 0xc4
+
/* Cache operations */
#define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */
#define V7M_SCB_ICIMVAU 0x258 /* I-cache invalidate by MVA to PoU */
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
index d0295f1dd1a3..88364a6727ff 100644
--- a/arch/arm/include/asm/vdso.h
+++ b/arch/arm/include/asm/vdso.h
@@ -1,8 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_VDSO_H
#define __ASM_VDSO_H
#ifdef __KERNEL__
+#define __VDSO_PAGES 4
+
#ifndef __ASSEMBLY__
struct mm_struct;
@@ -11,8 +14,6 @@ struct mm_struct;
void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
-extern char vdso_start, vdso_end;
-
extern unsigned int vdso_total_pages;
#else /* CONFIG_VDSO */
diff --git a/arch/arm/include/asm/vdso/clocksource.h b/arch/arm/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..50c0b19fb755
--- /dev/null
+++ b/arch/arm/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif /* __ASM_VDSOCLOCKSOURCE_H */
diff --git a/arch/arm/include/asm/vdso/cp15.h b/arch/arm/include/asm/vdso/cp15.h
new file mode 100644
index 000000000000..bed16fa1865e
--- /dev/null
+++ b/arch/arm/include/asm/vdso/cp15.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_CP15_H
+#define __ASM_VDSO_CP15_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_CPU_CP15
+
+#include <linux/stringify.h>
+
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
+ "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm) \
+ "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({ \
+ t __val; \
+ asm volatile(r " " c : "=r" (__val)); \
+ __val; \
+})
+#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+
+#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
+#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+
+#define CNTVCT __ACCESS_CP15_64(1, c14)
+
+#endif /* CONFIG_CPU_CP15 */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_CP15_H */
diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..1e9f81639c88
--- /dev/null
+++ b/arch/arm/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 ARM Limited
+ */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+#include <asm/errno.h>
+#include <asm/unistd.h>
+#include <asm/vdso/cp15.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline int gettimeofday_fallback(
+ struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct timezone *tz asm("r1") = _tz;
+ register struct __kernel_old_timeval *tv asm("r0") = _tv;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_gettimeofday;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (tv), "r" (tz), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline long clock_gettime_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_gettime64;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline long clock_gettime32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_gettime;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline int clock_getres_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_getres_time64;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline int clock_getres32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_getres;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static inline bool arm_vdso_hres_capable(void)
+{
+ return IS_ENABLED(CONFIG_ARM_ARCH_TIMER);
+}
+#define __arch_vdso_hres_capable arm_vdso_hres_capable
+
+static __always_inline u64 __arch_get_hw_counter(int clock_mode,
+ const struct vdso_time_data *vd)
+{
+#ifdef CONFIG_ARM_ARCH_TIMER
+ u64 cycle_now;
+
+ /*
+ * Core checks for mode already, so this raced against a concurrent
+ * update. Return something. Core will do another round and then
+	 * see the mode change and fall back to the syscall.
+ */
+ if (clock_mode == VDSO_CLOCKMODE_NONE)
+ return 0;
+
+ isb();
+ cycle_now = read_sysreg(CNTVCT);
+
+ return cycle_now;
+#else
+ /* Make GCC happy. This is compiled out anyway */
+ return 0;
+#endif
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
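The *_fallback() helpers above issue the raw syscall with the syscall number pinned in r7; the generic vDSO code calls them whenever __arch_get_hw_counter() cannot provide a usable counter. A portable user-space sketch of the same fallback idea, using syscall(2) rather than the hand-written "swi #0" sequence (test program only, not kernel code):

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	struct timespec ts;

	/* What the vDSO fast path degrades to: a real clock_gettime syscall. */
	if (syscall(SYS_clock_gettime, CLOCK_MONOTONIC, &ts) == 0)
		printf("monotonic: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}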
diff --git a/arch/arm/include/asm/vdso/processor.h b/arch/arm/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..45efb3ff511c
--- /dev/null
+++ b/arch/arm/include/asm/vdso/processor.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
+#define cpu_relax() \
+ do { \
+ smp_mb(); \
+ __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
+ } while (0)
+#else
+#define cpu_relax() barrier()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
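cpu_relax() above reduces to a compiler barrier (plus a run of nops on ARMv6 or with erratum 754327) so that spin-wait loops re-read memory on every pass. A user-space sketch of the pattern, using the barrier-only variant and a flag that is set up front so the loop terminates (helper names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int flag;

/* Stand-in for the barrier()-based cpu_relax(): forces the loop below
 * to re-load the flag instead of hoisting a single read. */
static inline void relax(void)
{
	__asm__ __volatile__("" ::: "memory");
}

int main(void)
{
	atomic_store(&flag, 1);		/* pretend another CPU already set it */
	while (!atomic_load_explicit(&flag, memory_order_acquire))
		relax();
	puts("flag observed");
	return 0;
}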
diff --git a/arch/arm/include/asm/vdso/vsyscall.h b/arch/arm/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..ff1c729af05f
--- /dev/null
+++ b/arch/arm/include/asm/vdso/vsyscall.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <vdso/datapage.h>
+#include <asm/cacheflush.h>
+
+static __always_inline
+void __arch_sync_vdso_time_data(struct vdso_time_data *vdata)
+{
+ flush_dcache_page(virt_to_page(vdata));
+}
+#define __arch_sync_vdso_time_data __arch_sync_vdso_time_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/arm/include/asm/vdso_datapage.h b/arch/arm/include/asm/vdso_datapage.h
deleted file mode 100644
index 9be259442fca..000000000000
--- a/arch/arm/include/asm/vdso_datapage.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Adapted from arm64 version.
- *
- * Copyright (C) 2012 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_VDSO_DATAPAGE_H
-#define __ASM_VDSO_DATAPAGE_H
-
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-
-#include <asm/page.h>
-
-/* Try to be cache-friendly on systems that don't implement the
- * generic timer: fit the unconditionally updated fields in the first
- * 32 bytes.
- */
-struct vdso_data {
- u32 seq_count; /* sequence count - odd during updates */
- u16 tk_is_cntvct; /* fall back to syscall if false */
- u16 cs_shift; /* clocksource shift */
- u32 xtime_coarse_sec; /* coarse time */
- u32 xtime_coarse_nsec;
-
- u32 wtm_clock_sec; /* wall to monotonic offset */
- u32 wtm_clock_nsec;
- u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */
- u32 cs_mult; /* clocksource multiplier */
-
- u64 cs_cycle_last; /* last cycle value */
- u64 cs_mask; /* clocksource mask */
-
- u64 xtime_clock_snsec; /* CLOCK_REALTIME sub-ns base */
- u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
- u32 tz_dsttime;
-};
-
-union vdso_data_store {
- struct vdso_data data;
- u8 page[PAGE_SIZE];
-};
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm/include/asm/vermagic.h b/arch/arm/include/asm/vermagic.h
new file mode 100644
index 000000000000..62ce94e26a63
--- /dev/null
+++ b/arch/arm/include/asm/vermagic.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#include <linux/stringify.h>
+
+/*
+ * Add the ARM architecture version to the version magic string
+ */
+#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+
+/* Add __virt_to_phys patching state as well */
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+#else
+#define MODULE_ARCH_VERMAGIC_P2V ""
+#endif
+
+/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
+#ifdef CONFIG_THUMB2_KERNEL
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
+#else
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+ MODULE_ARCH_VERMAGIC_ARMVSN \
+ MODULE_ARCH_VERMAGIC_ARMTHUMB \
+ MODULE_ARCH_VERMAGIC_P2V
+
+#endif /* _ASM_VERMAGIC_H */
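The three fragments above concatenate into the architecture part of the module vermagic string that is compared at module load time. A small host-side sketch of that concatenation, with one hypothetical configuration (ARMv7, Thumb-2 kernel, phys-to-virt patching) hard-coded:

#include <stdio.h>

/* Hypothetical values for one configuration; the kernel derives them from
 * __LINUX_ARM_ARCH__, CONFIG_THUMB2_KERNEL and CONFIG_ARM_PATCH_PHYS_VIRT. */
#define MODULE_ARCH_VERMAGIC_ARMVSN	"ARMv7 "
#define MODULE_ARCH_VERMAGIC_ARMTHUMB	"thumb2 "
#define MODULE_ARCH_VERMAGIC_P2V	"p2v8 "

#define MODULE_ARCH_VERMAGIC \
	MODULE_ARCH_VERMAGIC_ARMVSN \
	MODULE_ARCH_VERMAGIC_ARMTHUMB \
	MODULE_ARCH_VERMAGIC_P2V

int main(void)
{
	printf("arch vermagic: \"%s\"\n", MODULE_ARCH_VERMAGIC);
	return 0;
}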
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index 22e414056a8c..85ccc422d4d0 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/vfp.h
*
@@ -8,14 +9,6 @@
#ifndef __ASM_VFP_H
#define __ASM_VFP_H
-#define FPSID cr0
-#define FPSCR cr1
-#define MVFR1 cr6
-#define MVFR0 cr7
-#define FPEXC cr8
-#define FPINST cr9
-#define FPINST2 cr10
-
/* FPSID bits */
#define FPSID_IMPLEMENTER_BIT (24)
#define FPSID_IMPLEMENTER_MASK (0xff << FPSID_IMPLEMENTER_BIT)
@@ -84,6 +77,12 @@
#define MVFR0_DP_BIT (8)
#define MVFR0_DP_MASK (0xf << MVFR0_DP_BIT)
+/* MVFR1 bits */
+#define MVFR1_ASIMDHP_BIT (20)
+#define MVFR1_ASIMDHP_MASK (0xf << MVFR1_ASIMDHP_BIT)
+#define MVFR1_FPHP_BIT (24)
+#define MVFR1_FPHP_MASK (0xf << MVFR1_FPHP_BIT)
+
/* Bit patterns for decoding the packaged operation descriptors */
#define VFPOPDESC_LENGTH_BIT (9)
#define VFPOPDESC_LENGTH_MASK (0x07 << VFPOPDESC_LENGTH_BIT)
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index 301c1db3e99b..e2e1d5a3727a 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/vfpmacros.h
*
@@ -7,34 +8,35 @@
#include <asm/vfp.h>
-@ Macros to allow building with old toolkits (with no VFP support)
.macro VFPFMRX, rd, sysreg, cond
- MRC\cond p10, 7, \rd, \sysreg, cr0, 0 @ FMRX \rd, \sysreg
+ vmrs\cond \rd, \sysreg
.endm
.macro VFPFMXR, sysreg, rd, cond
- MCR\cond p10, 7, \rd, \sysreg, cr0, 0 @ FMXR \sysreg, \rd
+ vmsr\cond \sysreg, \rd
.endm
@ read all the working registers back into the VFP
.macro VFPFLDMIA, base, tmp
+ .fpu vfpv2
#if __LINUX_ARM_ARCH__ < 6
- LDC p11, cr0, [\base],#33*4 @ FLDMIAX \base!, {d0-d15}
+ fldmiax \base!, {d0-d15}
#else
- LDC p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d0-d15}
+ vldmia \base!, {d0-d15}
#endif
#ifdef CONFIG_VFPv3
+ .fpu vfpv3
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32
- ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ vldmiane \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers?
- ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ vldmiaeq \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#endif
#endif
@@ -43,22 +45,23 @@
@ write all the working registers out of the VFP
.macro VFPFSTMIA, base, tmp
#if __LINUX_ARM_ARCH__ < 6
- STC p11, cr0, [\base],#33*4 @ FSTMIAX \base!, {d0-d15}
+ fstmiax \base!, {d0-d15}
#else
- STC p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d0-d15}
+ vstmia \base!, {d0-d15}
#endif
#ifdef CONFIG_VFPv3
+ .fpu vfpv3
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32
- stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ vstmiane \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers?
- stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ vstmiaeq \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#endif
#endif
diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h
index 91f40217bfa5..6c430ec371df 100644
--- a/arch/arm/include/asm/vga.h
+++ b/arch/arm/include/asm/vga.h
@@ -1,9 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_VGA_H
#define ASMARM_VGA_H
#include <linux/io.h>
extern unsigned long vga_base;
+extern struct screen_info vgacon_screen_info;
#define VGA_MAP_MEM(x,s) (vga_base + (x))
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 141144f333a2..dd9697b2bde8 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -1,19 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2012 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef VIRT_H
@@ -52,8 +39,6 @@ static inline void sync_boot_mode(void)
sync_cache_r(&__boot_cpu_mode);
}
-void __hyp_set_vectors(unsigned long phys_vector_base);
-void __hyp_reset_vectors(void);
#else
#define __boot_cpu_mode (SVC_MODE)
#define sync_boot_mode()
@@ -80,18 +65,6 @@ static inline bool is_kernel_in_hyp_mode(void)
return false;
}
-static inline bool has_vhe(void)
-{
- return false;
-}
-
-/* The section containing the hypervisor idmap text */
-extern char __hyp_idmap_text_start[];
-extern char __hyp_idmap_text_end[];
-
-/* The section containing the hypervisor text */
-extern char __hyp_text_start[];
-extern char __hyp_text_end[];
#endif
#else
@@ -100,9 +73,6 @@ extern char __hyp_text_end[];
#define HVC_SET_VECTORS 0
#define HVC_SOFT_RESTART 1
-#define HVC_RESET_VECTORS 2
-
-#define HVC_STUB_HCALL_NR 3
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/vmalloc.h b/arch/arm/include/asm/vmalloc.h
new file mode 100644
index 000000000000..a9b3718b8600
--- /dev/null
+++ b/arch/arm/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_ARM_VMALLOC_H
+#define _ASM_ARM_VMALLOC_H
+
+#endif /* _ASM_ARM_VMALLOC_H */
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
new file mode 100644
index 000000000000..0341973e30e1
--- /dev/null
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm-generic/vmlinux.lds.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define ARM_CPU_DISCARD(x)
+#define ARM_CPU_KEEP(x) x
+#else
+#define ARM_CPU_DISCARD(x) x
+#define ARM_CPU_KEEP(x)
+#endif
+
+#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
+#define ARM_EXIT_KEEP(x) x
+#define ARM_EXIT_DISCARD(x)
+#else
+#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x) x
+#endif
+
+#ifdef CONFIG_MMU
+#define ARM_MMU_KEEP(x) KEEP(x)
+#define ARM_MMU_DISCARD(x)
+#else
+#define ARM_MMU_KEEP(x)
+#define ARM_MMU_DISCARD(x) x
+#endif
+
+/*
+ * ld.lld does not support NOCROSSREFS:
+ * https://github.com/ClangBuiltLinux/linux/issues/1609
+ */
+#ifdef CONFIG_LD_IS_LLD
+#define NOCROSSREFS
+#endif
+
+#ifdef CONFIG_LD_CAN_USE_KEEP_IN_OVERLAY
+#define OVERLAY_KEEP(x) KEEP(x)
+#else
+#define OVERLAY_KEEP(x) x
+#endif
+
+/* Set start/end symbol names to the LMA for the section */
+#define ARM_LMA(sym, section) \
+ sym##_start = LOADADDR(section); \
+ sym##_end = LOADADDR(section) + SIZEOF(section)
+
+#define PROC_INFO \
+ . = ALIGN(4); \
+ __proc_info_begin = .; \
+ KEEP(*(.proc.info.init)) \
+ __proc_info_end = .;
+
+#define IDMAP_TEXT \
+ ALIGN_FUNCTION(); \
+ __idmap_text_start = .; \
+ *(.idmap.text) \
+ __idmap_text_end = .; \
+
+#define ARM_DISCARD \
+ *(.ARM.exidx.exit.text) \
+ *(.ARM.extab.exit.text) \
+ *(.ARM.exidx.text.exit) \
+ *(.ARM.extab.text.exit) \
+ ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
+ ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
+ ARM_EXIT_DISCARD(EXIT_TEXT) \
+ ARM_EXIT_DISCARD(EXIT_DATA) \
+ EXIT_CALL \
+ ARM_MMU_DISCARD(*(.text.fixup)) \
+ ARM_MMU_DISCARD(*(__ex_table)) \
+ COMMON_DISCARDS
+
+/*
+ * Sections that should stay zero sized, which is safer to explicitly
+ * check instead of blindly discarding.
+ */
+#define ARM_ASSERTS \
+ .plt : { \
+ *(.iplt) *(.rel.iplt) *(.iplt) *(.igot.plt) \
+ } \
+ ASSERT(SIZEOF(.plt) == 0, \
+ "Unexpected run-time procedure linkages detected!")
+
+#define ARM_DETAILS \
+ ELF_DETAILS \
+ .ARM.attributes 0 : { *(.ARM.attributes) }
+
+#define ARM_STUBS_TEXT \
+ *(.gnu.warning) \
+ *(.glue_7) \
+ *(.glue_7t) \
+ *(.vfp11_veneer) \
+ *(.v4_bx)
+
+#define ARM_TEXT \
+ IDMAP_TEXT \
+ __entry_text_start = .; \
+ *(.entry.text) \
+ __entry_text_end = .; \
+ IRQENTRY_TEXT \
+ SOFTIRQENTRY_TEXT \
+ TEXT_TEXT \
+ SCHED_TEXT \
+ LOCK_TEXT \
+ KPROBES_TEXT \
+ ARM_STUBS_TEXT \
+ . = ALIGN(4); \
+ *(.got) /* Global offset table */ \
+ ARM_CPU_KEEP(PROC_INFO)
+
+/* Stack unwinding tables */
+#define ARM_UNWIND_SECTIONS \
+ . = ALIGN(8); \
+ .ARM.unwind_idx : { \
+ __start_unwind_idx = .; \
+ *(.ARM.exidx*) \
+ __stop_unwind_idx = .; \
+ } \
+ .ARM.unwind_tab : { \
+ __start_unwind_tab = .; \
+ *(.ARM.extab*) \
+ __stop_unwind_tab = .; \
+ }
+
+/*
+ * The vectors and stubs are relocatable code, and the
+ * only thing that matters is their relative offsets
+ */
+#define ARM_VECTORS \
+ __vectors_lma = .; \
+ OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \
+ .vectors { \
+ OVERLAY_KEEP(*(.vectors)) \
+ } \
+ .vectors.bhb.loop8 { \
+ OVERLAY_KEEP(*(.vectors.bhb.loop8)) \
+ } \
+ .vectors.bhb.bpiall { \
+ OVERLAY_KEEP(*(.vectors.bhb.bpiall)) \
+ } \
+ } \
+ ARM_LMA(__vectors, .vectors); \
+ ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8); \
+ ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall); \
+ . = __vectors_lma + SIZEOF(.vectors) + \
+ SIZEOF(.vectors.bhb.loop8) + \
+ SIZEOF(.vectors.bhb.bpiall); \
+ \
+ __stubs_lma = .; \
+ .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) { \
+ *(.stubs) \
+ } \
+ ARM_LMA(__stubs, .stubs); \
+ . = __stubs_lma + SIZEOF(.stubs); \
+ \
+ PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
+
+#define ARM_TCM \
+ __itcm_start = ALIGN(4); \
+ .text_itcm ITCM_OFFSET : AT(__itcm_start - LOAD_OFFSET) { \
+ __sitcm_text = .; \
+ *(.tcm.text) \
+ *(.tcm.rodata) \
+ . = ALIGN(4); \
+ __eitcm_text = .; \
+ } \
+ . = __itcm_start + SIZEOF(.text_itcm); \
+ \
+ __dtcm_start = .; \
+ .data_dtcm DTCM_OFFSET : AT(__dtcm_start - LOAD_OFFSET) { \
+ __sdtcm_data = .; \
+ *(.tcm.data) \
+ . = ALIGN(4); \
+ __edtcm_data = .; \
+ } \
+ . = __dtcm_start + SIZEOF(.data_dtcm);
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
index 5831dce4b51c..f9a3897b06e7 100644
--- a/arch/arm/include/asm/word-at-a-time.h
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_WORD_AT_A_TIME_H
#define __ASM_ARM_WORD_AT_A_TIME_H
@@ -7,7 +8,8 @@
* Little-endian word-at-a-time zero byte handling.
* Heavily based on the x86 algorithm.
*/
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
struct word_at_a_time {
const unsigned long one_bits, high_bits;
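word-at-a-time.h carries the classic branch-free zero-byte test used by the word-at-a-time string helpers: (word - 0x01010101) & ~word & 0x80808080 is non-zero exactly when some byte in the word is zero. A user-space sketch of that test on 32-bit little-endian words (helper names and the demo are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ONE_BITS	0x01010101u	/* repeated 0x01 bytes */
#define HIGH_BITS	0x80808080u	/* repeated 0x80 bytes */

static int has_zero_byte(uint32_t word)
{
	return ((word - ONE_BITS) & ~word & HIGH_BITS) != 0;
}

int main(void)
{
	uint32_t w1, w2;

	memcpy(&w1, "abcd", 4);		/* no NUL byte */
	memcpy(&w2, "ab\0d", 4);	/* contains a NUL byte */
	printf("abcd  -> %d\n", has_zero_byte(w1));
	printf("ab\\0d -> %d\n", has_zero_byte(w2));
	return 0;
}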
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 620dc75362e5..c83086f745cf 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_XEN_EVENTS_H
#define _ASM_ARM_XEN_EVENTS_H
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
deleted file mode 100644
index b3ef061d8b74..000000000000
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <xen/arm/page-coherent.h>
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 31bbc803cecb..dc7f6e91aafa 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1 +1,6 @@
#include <xen/arm/page.h>
+
+static inline bool xen_kernel_unmapped_at_usr(void)
+{
+ return false;
+}
diff --git a/arch/arm/include/asm/xen/swiotlb-xen.h b/arch/arm/include/asm/xen/swiotlb-xen.h
new file mode 100644
index 000000000000..455ade5d5320
--- /dev/null
+++ b/arch/arm/include/asm/xen/swiotlb-xen.h
@@ -0,0 +1 @@
+#include <xen/arm/swiotlb-xen.h>
diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h
index ec154e719b11..7ebb7eb0bd93 100644
--- a/arch/arm/include/asm/xen/xen-ops.h
+++ b/arch/arm/include/asm/xen/xen-ops.h
@@ -1,6 +1,2 @@
-#ifndef _ASM_XEN_OPS_H
-#define _ASM_XEN_OPS_H
-
-void xen_efi_runtime_setup(void);
-
-#endif /* _ASM_XEN_OPS_H */
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <xen/arm/xen-ops.h>
diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h
index 4ffb26d4cad8..934b549905f5 100644
--- a/arch/arm/include/asm/xor.h
+++ b/arch/arm/include/asm/xor.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/xor.h
*
* Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/hardirq.h>
#include <asm-generic/xor.h>
@@ -47,13 +44,14 @@
: "0" (dst), "r" (a1), "r" (a2), "r" (a3), "r" (a4))
static void
-xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_arm4regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
unsigned int lines = bytes / sizeof(unsigned long) / 4;
register unsigned int a1 __asm__("r4");
register unsigned int a2 __asm__("r5");
register unsigned int a3 __asm__("r6");
- register unsigned int a4 __asm__("r7");
+ register unsigned int a4 __asm__("r10");
register unsigned int b1 __asm__("r8");
register unsigned int b2 __asm__("r9");
register unsigned int b3 __asm__("ip");
@@ -67,14 +65,15 @@ xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_arm4regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
unsigned int lines = bytes / sizeof(unsigned long) / 4;
register unsigned int a1 __asm__("r4");
register unsigned int a2 __asm__("r5");
register unsigned int a3 __asm__("r6");
- register unsigned int a4 __asm__("r7");
+ register unsigned int a4 __asm__("r10");
register unsigned int b1 __asm__("r8");
register unsigned int b2 __asm__("r9");
register unsigned int b3 __asm__("ip");
@@ -89,8 +88,10 @@ xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_arm4regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
unsigned int lines = bytes / sizeof(unsigned long) / 2;
register unsigned int a1 __asm__("r8");
@@ -108,8 +109,11 @@ xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_arm4regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_arm4regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
unsigned int lines = bytes / sizeof(unsigned long) / 2;
register unsigned int a1 __asm__("r8");
@@ -149,7 +153,8 @@ static struct xor_block_template xor_block_arm4regs = {
extern struct xor_block_template const xor_block_neon_inner;
static void
-xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
if (in_interrupt()) {
xor_arm4regs_2(bytes, p1, p2);
@@ -161,8 +166,9 @@ xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
if (in_interrupt()) {
xor_arm4regs_3(bytes, p1, p2, p3);
@@ -174,8 +180,10 @@ xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
if (in_interrupt()) {
xor_arm4regs_4(bytes, p1, p2, p3, p4);
@@ -187,8 +195,11 @@ xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
if (in_interrupt()) {
xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
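The xor_arm4regs_*() and xor_neon_*() variants above all compute the same result; the assembly versions pin their operands to specific registers for scheduling, and the NEON wrappers fall back to the integer code when running in interrupt context. A plain-C model of the 2-source primitive (function name and demo are illustrative):

#include <stdio.h>

/* p1[i] ^= p2[i] across the whole buffer, one word at a time. */
static void xor_words_2(unsigned long bytes, unsigned long *p1,
			const unsigned long *p2)
{
	unsigned long lines = bytes / sizeof(unsigned long);

	while (lines--)
		*p1++ ^= *p2++;
}

int main(void)
{
	unsigned long a[4] = { 1, 2, 3, 4 };
	unsigned long b[4] = { 4, 3, 2, 1 };

	xor_words_2(sizeof(a), a, b);
	printf("%lu %lu %lu %lu\n", a[0], a[1], a[2], a[3]);
	return 0;
}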
diff --git a/arch/arm/include/debug/8250.S b/arch/arm/include/debug/8250.S
index 7f7446f6f806..e3692a37cede 100644
--- a/arch/arm/include/debug/8250.S
+++ b/arch/arm/include/debug/8250.S
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/debug/8250.S
*
* Copyright (C) 1994-2013 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/serial_reg.h>
@@ -48,10 +45,11 @@
bne 1002b
.endm
- .macro waituart,rd,rx
-#ifdef CONFIG_DEBUG_UART_8250_FLOW_CONTROL
+ .macro waituarttxrdy,rd,rx
+ .endm
+
+ .macro waituartcts,rd,rx
1001: load \rd, [\rx, #UART_MSR << UART_SHIFT]
tst \rd, #UART_MSR_CTS
beq 1001b
-#endif
.endm
diff --git a/arch/arm/include/debug/asm9260.S b/arch/arm/include/debug/asm9260.S
index 292f85b49fca..5a0ce145c44a 100644
--- a/arch/arm/include/debug/asm9260.S
+++ b/arch/arm/include/debug/asm9260.S
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Debugging macro include header
*
* Copyright (C) 1994-1999 Russell King
* Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
* Modified for ASM9260 by Oleksij Remepl <linux@rempel-privat.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
.macro addruart, rp, rv, tmp
@@ -15,7 +11,10 @@
ldr \rv, = CONFIG_DEBUG_UART_VIRT
.endm
- .macro waituart,rd,rx
+ .macro waituarttxrdy,rd,rx
+ .endm
+
+ .macro waituartcts,rd,rx
.endm
.macro senduart,rd,rx
diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S
index 0098401e5aeb..17722824e2f2 100644
--- a/arch/arm/include/debug/at91.S
+++ b/arch/arm/include/debug/at91.S
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003-2005 SAN People
*
* Debugging macro include header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#define AT91_DBGU_SR (0x14) /* Status Register */
@@ -23,12 +19,15 @@
strb \rd, [\rx, #(AT91_DBGU_THR)] @ Write to Transmitter Holding Register
.endm
- .macro waituart,rd,rx
+ .macro waituarttxrdy,rd,rx
1001: ldr \rd, [\rx, #(AT91_DBGU_SR)] @ Read Status Register
tst \rd, #AT91_DBGU_TXRDY @ DBGU_TXRDY = 1 when ready to transmit
beq 1001b
.endm
+ .macro waituartcts,rd,rx
+ .endm
+
.macro busyuart,rd,rx
1001: ldr \rd, [\rx, #(AT91_DBGU_SR)] @ Read Status Register
tst \rd, #AT91_DBGU_TXEMPTY @ DBGU_TXEMPTY = 1 when transmission complete
diff --git a/arch/arm/include/debug/bcm63xx.S b/arch/arm/include/debug/bcm63xx.S
index e7164d570f44..da65abb6738d 100644
--- a/arch/arm/include/debug/bcm63xx.S
+++ b/arch/arm/include/debug/bcm63xx.S
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Broadcom BCM63xx low-level UART debug
*
* Copyright (C) 2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/serial_bcm63xx.h>
@@ -20,12 +17,15 @@
strb \rd, [\rx, #UART_FIFO_REG]
.endm
- .macro waituart, rd, rx
+ .macro waituarttxrdy, rd, rx
1001: ldr \rd, [\rx, #UART_IR_REG]
tst \rd, #(1 << UART_IR_TXEMPTY)
beq 1001b
.endm
+ .macro waituartcts, rd, rx
+ .endm
+
.macro busyuart, rd, rx
1002: ldr \rd, [\rx, #UART_IR_REG]
tst \rd, #(1 << UART_IR_TXTRESH)
diff --git a/arch/arm/include/debug/brcmstb.S b/arch/arm/include/debug/brcmstb.S
index 52aaed2b936f..3f7d68740ed4 100644
--- a/arch/arm/include/debug/brcmstb.S
+++ b/arch/arm/include/debug/brcmstb.S
@@ -1,32 +1,33 @@
-/*
- * Copyright (C) 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2016 Broadcom */
#include <linux/serial_reg.h>
+#include <asm/cputype.h>
/* Physical register offset and virtual register offset */
#define REG_PHYS_BASE 0xf0000000
+#define REG_PHYS_BASE_V7 0x08000000
#define REG_VIRT_BASE 0xfc000000
#define REG_PHYS_ADDR(x) ((x) + REG_PHYS_BASE)
+#define REG_PHYS_ADDR_V7(x) ((x) + REG_PHYS_BASE_V7)
/* Product id can be read from here */
#define SUN_TOP_CTRL_BASE REG_PHYS_ADDR(0x404000)
+#define SUN_TOP_CTRL_BASE_V7 REG_PHYS_ADDR_V7(0x404000)
#define UARTA_3390 REG_PHYS_ADDR(0x40a900)
+#define UARTA_72116 UARTA_7255
#define UARTA_7250 REG_PHYS_ADDR(0x40b400)
-#define UARTA_7260 REG_PHYS_ADDR(0x40c000)
-#define UARTA_7268 UARTA_7260
+#define UARTA_7255 REG_PHYS_ADDR(0x40c000)
+#define UARTA_7260 UARTA_7255
+#define UARTA_7268 UARTA_7255
#define UARTA_7271 UARTA_7268
+#define UARTA_7278 REG_PHYS_ADDR_V7(0x40c000)
+#define UARTA_7216 UARTA_7278
+#define UARTA_72164 UARTA_7278
+#define UARTA_72165 UARTA_7278
#define UARTA_7364 REG_PHYS_ADDR(0x40b000)
#define UARTA_7366 UARTA_7364
+#define UARTA_74165 UARTA_7278
#define UARTA_74371 REG_PHYS_ADDR(0x406b00)
#define UARTA_7439 REG_PHYS_ADDR(0x40a900)
#define UARTA_7445 REG_PHYS_ADDR(0x40ab00)
@@ -55,22 +56,43 @@
mov \rv, #0 @ yes; record init is done
str \rv, [\tmp]
+ /* Check for V7 memory map if B53 */
+ mrc p15, 0, \rv, c0, c0, 0 @ get Main ID register
+ ldr \rp, =ARM_CPU_PART_MASK
+ and \rv, \rv, \rp
+ ldr \rp, =ARM_CPU_PART_BRAHMA_B53 @ check for B53 CPU
+ cmp \rv, \rp
+ bne 10f
+
+ /* if PERIPHBASE doesn't overlap REG_PHYS_BASE use V7 map */
+ mrc p15, 1, \rv, c15, c3, 0 @ get PERIPHBASE from CBAR
+ ands \rv, \rv, #REG_PHYS_BASE
+ ldreq \rp, =SUN_TOP_CTRL_BASE_V7
+
/* Check SUN_TOP_CTRL base */
- ldr \rp, =SUN_TOP_CTRL_BASE @ load SUN_TOP_CTRL PA
+10: ldrne \rp, =SUN_TOP_CTRL_BASE @ load SUN_TOP_CTRL PA
ldr \rv, [\rp, #0] @ get register contents
+ARM_BE8( rev \rv, \rv )
and \rv, \rv, #0xffffff00 @ strip revision bits [7:0]
/* Chip specific detection starts here */
20: checkuart(\rp, \rv, 0x33900000, 3390)
-21: checkuart(\rp, \rv, 0x72500000, 7250)
-22: checkuart(\rp, \rv, 0x72600000, 7260)
-23: checkuart(\rp, \rv, 0x72680000, 7268)
-24: checkuart(\rp, \rv, 0x72710000, 7271)
-25: checkuart(\rp, \rv, 0x73640000, 7364)
-26: checkuart(\rp, \rv, 0x73660000, 7366)
-27: checkuart(\rp, \rv, 0x07437100, 74371)
-28: checkuart(\rp, \rv, 0x74390000, 7439)
-29: checkuart(\rp, \rv, 0x74450000, 7445)
+21: checkuart(\rp, \rv, 0x07211600, 72116)
+22: checkuart(\rp, \rv, 0x72160000, 7216)
+23: checkuart(\rp, \rv, 0x07216400, 72164)
+24: checkuart(\rp, \rv, 0x07216500, 72165)
+25: checkuart(\rp, \rv, 0x72500000, 7250)
+26: checkuart(\rp, \rv, 0x72550000, 7255)
+27: checkuart(\rp, \rv, 0x72600000, 7260)
+28: checkuart(\rp, \rv, 0x72680000, 7268)
+29: checkuart(\rp, \rv, 0x72710000, 7271)
+30: checkuart(\rp, \rv, 0x72780000, 7278)
+31: checkuart(\rp, \rv, 0x73640000, 7364)
+32: checkuart(\rp, \rv, 0x73660000, 7366)
+33: checkuart(\rp, \rv, 0x07416500, 74165)
+34: checkuart(\rp, \rv, 0x07437100, 74371)
+35: checkuart(\rp, \rv, 0x74390000, 7439)
+36: checkuart(\rp, \rv, 0x74450000, 7445)
/* No valid UART found */
90: mov \rp, #0
@@ -98,11 +120,13 @@
.endm
.macro store, rd, rx:vararg
+ARM_BE8( rev \rd, \rd )
str \rd, \rx
.endm
.macro load, rd, rx:vararg
ldr \rd, \rx
+ARM_BE8( rev \rd, \rd )
.endm
.macro senduart,rd,rx
@@ -116,7 +140,10 @@
bne 1002b
.endm
- .macro waituart,rd,rx
+ .macro waituarttxrdy,rd,rx
+ .endm
+
+ .macro waituartcts,rd,rx
.endm
/*
diff --git a/arch/arm/include/debug/clps711x.S b/arch/arm/include/debug/clps711x.S
index c17ac5c9e5f3..a983d12a6515 100644
--- a/arch/arm/include/debug/clps711x.S
+++ b/arch/arm/include/debug/clps711x.S
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef CONFIG_DEBUG_CLPS711X_UART2
@@ -24,7 +20,10 @@
ldr \rp, =CLPS711X_UART_PADDR
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
.endm
.macro senduart,rd,rx
diff --git a/arch/arm/include/debug/dc21285.S b/arch/arm/include/debug/dc21285.S
index 02247f313e94..4ec0e5e31704 100644
--- a/arch/arm/include/debug/dc21285.S
+++ b/arch/arm/include/debug/dc21285.S
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* arch/arm/mach-footbridge/include/mach/debug-macro.S
*
* Debugging macro include header
*
* Copyright (C) 1994-1999 Russell King
* Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <asm/hardware/dec21285.h>
@@ -38,5 +34,8 @@
bne 1001b
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
.endm
diff --git a/arch/arm/include/debug/digicolor.S b/arch/arm/include/debug/digicolor.S
index c9517150766a..443674cad76a 100644
--- a/arch/arm/include/debug/digicolor.S
+++ b/arch/arm/include/debug/digicolor.S
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Debugging macro include header for Conexant Digicolor USART
*
* Copyright (C) 2014 Paradox Innovation Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#define UA0_STATUS 0x0742
@@ -25,7 +21,10 @@
strb \rd, [\rx, #UA0_EMI_REC]
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
.endm
.macro busyuart,rd,rx
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
deleted file mode 100644
index 660fa1e4b77b..000000000000
--- a/arch/arm/include/debug/efm32.S
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2013 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define UARTn_CMD 0x000c
-#define UARTn_CMD_TXEN 0x0004
-
-#define UARTn_STATUS 0x0010
-#define UARTn_STATUS_TXC 0x0020
-#define UARTn_STATUS_TXBL 0x0040
-
-#define UARTn_TXDATA 0x0034
-
- .macro addruart, rx, tmp, tmp2
- ldr \rx, =(CONFIG_DEBUG_UART_PHYS)
-
- /*
- * enable TX. The driver might disable it to save energy. We
- * don't care about disabling at the end as during debug power
- * consumption isn't that important.
- */
- ldr \tmp, =(UARTn_CMD_TXEN)
- str \tmp, [\rx, #UARTn_CMD]
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx, #UARTn_TXDATA]
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #UARTn_STATUS]
- tst \rd, #UARTn_STATUS_TXBL
- beq 1001b
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, UARTn_STATUS]
- tst \rd, #UARTn_STATUS_TXC
- bne 1001b
- .endm
diff --git a/arch/arm/include/debug/exynos.S b/arch/arm/include/debug/exynos.S
index 60bf3c23200d..74b56769f9cb 100644
--- a/arch/arm/include/debug/exynos.S
+++ b/arch/arm/include/debug/exynos.S
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
/* pull in the relevant register and map files. */
diff --git a/arch/arm/include/debug/icedcc.S b/arch/arm/include/debug/icedcc.S
index 43afcb021fa3..d5e65da8a687 100644
--- a/arch/arm/include/debug/icedcc.S
+++ b/arch/arm/include/debug/icedcc.S
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/debug/icedcc.S
*
* Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
@@ debug using ARM EmbeddedICE DCC channel
@@ -27,7 +23,10 @@
beq 1001b
.endm
- .macro waituart, rd, rx
+ .macro waituartcts, rd, rx
+ .endm
+
+ .macro waituarttxrdy, rd, rx
mov \rd, #0x2000000
1001:
subs \rd, \rd, #1
@@ -51,7 +50,10 @@
beq 1001b
.endm
- .macro waituart, rd, rx
+ .macro waituartcts, rd, rx
+ .endm
+
+ .macro waituarttxrdy, rd, rx
mov \rd, #0x10000000
1001:
subs \rd, \rd, #1
@@ -76,7 +78,10 @@
.endm
- .macro waituart, rd, rx
+ .macro waituartcts, rd, rx
+ .endm
+
+ .macro waituarttxrdy, rd, rx
mov \rd, #0x2000000
1001:
subs \rd, \rd, #1
diff --git a/arch/arm/include/debug/imx-uart.h b/arch/arm/include/debug/imx-uart.h
index bce58e975ad1..3edbb3c5b42b 100644
--- a/arch/arm/include/debug/imx-uart.h
+++ b/arch/arm/include/debug/imx-uart.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012-2015 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DEBUG_IMX_UART_H
@@ -14,13 +11,6 @@
#define IMX1_UART_BASE_ADDR(n) IMX1_UART##n##_BASE_ADDR
#define IMX1_UART_BASE(n) IMX1_UART_BASE_ADDR(n)
-#define IMX21_UART1_BASE_ADDR 0x1000a000
-#define IMX21_UART2_BASE_ADDR 0x1000b000
-#define IMX21_UART3_BASE_ADDR 0x1000c000
-#define IMX21_UART4_BASE_ADDR 0x1000d000
-#define IMX21_UART_BASE_ADDR(n) IMX21_UART##n##_BASE_ADDR
-#define IMX21_UART_BASE(n) IMX21_UART_BASE_ADDR(n)
-
#define IMX25_UART1_BASE_ADDR 0x43f90000
#define IMX25_UART2_BASE_ADDR 0x43f94000
#define IMX25_UART3_BASE_ADDR 0x5000c000
@@ -29,6 +19,13 @@
#define IMX25_UART_BASE_ADDR(n) IMX25_UART##n##_BASE_ADDR
#define IMX25_UART_BASE(n) IMX25_UART_BASE_ADDR(n)
+#define IMX27_UART1_BASE_ADDR 0x1000a000
+#define IMX27_UART2_BASE_ADDR 0x1000b000
+#define IMX27_UART3_BASE_ADDR 0x1000c000
+#define IMX27_UART4_BASE_ADDR 0x1000d000
+#define IMX27_UART_BASE_ADDR(n) IMX27_UART##n##_BASE_ADDR
+#define IMX27_UART_BASE(n) IMX27_UART_BASE_ADDR(n)
+
#define IMX31_UART1_BASE_ADDR 0x43f90000
#define IMX31_UART2_BASE_ADDR 0x43f94000
#define IMX31_UART3_BASE_ADDR 0x5000c000
@@ -115,10 +112,10 @@
#ifdef CONFIG_DEBUG_IMX1_UART
#define UART_PADDR IMX_DEBUG_UART_BASE(IMX1)
-#elif defined(CONFIG_DEBUG_IMX21_IMX27_UART)
-#define UART_PADDR IMX_DEBUG_UART_BASE(IMX21)
#elif defined(CONFIG_DEBUG_IMX25_UART)
#define UART_PADDR IMX_DEBUG_UART_BASE(IMX25)
+#elif defined(CONFIG_DEBUG_IMX27_UART)
+#define UART_PADDR IMX_DEBUG_UART_BASE(IMX27)
#elif defined(CONFIG_DEBUG_IMX31_UART)
#define UART_PADDR IMX_DEBUG_UART_BASE(IMX31)
#elif defined(CONFIG_DEBUG_IMX35_UART)
diff --git a/arch/arm/include/debug/imx.S b/arch/arm/include/debug/imx.S
index 92c44760d656..bb7b9550580c 100644
--- a/arch/arm/include/debug/imx.S
+++ b/arch/arm/include/debug/imx.S
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* arch/arm/mach-imx/include/mach/debug-macro.S
*
* Debugging macro include header
*
* Copyright (C) 1994-1999 Russell King
* Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <asm/assembler.h>
@@ -39,7 +35,10 @@
str \rd, [\rx, #0x40] @ TXDATA
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
.endm
.macro busyuart,rd,rx
diff --git a/arch/arm/include/debug/ks8695.S b/arch/arm/include/debug/ks8695.S
deleted file mode 100644
index 961da1f32ab3..000000000000
--- a/arch/arm/include/debug/ks8695.S
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * arch/arm/include/debug/ks8695.S
- *
- * Copyright (C) 2006 Ben Dooks <ben@simtec.co.uk>
- * Copyright (C) 2006 Simtec Electronics
- *
- * KS8695 - Debug macros
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define KS8695_UART_PA 0x03ffe000
-#define KS8695_UART_VA 0xf00fe000
-#define KS8695_URTH (0x04)
-#define KS8695_URLS (0x14)
-#define URLS_URTE (1 << 6)
-#define URLS_URTHRE (1 << 5)
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =KS8695_UART_PA @ physical base address
- ldr \rv, =KS8695_UART_VA @ virtual base address
- .endm
-
- .macro senduart, rd, rx
- str \rd, [\rx, #KS8695_URTH] @ Write to Transmit Holding Register
- .endm
-
- .macro busyuart, rd, rx
-1001: ldr \rd, [\rx, #KS8695_URLS] @ Read Line Status Register
- tst \rd, #URLS_URTE @ Holding & Shift registers empty?
- beq 1001b
- .endm
-
- .macro waituart, rd, rx
-1001: ldr \rd, [\rx, #KS8695_URLS] @ Read Line Status Register
- tst \rd, #URLS_URTHRE @ Holding Register empty?
- beq 1001b
- .endm
diff --git a/arch/arm/include/debug/meson.S b/arch/arm/include/debug/meson.S
index 1bae99bf6f11..7b60e4401225 100644
--- a/arch/arm/include/debug/meson.S
+++ b/arch/arm/include/debug/meson.S
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014 Carlo Caione
* Carlo Caione <carlo@caione.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define MESON_AO_UART_WFIFO 0x0
@@ -28,7 +25,10 @@
beq 1002b
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
1001: ldr \rd, [\rx, #MESON_AO_UART_STATUS]
tst \rd, #MESON_AO_UART_TX_FIFO_FULL
bne 1001b
diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S
index b03024fa671f..530edc74f9a3 100644
--- a/arch/arm/include/debug/msm.S
+++ b/arch/arm/include/debug/msm.S
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
.macro addruart, rp, rv, tmp
@@ -26,7 +17,10 @@ ARM_BE8(rev \rd, \rd )
str \rd, [\rx, #0x70]
.endm
- .macro waituart, rd, rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy, rd, rx
@ check for TX_EMT in UARTDM_SR
ldr \rd, [\rx, #0x08]
ARM_BE8(rev \rd, \rd )
diff --git a/arch/arm/include/debug/netx.S b/arch/arm/include/debug/netx.S
deleted file mode 100644
index 81e1b2af70f7..000000000000
--- a/arch/arm/include/debug/netx.S
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-#define UART_DATA 0
-#define UART_FLAG 0x18
-#define UART_FLAG_BUSY (1 << 3)
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =CONFIG_DEBUG_UART_PHYS
- ldr \rv, =CONFIG_DEBUG_UART_VIRT
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #UART_DATA]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldr \rd, [\rx, #UART_FLAG]
- tst \rd, #UART_FLAG_BUSY
- bne 1002b
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #UART_FLAG]
- tst \rd, #UART_FLAG_BUSY
- bne 1001b
- .endm
diff --git a/arch/arm/include/debug/omap2plus.S b/arch/arm/include/debug/omap2plus.S
index 6d867aef18eb..0680be6c79d3 100644
--- a/arch/arm/include/debug/omap2plus.S
+++ b/arch/arm/include/debug/omap2plus.S
@@ -1,54 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Debugging macro include header
*
* Copyright (C) 1994-1999 Russell King
* Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/serial_reg.h>
-/* OMAP2 serial ports */
-#define OMAP2_UART1_BASE 0x4806a000
-#define OMAP2_UART2_BASE 0x4806c000
-#define OMAP2_UART3_BASE 0x4806e000
-
-/* OMAP3 serial ports */
-#define OMAP3_UART1_BASE OMAP2_UART1_BASE
-#define OMAP3_UART2_BASE OMAP2_UART2_BASE
-#define OMAP3_UART3_BASE 0x49020000
-#define OMAP3_UART4_BASE 0x49042000 /* Only on 36xx */
-#define OMAP3_UART4_AM35XX_BASE 0x4809E000 /* Only on AM35xx */
-
-/* OMAP4 serial ports */
-#define OMAP4_UART1_BASE OMAP2_UART1_BASE
-#define OMAP4_UART2_BASE OMAP2_UART2_BASE
-#define OMAP4_UART3_BASE 0x48020000
-#define OMAP4_UART4_BASE 0x4806e000
-
-/* TI81XX serial ports */
-#define TI81XX_UART1_BASE 0x48020000
-#define TI81XX_UART2_BASE 0x48022000
-#define TI81XX_UART3_BASE 0x48024000
-
-/* AM3505/3517 UART4 */
-#define AM35XX_UART4_BASE 0x4809E000 /* Only on AM3505/3517 */
-
-/* AM33XX serial port */
-#define AM33XX_UART1_BASE 0x44E09000
-
-/* OMAP5 serial ports */
-#define OMAP5_UART1_BASE OMAP2_UART1_BASE
-#define OMAP5_UART2_BASE OMAP2_UART2_BASE
-#define OMAP5_UART3_BASE OMAP4_UART3_BASE
-#define OMAP5_UART4_BASE OMAP4_UART4_BASE
-#define OMAP5_UART5_BASE 0x48066000
-#define OMAP5_UART6_BASE 0x48068000
-
/* External port on Zoom2/3 */
#define ZOOM_UART_BASE 0x10000000
#define ZOOM_UART_VIRT 0xfa400000
@@ -59,6 +18,7 @@
#define UART_OFFSET(addr) ((addr) & 0x00ffffff)
.pushsection .data
+ .align 2
omap_uart_phys: .word 0
omap_uart_virt: .word 0
omap_uart_lsr: .word 0
@@ -79,55 +39,6 @@ omap_uart_lsr: .word 0
bne 100f @ already configured
/* Configure the UART offset from the phys/virt base */
-#ifdef CONFIG_DEBUG_OMAP2UART1
- mov \rp, #UART_OFFSET(OMAP2_UART1_BASE) @ omap2/3/4
- b 98f
-#endif
-#ifdef CONFIG_DEBUG_OMAP2UART2
- mov \rp, #UART_OFFSET(OMAP2_UART2_BASE) @ omap2/3/4
- b 98f
-#endif
-#ifdef CONFIG_DEBUG_OMAP2UART3
- mov \rp, #UART_OFFSET(OMAP2_UART3_BASE)
- b 98f
-#endif
-#ifdef CONFIG_DEBUG_OMAP3UART3
- mov \rp, #UART_OFFSET(OMAP3_UART1_BASE)
- add \rp, \rp, #0x00fb0000
- add \rp, \rp, #0x00006000 @ OMAP3_UART3_BASE
- b 98f
-#endif
-#ifdef CONFIG_DEBUG_OMAP4UART3
- mov \rp, #UART_OFFSET(OMAP4_UART3_BASE)
- b 98f
-#endif
-#ifdef CONFIG_DEBUG_OMAP3UART4
- mov \rp, #UART_OFFSET(OMAP3_UART1_BASE)
- add \rp, \rp, #0x00fb0000
- add \rp, \rp, #0x00028000 @ OMAP3_UART4_BASE
- b 98f
-#endif
-#ifdef CONFIG_DEBUG_OMAP4UART4
- mov \rp, #UART_OFFSET(OMAP4_UART4_BASE)
- b 98f
-#endif
-#ifdef CONFIG_DEBUG_TI81XXUART1
- mov \rp, #UART_OFFSET(TI81XX_UART1_BASE)
- b 98f
-#endif
-#ifdef CONFIG_DEBUG_TI81XXUART2
- mov \rp, #UART_OFFSET(TI81XX_UART2_BASE)
- b 98f
-#endif
-#ifdef CONFIG_DEBUG_TI81XXUART3
- mov \rp, #UART_OFFSET(TI81XX_UART3_BASE)
- b 98f
-#endif
-#ifdef CONFIG_DEBUG_AM33XXUART1
- ldr \rp, =AM33XX_UART1_BASE
- and \rp, \rp, #0x00ffffff
- b 97f
-#endif
#ifdef CONFIG_DEBUG_ZOOM_UART
ldr \rp, =ZOOM_UART_BASE
str \rp, [\tmp, #0] @ omap_uart_phys
@@ -138,28 +49,6 @@ omap_uart_lsr: .word 0
#endif
b 10b
- /* AM33XX: Store both phys and virt address for the uart */
-97: add \rp, \rp, #0x44000000 @ phys base
- str \rp, [\tmp, #0] @ omap_uart_phys
- sub \rp, \rp, #0x44000000 @ phys base
- add \rp, \rp, #0xf9000000 @ virt base
- str \rp, [\tmp, #4] @ omap_uart_virt
- mov \rp, #(UART_LSR << OMAP_PORT_SHIFT)
- str \rp, [\tmp, #8] @ omap_uart_lsr
-
- b 10b
-
- /* Store both phys and virt address for the uart */
-98: add \rp, \rp, #0x48000000 @ phys base
- str \rp, [\tmp, #0] @ omap_uart_phys
- sub \rp, \rp, #0x48000000 @ phys base
- add \rp, \rp, #0xfa000000 @ virt base
- str \rp, [\tmp, #4] @ omap_uart_virt
- mov \rp, #(UART_LSR << OMAP_PORT_SHIFT)
- str \rp, [\tmp, #8] @ omap_uart_lsr
-
- b 10b
-
.align
99: .word .
.word omap_uart_phys
@@ -186,5 +75,8 @@ omap_uart_lsr: .word 0
bne 1001b
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
.endm
diff --git a/arch/arm/include/debug/palmchip.S b/arch/arm/include/debug/palmchip.S
index 6824b2d1c38e..aed59332e487 100644
--- a/arch/arm/include/debug/palmchip.S
+++ b/arch/arm/include/debug/palmchip.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/serial_reg.h>
#undef UART_TX
diff --git a/arch/arm/include/debug/pl01x.S b/arch/arm/include/debug/pl01x.S
index f7d8323cefcc..c7e02d0628bf 100644
--- a/arch/arm/include/debug/pl01x.S
+++ b/arch/arm/include/debug/pl01x.S
@@ -1,24 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* arch/arm/include/debug/pl01x.S
*
* Debugging macro include header
*
* Copyright (C) 1994-1999 Russell King
* Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/amba/serial.h>
-#ifdef CONFIG_DEBUG_ZTE_ZX
-#undef UART01x_DR
-#undef UART01x_FR
-#define UART01x_DR 0x04
-#define UART01x_FR 0x14
-#endif
-
#ifdef CONFIG_DEBUG_UART_PHYS
.macro addruart, rp, rv, tmp
ldr \rp, =CONFIG_DEBUG_UART_PHYS
@@ -30,7 +19,10 @@
strb \rd, [\rx, #UART01x_DR]
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
1001: ldr \rd, [\rx, #UART01x_FR]
ARM_BE8( rev \rd, \rd )
tst \rd, #UART01x_FR_TXFF
diff --git a/arch/arm/include/debug/renesas-scif.S b/arch/arm/include/debug/renesas-scif.S
index 97820a8df51a..8e433e981bbe 100644
--- a/arch/arm/include/debug/renesas-scif.S
+++ b/arch/arm/include/debug/renesas-scif.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Renesas SCIF(A) debugging macro include header
*
@@ -5,16 +6,16 @@
*
* Copyright (C) 2012-2013 Renesas Electronics Corporation
* Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define SCIF_PHYS CONFIG_DEBUG_UART_PHYS
#define SCIF_VIRT ((SCIF_PHYS & 0x00ffffff) | 0xfd000000)
-#if CONFIG_DEBUG_UART_PHYS < 0xe6e00000
+#if defined(CONFIG_DEBUG_R7S9210_SCIF2) || defined(CONFIG_DEBUG_R7S9210_SCIF4)
+/* RZ/A2 SCIFA */
+#define FTDR 0x06
+#define FSR 0x08
+#elif CONFIG_DEBUG_UART_PHYS < 0xe6e00000
/* SCIFA */
#define FTDR 0x20
#define FSR 0x14
@@ -32,7 +33,10 @@
ldr \rv, =SCIF_VIRT
.endm
- .macro waituart, rd, rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy, rd, rx
1001: ldrh \rd, [\rx, #FSR]
tst \rd, #TDFE
beq 1001b
diff --git a/arch/arm/include/debug/s3c24xx.S b/arch/arm/include/debug/s3c24xx.S
index b1f54dc4888c..7ab5e577cd42 100644
--- a/arch/arm/include/debug/s3c24xx.S
+++ b/arch/arm/include/debug/s3c24xx.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* arch/arm/mach-s3c2410/include/mach/debug-macro.S
*
* Debugging macro include header
@@ -6,10 +7,6 @@
* Copyright (C) 2005 Simtec Electronics
*
* Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/serial_s3c.h>
@@ -31,16 +28,6 @@
and \rd, \rd, #S3C2410_UFSTAT_TXMASK
.endm
-/* Select the correct implementation depending on the configuration. The
- * S3C2440 will get selected by default, as these are the most widely
- * used variants of these
-*/
-
-#if defined(CONFIG_DEBUG_S3C2410_UART)
-#define fifo_full fifo_full_s3c2410
-#define fifo_level fifo_level_s3c2410
-#endif
-
/* include the rest of the code which will do the work */
#include <debug/samsung.S>
diff --git a/arch/arm/include/debug/s5pv210.S b/arch/arm/include/debug/s5pv210.S
index 4f1a73e2c1a1..820a1cfb0595 100644
--- a/arch/arm/include/debug/s5pv210.S
+++ b/arch/arm/include/debug/s5pv210.S
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/* pull in the relevant register and map files. */
diff --git a/arch/arm/include/debug/sa1100.S b/arch/arm/include/debug/sa1100.S
index a0ae4f4cd924..7968ea52df3d 100644
--- a/arch/arm/include/debug/sa1100.S
+++ b/arch/arm/include/debug/sa1100.S
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* arch/arm/include/debug/sa1100.S
*
* Debugging macro include header
*
* Copyright (C) 1994-1999 Russell King
* Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#define UTCR3 0x0c
@@ -55,7 +51,10 @@
str \rd, [\rx, #UTDR]
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
1001: ldr \rd, [\rx, #UTSR1]
tst \rd, #UTSR1_TNF
beq 1001b
diff --git a/arch/arm/include/debug/samsung.S b/arch/arm/include/debug/samsung.S
index f4eeed2a1981..ab474d564a90 100644
--- a/arch/arm/include/debug/samsung.S
+++ b/arch/arm/include/debug/samsung.S
@@ -1,13 +1,9 @@
-/* arch/arm/plat-samsung/include/plat/debug-macro.S
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright 2005, 2007 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#include <linux/serial_s3c.h>
@@ -73,7 +69,10 @@ ARM_BE8(rev \rd, \rd)
1002: @ exit busyuart
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
ldr \rd, [\rx, # S3C2410_UFCON]
ARM_BE8(rev \rd, \rd)
tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled?
diff --git a/arch/arm/include/debug/sirf.S b/arch/arm/include/debug/sirf.S
deleted file mode 100644
index 630f231f2f37..000000000000
--- a/arch/arm/include/debug/sirf.S
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * arch/arm/mach-prima2/include/mach/debug-macro.S
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
- */
-
-#define SIRF_LLUART_TXFIFO_STATUS 0x0114
-#define SIRF_LLUART_TXFIFO_DATA 0x0118
-
-#define SIRF_LLUART_TXFIFO_FULL (1 << 5)
-
-#ifdef CONFIG_DEBUG_SIRFATLAS7_UART0
-#define SIRF_LLUART_TXFIFO_EMPTY (1 << 8)
-#else
-#define SIRF_LLUART_TXFIFO_EMPTY (1 << 6)
-#endif
-
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =CONFIG_DEBUG_UART_PHYS @ physical
- ldr \rv, =CONFIG_DEBUG_UART_VIRT @ virtual
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #SIRF_LLUART_TXFIFO_DATA]
- .endm
-
- .macro busyuart,rd,rx
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #SIRF_LLUART_TXFIFO_STATUS]
- tst \rd, #SIRF_LLUART_TXFIFO_EMPTY
- beq 1001b
- .endm
-
diff --git a/arch/arm/include/debug/sti.S b/arch/arm/include/debug/sti.S
index e3aa58ff1776..dc796ac2ac57 100644
--- a/arch/arm/include/debug/sti.S
+++ b/arch/arm/include/debug/sti.S
@@ -1,36 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/debug/sti.S
*
* Debugging macro include header
* Copyright (C) 2013 STMicroelectronics (R&D) Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#define STIH41X_COMMS_BASE 0xfed00000
-#define STIH41X_ASC2_BASE (STIH41X_COMMS_BASE+0x32000)
-
-#define STIH41X_SBC_LPM_BASE 0xfe400000
-#define STIH41X_SBC_COMMS_BASE (STIH41X_SBC_LPM_BASE + 0x100000)
-#define STIH41X_SBC_ASC1_BASE (STIH41X_SBC_COMMS_BASE + 0x31000)
-
-
-#define VIRT_ADDRESS(x) (x - 0x1000000)
-
-#if IS_ENABLED(CONFIG_STIH41X_DEBUG_ASC2)
-#define DEBUG_LL_UART_BASE STIH41X_ASC2_BASE
-#endif
-
-#if IS_ENABLED(CONFIG_STIH41X_DEBUG_SBC_ASC1)
-#define DEBUG_LL_UART_BASE STIH41X_SBC_ASC1_BASE
-#endif
-
-#ifndef DEBUG_LL_UART_BASE
-#error "DEBUG UART is not Configured"
-#endif
-
#define ASC_TX_BUF_OFF 0x04
#define ASC_CTRL_OFF 0x0c
#define ASC_STA_OFF 0x14
@@ -40,15 +15,18 @@
.macro addruart, rp, rv, tmp
- ldr \rp, =DEBUG_LL_UART_BASE @ physical base
- ldr \rv, =VIRT_ADDRESS(DEBUG_LL_UART_BASE) @ virt base
+ ldr \rp, =CONFIG_DEBUG_UART_PHYS @ physical base
+ ldr \rv, =CONFIG_DEBUG_UART_VIRT @ virt base
.endm
.macro senduart,rd,rx
strb \rd, [\rx, #ASC_TX_BUF_OFF]
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
1001: ldr \rd, [\rx, #ASC_STA_OFF]
tst \rd, #ASC_STA_TX_FULL
bne 1001b
diff --git a/arch/arm/include/debug/stm32.S b/arch/arm/include/debug/stm32.S
new file mode 100644
index 000000000000..b6d9df30e37d
--- /dev/null
+++ b/arch/arm/include/debug/stm32.S
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2017 - All Rights Reserved
+ * Author: Gerald Baeza <gerald.baeza@st.com> for STMicroelectronics.
+ */
+
+#ifdef CONFIG_STM32F4_DEBUG_UART
+#define STM32_USART_SR_OFF 0x00
+#define STM32_USART_TDR_OFF 0x04
+#endif
+
+#if defined(CONFIG_STM32F7_DEBUG_UART) || defined(CONFIG_STM32H7_DEBUG_UART) || \
+ defined(CONFIG_STM32MP1_DEBUG_UART)
+#define STM32_USART_SR_OFF 0x1C
+#define STM32_USART_TDR_OFF 0x28
+#endif
+
+#define STM32_USART_TC (1 << 6) /* Tx complete */
+#define STM32_USART_TXE (1 << 7) /* Tx data reg empty */
+
+.macro addruart, rp, rv, tmp
+ ldr \rp, =CONFIG_DEBUG_UART_PHYS @ physical base
+ ldr \rv, =CONFIG_DEBUG_UART_VIRT @ virt base
+.endm
+
+.macro senduart,rd,rx
+ strb \rd, [\rx, #STM32_USART_TDR_OFF]
+.endm
+
+.macro waituartcts,rd,rx
+.endm
+
+.macro waituarttxrdy,rd,rx
+1001: ldr \rd, [\rx, #(STM32_USART_SR_OFF)] @ Read Status Register
+ tst \rd, #STM32_USART_TXE @ TXE = 1 = tx empty
+ beq 1001b
+.endm
+
+.macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #(STM32_USART_SR_OFF)] @ Read Status Register
+ tst \rd, #STM32_USART_TC @ TC = 1 = tx complete
+ beq 1001b
+.endm
diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S
index 3bc80599c022..7454480d084b 100644
--- a/arch/arm/include/debug/tegra.S
+++ b/arch/arm/include/debug/tegra.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2010,2011 Google, Inc.
* Copyright (C) 2011-2012 NVIDIA CORPORATION. All Rights Reserved.
@@ -10,16 +11,6 @@
*
* Portions based on mach-omap2's debug-macro.S
* Copyright (C) 1994-1999 Russell King
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/serial_reg.h>
@@ -158,7 +149,34 @@
.align
99: .word .
+#if defined(ZIMAGE)
+ .word . + 4
+/*
+ * Storage for the state maintained by the macro.
+ *
+ * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
+ * That's because this header is included from multiple files, and we only
+ * want a single copy of the data. In particular, the UART probing code above
+ * assumes it's running using physical addresses. This is true when this file
+ * is included from head.o, but not when included from debug.o. So we need
+ * to share the probe results between the two copies, rather than having
+ * to re-run the probing again later.
+ *
+ * In the decompressor, we put the storage right here, since common.c
+ * isn't included in the decompressor build. This storage data gets put in
+ * .text even though it's really data, since .data is discarded from the
+ * decompressor. Luckily, .text is writeable in the decompressor, unless
+ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+ */
+ /* Debug UART initialization required */
+ .word 1
+ /* Debug UART physical address */
+ .word 0
+ /* Debug UART virtual address */
+ .word 0
+#else
.word tegra_uart_config
+#endif
.ltorg
/* Load previously selected UART address */
@@ -173,7 +191,7 @@
.macro senduart, rd, rx
cmp \rx, #0
- strneb \rd, [\rx, #UART_TX << UART_SHIFT]
+ strbne \rd, [\rx, #UART_TX << UART_SHIFT]
1001:
.endm
@@ -187,40 +205,14 @@
1002:
.endm
- .macro waituart, rd, rx
-#ifdef FLOW_CONTROL
+ .macro waituartcts, rd, rx
cmp \rx, #0
beq 1002f
1001: ldrb \rd, [\rx, #UART_MSR << UART_SHIFT]
tst \rd, #UART_MSR_CTS
beq 1001b
1002:
-#endif
.endm
-/*
- * Storage for the state maintained by the macros above.
- *
- * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
- * That's because this header is included from multiple files, and we only
- * want a single copy of the data. In particular, the UART probing code above
- * assumes it's running using physical addresses. This is true when this file
- * is included from head.o, but not when included from debug.o. So we need
- * to share the probe results between the two copies, rather than having
- * to re-run the probing again later.
- *
- * In the decompressor, we put the symbol/storage right here, since common.c
- * isn't included in the decompressor build. This symbol gets put in .text
- * even though it's really data, since .data is discarded from the
- * decompressor. Luckily, .text is writeable in the decompressor, unless
- * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
- */
-#if defined(ZIMAGE)
-tegra_uart_config:
- /* Debug UART initialization required */
- .word 1
- /* Debug UART physical address */
- .word 0
- /* Debug UART virtual address */
- .word 0
-#endif
+ .macro waituarttxrdy,rd,rx
+ .endm
diff --git a/arch/arm/include/debug/uncompress.h b/arch/arm/include/debug/uncompress.h
index 0e2949b0fae9..ff0b227290cb 100644
--- a/arch/arm/include/debug/uncompress.h
+++ b/arch/arm/include/debug/uncompress.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_DEBUG_UNCOMPRESS
extern void putc(int c);
#else
diff --git a/arch/arm/include/debug/ux500.S b/arch/arm/include/debug/ux500.S
index aa7f63a8b5e0..c516900947bb 100644
--- a/arch/arm/include/debug/ux500.S
+++ b/arch/arm/include/debug/ux500.S
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Debugging macro include header
*
* Copyright (C) 2009 ST-Ericsson
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
@@ -24,21 +20,16 @@
#define U8500_UART0_PHYS_BASE (0x80120000)
#define U8500_UART1_PHYS_BASE (0x80121000)
#define U8500_UART2_PHYS_BASE (0x80007000)
-#define U8500_UART0_VIRT_BASE (0xf8120000)
-#define U8500_UART1_VIRT_BASE (0xf8121000)
-#define U8500_UART2_VIRT_BASE (0xf8007000)
#define __UX500_PHYS_UART(n) U8500_UART##n##_PHYS_BASE
-#define __UX500_VIRT_UART(n) U8500_UART##n##_VIRT_BASE
#endif
-#if !defined(__UX500_PHYS_UART) || !defined(__UX500_VIRT_UART)
+#if !defined(__UX500_PHYS_UART)
#error Unknown SOC
#endif
#define UX500_PHYS_UART(n) __UX500_PHYS_UART(n)
-#define UX500_VIRT_UART(n) __UX500_VIRT_UART(n)
#define UART_PHYS_BASE UX500_PHYS_UART(CONFIG_UX500_DEBUG_UART)
-#define UART_VIRT_BASE UX500_VIRT_UART(CONFIG_UX500_DEBUG_UART)
+#define UART_VIRT_BASE (0xfff07000)
.macro addruart, rp, rv, tmp
ldr \rp, =UART_PHYS_BASE @ no, physical address
diff --git a/arch/arm/include/debug/vexpress.S b/arch/arm/include/debug/vexpress.S
index 524acd5a223e..ccb22e9a86a3 100644
--- a/arch/arm/include/debug/vexpress.S
+++ b/arch/arm/include/debug/vexpress.S
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* arch/arm/mach-realview/include/mach/debug-macro.S
*
* Debugging macro include header
*
* Copyright (C) 1994-1999 Russell King
* Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define DEBUG_LL_PHYS_BASE 0x10000000
diff --git a/arch/arm/include/debug/vf.S b/arch/arm/include/debug/vf.S
index b88933849a17..035bcbf117ab 100644
--- a/arch/arm/include/debug/vf.S
+++ b/arch/arm/include/debug/vf.S
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#define VF_UART0_BASE_ADDR 0x40027000
@@ -33,5 +29,8 @@
beq 1001b @ wait until transmit done
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
.endm
diff --git a/arch/arm/include/debug/vt8500.S b/arch/arm/include/debug/vt8500.S
index 0e0ca0869da7..d01094fdbc8c 100644
--- a/arch/arm/include/debug/vt8500.S
+++ b/arch/arm/include/debug/vt8500.S
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Debugging macro include header
*
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
* Moved from arch/arm/mach-vt8500/include/mach/debug-macro.S
* Minor changes for readability.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define DEBUG_LL_PHYS_BASE 0xD8000000
@@ -31,7 +28,10 @@
bne 1001b
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
.endm
#endif
diff --git a/arch/arm/include/debug/zynq.S b/arch/arm/include/debug/zynq.S
index 060cb5b49bfd..5d42cc35ecf3 100644
--- a/arch/arm/include/debug/zynq.S
+++ b/arch/arm/include/debug/zynq.S
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Debugging macro include header
*
* Copyright (C) 2011 Xilinx
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define UART_CR_OFFSET 0x00 /* Control Register [8:0] */
#define UART_SR_OFFSET 0x2C /* Channel Status [11:0] */
@@ -41,7 +33,10 @@
strb \rd, [\rx, #UART_FIFO_OFFSET] @ TXDATA
.endm
- .macro waituart,rd,rx
+ .macro waituartcts,rd,rx
+ .endm
+
+ .macro waituarttxrdy,rd,rx
1001: ldr \rd, [\rx, #UART_SR_OFFSET]
ARM_BE8( rev \rd, \rd )
tst \rd, #UART_SR_TXEMPTY
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 8e17fe80b55b..63748af8bc9d 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,22 +1,5 @@
-# UAPI Header export list
-include include/uapi/asm-generic/Kbuild.asm
+# SPDX-License-Identifier: GPL-2.0
-generated-y += unistd-common.h
generated-y += unistd-oabi.h
generated-y += unistd-eabi.h
-
-generic-y += bitsperlong.h
-generic-y += errno.h
-generic-y += ioctl.h
-generic-y += ipcbuf.h
-generic-y += msgbuf.h
-generic-y += param.h
-generic-y += poll.h
-generic-y += resource.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-generic-y += siginfo.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += termbits.h
-generic-y += termios.h
+generic-y += kvm_para.h
diff --git a/arch/arm/include/uapi/asm/auxvec.h b/arch/arm/include/uapi/asm/auxvec.h
index cb02a767a500..5c09da5965d4 100644
--- a/arch/arm/include/uapi/asm/auxvec.h
+++ b/arch/arm/include/uapi/asm/auxvec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_AUXVEC_H
#define __ASM_AUXVEC_H
diff --git a/arch/arm/include/uapi/asm/byteorder.h b/arch/arm/include/uapi/asm/byteorder.h
index 77379748b171..cb8406afe162 100644
--- a/arch/arm/include/uapi/asm/byteorder.h
+++ b/arch/arm/include/uapi/asm/byteorder.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* arch/arm/include/asm/byteorder.h
*
diff --git a/arch/arm/include/uapi/asm/fcntl.h b/arch/arm/include/uapi/asm/fcntl.h
index a80b6607b2ef..e6b5d7141c05 100644
--- a/arch/arm/include/uapi/asm/fcntl.h
+++ b/arch/arm/include/uapi/asm/fcntl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ARM_FCNTL_H
#define _ARM_FCNTL_H
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h
index 20d12f230a2f..6b2023e39b6f 100644
--- a/arch/arm/include/uapi/asm/hwcap.h
+++ b/arch/arm/include/uapi/asm/hwcap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI__ASMARM_HWCAP_H
#define _UAPI__ASMARM_HWCAP_H
@@ -14,7 +15,7 @@
#define HWCAP_EDSP (1 << 7)
#define HWCAP_JAVA (1 << 8)
#define HWCAP_IWMMXT (1 << 9)
-#define HWCAP_CRUNCH (1 << 10)
+#define HWCAP_CRUNCH (1 << 10) /* Obsolete */
#define HWCAP_THUMBEE (1 << 11)
#define HWCAP_NEON (1 << 12)
#define HWCAP_VFPv3 (1 << 13)
@@ -27,6 +28,12 @@
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
#define HWCAP_LPAE (1 << 20)
#define HWCAP_EVTSTRM (1 << 21)
+#define HWCAP_FPHP (1 << 22)
+#define HWCAP_ASIMDHP (1 << 23)
+#define HWCAP_ASIMDDP (1 << 24)
+#define HWCAP_ASIMDFHM (1 << 25)
+#define HWCAP_ASIMDBF16 (1 << 26)
+#define HWCAP_I8MM (1 << 27)
/*
* HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2
@@ -36,5 +43,7 @@
#define HWCAP2_SHA1 (1 << 2)
#define HWCAP2_SHA2 (1 << 3)
#define HWCAP2_CRC32 (1 << 4)
+#define HWCAP2_SB (1 << 5)
+#define HWCAP2_SSBS (1 << 6)
#endif /* _UAPI__ASMARM_HWCAP_H */
diff --git a/arch/arm/include/uapi/asm/ioctls.h b/arch/arm/include/uapi/asm/ioctls.h
index 9c9629816128..1bfe2854fb51 100644
--- a/arch/arm/include/uapi/asm/ioctls.h
+++ b/arch/arm/include/uapi/asm/ioctls.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_ARM_IOCTLS_H
#define __ASM_ARM_IOCTLS_H
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
deleted file mode 100644
index 5db2d4c6a55f..000000000000
--- a/arch/arm/include/uapi/asm/kvm.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_H__
-#define __ARM_KVM_H__
-
-#include <linux/types.h>
-#include <linux/psci.h>
-#include <asm/ptrace.h>
-
-#define __KVM_HAVE_GUEST_DEBUG
-#define __KVM_HAVE_IRQ_LINE
-#define __KVM_HAVE_READONLY_MEM
-
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
-
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
-/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
-#define KVM_ARM_SVC_sp svc_regs[0]
-#define KVM_ARM_SVC_lr svc_regs[1]
-#define KVM_ARM_SVC_spsr svc_regs[2]
-#define KVM_ARM_ABT_sp abt_regs[0]
-#define KVM_ARM_ABT_lr abt_regs[1]
-#define KVM_ARM_ABT_spsr abt_regs[2]
-#define KVM_ARM_UND_sp und_regs[0]
-#define KVM_ARM_UND_lr und_regs[1]
-#define KVM_ARM_UND_spsr und_regs[2]
-#define KVM_ARM_IRQ_sp irq_regs[0]
-#define KVM_ARM_IRQ_lr irq_regs[1]
-#define KVM_ARM_IRQ_spsr irq_regs[2]
-
-/* Valid only for fiq_regs in struct kvm_regs */
-#define KVM_ARM_FIQ_r8 fiq_regs[0]
-#define KVM_ARM_FIQ_r9 fiq_regs[1]
-#define KVM_ARM_FIQ_r10 fiq_regs[2]
-#define KVM_ARM_FIQ_fp fiq_regs[3]
-#define KVM_ARM_FIQ_ip fiq_regs[4]
-#define KVM_ARM_FIQ_sp fiq_regs[5]
-#define KVM_ARM_FIQ_lr fiq_regs[6]
-#define KVM_ARM_FIQ_spsr fiq_regs[7]
-
-struct kvm_regs {
- struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */
- unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
- unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
- unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */
- unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
- unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
-};
-
-/* Supported Processor Types */
-#define KVM_ARM_TARGET_CORTEX_A15 0
-#define KVM_ARM_TARGET_CORTEX_A7 1
-#define KVM_ARM_NUM_TARGETS 2
-
-/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
-#define KVM_ARM_DEVICE_TYPE_SHIFT 0
-#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
-#define KVM_ARM_DEVICE_ID_SHIFT 16
-#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
-
-/* Supported device IDs */
-#define KVM_ARM_DEVICE_VGIC_V2 0
-
-/* Supported VGIC address types */
-#define KVM_VGIC_V2_ADDR_TYPE_DIST 0
-#define KVM_VGIC_V2_ADDR_TYPE_CPU 1
-
-#define KVM_VGIC_V2_DIST_SIZE 0x1000
-#define KVM_VGIC_V2_CPU_SIZE 0x2000
-
-/* Supported VGICv3 address types */
-#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
-#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
-#define KVM_VGIC_ITS_ADDR_TYPE 4
-
-#define KVM_VGIC_V3_DIST_SIZE SZ_64K
-#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
-#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K)
-
-#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
-#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */
-
-struct kvm_vcpu_init {
- __u32 target;
- __u32 features[7];
-};
-
-struct kvm_sregs {
-};
-
-struct kvm_fpu {
-};
-
-struct kvm_guest_debug_arch {
-};
-
-struct kvm_debug_exit_arch {
-};
-
-struct kvm_sync_regs {
- /* Used with KVM_CAP_ARM_USER_IRQ */
- __u64 device_irq_level;
-};
-
-struct kvm_arch_memory_slot {
-};
-
-/* If you need to interpret the index values, here is the key: */
-#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
-#define KVM_REG_ARM_COPROC_SHIFT 16
-#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007
-#define KVM_REG_ARM_32_OPC2_SHIFT 0
-#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078
-#define KVM_REG_ARM_OPC1_SHIFT 3
-#define KVM_REG_ARM_CRM_MASK 0x0000000000000780
-#define KVM_REG_ARM_CRM_SHIFT 7
-#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
-#define KVM_REG_ARM_32_CRN_SHIFT 11
-
-#define ARM_CP15_REG_SHIFT_MASK(x,n) \
- (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
-
-#define __ARM_CP15_REG(op1,crn,crm,op2) \
- (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \
- ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \
- ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \
- ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \
- ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2))
-
-#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32)
-
-#define __ARM_CP15_REG64(op1,crm) \
- (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
-#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
-
-#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
-#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
-#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
-
-/* Normal registers are mapped as coprocessor 16. */
-#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
-#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4)
-
-/* Some registers need more space to represent values. */
-#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
-#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
-#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
-#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
-#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
-#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
-
-/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
-#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT)
-#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF
-#define KVM_REG_ARM_VFP_BASE_REG 0x0
-#define KVM_REG_ARM_VFP_FPSID 0x1000
-#define KVM_REG_ARM_VFP_FPSCR 0x1001
-#define KVM_REG_ARM_VFP_MVFR1 0x1006
-#define KVM_REG_ARM_VFP_MVFR0 0x1007
-#define KVM_REG_ARM_VFP_FPEXC 0x1008
-#define KVM_REG_ARM_VFP_FPINST 0x1009
-#define KVM_REG_ARM_VFP_FPINST2 0x100A
-
-/* Device Control API: ARM VGIC */
-#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
-#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
-#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
-#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
-#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
-#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32
-#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \
- (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT)
-#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
-#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
-#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff)
-#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
-#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
-#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
-#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
-#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
-#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
-#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
-#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
- (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
-#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
-#define VGIC_LEVEL_INFO_LINE_LEVEL 0
-
-/* Device Control API on vcpu fd */
-#define KVM_ARM_VCPU_PMU_V3_CTRL 0
-#define KVM_ARM_VCPU_PMU_V3_IRQ 0
-#define KVM_ARM_VCPU_PMU_V3_INIT 1
-#define KVM_ARM_VCPU_TIMER_CTRL 1
-#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
-#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
-
-#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
-#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
-#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
-#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
-
-/* KVM_IRQ_LINE irq field index values */
-#define KVM_ARM_IRQ_TYPE_SHIFT 24
-#define KVM_ARM_IRQ_TYPE_MASK 0xff
-#define KVM_ARM_IRQ_VCPU_SHIFT 16
-#define KVM_ARM_IRQ_VCPU_MASK 0xff
-#define KVM_ARM_IRQ_NUM_SHIFT 0
-#define KVM_ARM_IRQ_NUM_MASK 0xffff
-
-/* irq_type field */
-#define KVM_ARM_IRQ_TYPE_CPU 0
-#define KVM_ARM_IRQ_TYPE_SPI 1
-#define KVM_ARM_IRQ_TYPE_PPI 2
-
-/* out-of-kernel GIC cpu interrupt injection irq_number field */
-#define KVM_ARM_IRQ_CPU_IRQ 0
-#define KVM_ARM_IRQ_CPU_FIQ 1
-
-/*
- * This used to hold the highest supported SPI, but it is now obsolete
- * and only here to provide source code level compatibility with older
- * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
- */
-#ifndef __KERNEL__
-#define KVM_ARM_IRQ_GIC_MAX 127
-#endif
-
-/* One single KVM irqchip, ie. the VGIC */
-#define KVM_NR_IRQCHIPS 1
-
-/* PSCI interface */
-#define KVM_PSCI_FN_BASE 0x95c1ba5e
-#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
-
-#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
-#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
-#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
-#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
-
-#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS
-#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED
-#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS
-#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED
-
-#endif /* __ARM_KVM_H__ */
diff --git a/arch/arm/include/uapi/asm/kvm_para.h b/arch/arm/include/uapi/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/arm/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/arm/include/uapi/asm/perf_regs.h b/arch/arm/include/uapi/asm/perf_regs.h
index ce59448458b2..a3c046174e6b 100644
--- a/arch/arm/include/uapi/asm/perf_regs.h
+++ b/arch/arm/include/uapi/asm/perf_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_ARM_PERF_REGS_H
#define _ASM_ARM_PERF_REGS_H
diff --git a/arch/arm/include/uapi/asm/posix_types.h b/arch/arm/include/uapi/asm/posix_types.h
index d2de9cbbcd9b..6bf11ad5d6ba 100644
--- a/arch/arm/include/uapi/asm/posix_types.h
+++ b/arch/arm/include/uapi/asm/posix_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* arch/arm/include/asm/posix_types.h
*
diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
index 5af0ed1b825a..8896c23ccba7 100644
--- a/arch/arm/include/uapi/asm/ptrace.h
+++ b/arch/arm/include/uapi/asm/ptrace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* arch/arm/include/asm/ptrace.h
*
@@ -25,12 +26,16 @@
#define PTRACE_GET_THREAD_AREA 22
#define PTRACE_SET_SYSCALL 23
/* PTRACE_SYSCALL is 24 */
-#define PTRACE_GETCRUNCHREGS 25
-#define PTRACE_SETCRUNCHREGS 26
+#define PTRACE_GETCRUNCHREGS 25 /* obsolete */
+#define PTRACE_SETCRUNCHREGS 26 /* obsolete */
#define PTRACE_GETVFPREGS 27
#define PTRACE_SETVFPREGS 28
#define PTRACE_GETHBPREGS 29
#define PTRACE_SETHBPREGS 30
+#define PTRACE_GETFDPIC 31
+
+#define PTRACE_GETFDPIC_EXEC 0
+#define PTRACE_GETFDPIC_INTERP 1
/*
* PSR bits
@@ -53,6 +58,7 @@
#endif
#define FIQ_MODE 0x00000011
#define IRQ_MODE 0x00000012
+#define MON_MODE 0x00000016
#define ABT_MODE 0x00000017
#define HYP_MODE 0x0000001a
#define UND_MODE 0x0000001b
diff --git a/arch/arm/include/uapi/asm/setup.h b/arch/arm/include/uapi/asm/setup.h
index 979ff4016404..8e50e034fec7 100644
--- a/arch/arm/include/uapi/asm/setup.h
+++ b/arch/arm/include/uapi/asm/setup.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* linux/include/asm/setup.h
*
@@ -8,7 +9,7 @@
* published by the Free Software Foundation.
*
* Structure passed to kernel to tell it about the
- * hardware it's running on. See Documentation/arm/Setup
+ * hardware it's running on. See Documentation/arch/arm/setup.rst
* for more info.
*/
#ifndef _UAPI__ASMARM_SETUP_H
diff --git a/arch/arm/include/uapi/asm/sigcontext.h b/arch/arm/include/uapi/asm/sigcontext.h
index fc0b80b6a6fc..e223c65adabd 100644
--- a/arch/arm/include/uapi/asm/sigcontext.h
+++ b/arch/arm/include/uapi/asm/sigcontext.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASMARM_SIGCONTEXT_H
#define _ASMARM_SIGCONTEXT_H
diff --git a/arch/arm/include/uapi/asm/signal.h b/arch/arm/include/uapi/asm/signal.h
index 33073bdcf091..9e2178420db2 100644
--- a/arch/arm/include/uapi/asm/signal.h
+++ b/arch/arm/include/uapi/asm/signal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASMARM_SIGNAL_H
#define _UAPI_ASMARM_SIGNAL_H
@@ -59,33 +60,12 @@ typedef unsigned long sigset_t;
#define SIGSWI 32
/*
- * SA_FLAGS values:
- *
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_SIGINFO deliver the signal with SIGINFO structs
- * SA_THIRTYTWO delivers the signal in 32-bit mode, even if the task
- * is running in 26-bit.
- * SA_ONSTACK allows alternate signal stacks (see sigaltstack(2)).
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NODEFER prevents the current signal from being masked in the handler.
- * SA_RESETHAND clears the handler when the signal is delivered.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
+ * SA_THIRTYTWO historically meant deliver the signal in 32-bit mode, even if
+ * the task is running in 26-bit. But since the kernel no longer supports
+ * 26-bit mode, the flag has no effect.
*/
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
#define SA_THIRTYTWO 0x02000000
#define SA_RESTORER 0x04000000
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
@@ -113,7 +93,7 @@ struct sigaction {
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
- size_t ss_size;
+ __kernel_size_t ss_size;
} stack_t;
diff --git a/arch/arm/include/uapi/asm/stat.h b/arch/arm/include/uapi/asm/stat.h
index 42c0c13999d5..9c6580bfc04d 100644
--- a/arch/arm/include/uapi/asm/stat.h
+++ b/arch/arm/include/uapi/asm/stat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASMARM_STAT_H
#define _ASMARM_STAT_H
diff --git a/arch/arm/include/uapi/asm/statfs.h b/arch/arm/include/uapi/asm/statfs.h
index 079447c05ba7..177f08540079 100644
--- a/arch/arm/include/uapi/asm/statfs.h
+++ b/arch/arm/include/uapi/asm/statfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASMARM_STATFS_H
#define _ASMARM_STATFS_H
diff --git a/arch/arm/include/uapi/asm/swab.h b/arch/arm/include/uapi/asm/swab.h
index 6fcb32a5c453..301aa8d8e320 100644
--- a/arch/arm/include/uapi/asm/swab.h
+++ b/arch/arm/include/uapi/asm/swab.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* arch/arm/include/asm/byteorder.h
*
diff --git a/arch/arm/include/uapi/asm/types.h b/arch/arm/include/uapi/asm/types.h
index 9435a42f575e..1a667bc26510 100644
--- a/arch/arm/include/uapi/asm/types.h
+++ b/arch/arm/include/uapi/asm/types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_TYPES_H
#define _UAPI_ASM_TYPES_H
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 28bd456494a3..a1149911464c 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* arch/arm/include/asm/unistd.h
*
@@ -14,6 +15,7 @@
#define _UAPI__ASM_ARM_UNISTD_H
#define __NR_OABI_SYSCALL_BASE 0x900000
+#define __NR_SYSCALL_MASK 0x0fffff
#if defined(__thumb__) || defined(__ARM_EABI__)
#define __NR_SYSCALL_BASE 0
@@ -23,7 +25,6 @@
#include <asm/unistd-oabi.h>
#endif
-#include <asm/unistd-common.h>
#define __NR_sync_file_range2 __NR_arm_sync_file_range
/*
@@ -35,5 +36,6 @@
#define __ARM_NR_usr26 (__ARM_NR_BASE+3)
#define __ARM_NR_usr32 (__ARM_NR_BASE+4)
#define __ARM_NR_set_tls (__ARM_NR_BASE+5)
+#define __ARM_NR_get_tls (__ARM_NR_BASE+6)
#endif /* _UAPI__ASM_ARM_UNISTD_H */