Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/Kbuild | 41
-rw-r--r--  arch/arm/include/asm/a.out-core.h | 45
-rw-r--r--  arch/arm/include/asm/arch_gicv3.h | 261
-rw-r--r--  arch/arm/include/asm/arch_timer.h | 97
-rw-r--r--  arch/arm/include/asm/archrandom.h | 12
-rw-r--r--  arch/arm/include/asm/arm-cci.h | 31
-rw-r--r--  arch/arm/include/asm/arm_pmuv3.h | 280
-rw-r--r--  arch/arm/include/asm/assembler.h | 490
-rw-r--r--  arch/arm/include/asm/atomic.h | 542
-rw-r--r--  arch/arm/include/asm/auxvec.h | 1
-rw-r--r--  arch/arm/include/asm/bL_switcher.h | 74
-rw-r--r--  arch/arm/include/asm/barrier.h | 95
-rw-r--r--  arch/arm/include/asm/bitops.h | 105
-rw-r--r--  arch/arm/include/asm/bitrev.h | 21
-rw-r--r--  arch/arm/include/asm/bug.h | 29
-rw-r--r--  arch/arm/include/asm/bugs.h | 13
-rw-r--r--  arch/arm/include/asm/cache.h | 9
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 117
-rw-r--r--  arch/arm/include/asm/cachetype.h | 55
-rw-r--r--  arch/arm/include/asm/checksum.h | 63
-rw-r--r--  arch/arm/include/asm/clkdev.h | 29
-rw-r--r--  arch/arm/include/asm/clocksource.h | 7
-rw-r--r--  arch/arm/include/asm/cmpxchg.h | 135
-rw-r--r--  arch/arm/include/asm/compiler.h | 16
-rw-r--r--  arch/arm/include/asm/cp15.h | 28
-rw-r--r--  arch/arm/include/asm/cpu.h | 6
-rw-r--r--  arch/arm/include/asm/cpufeature.h | 35
-rw-r--r--  arch/arm/include/asm/cpuidle.h | 31
-rw-r--r--  arch/arm/include/asm/cputype.h | 175
-rw-r--r--  arch/arm/include/asm/cti.h | 159
-rw-r--r--  arch/arm/include/asm/current.h | 65
-rw-r--r--  arch/arm/include/asm/dcc.h | 33
-rw-r--r--  arch/arm/include/asm/delay.h | 36
-rw-r--r--  arch/arm/include/asm/device.h | 11
-rw-r--r--  arch/arm/include/asm/div64.h | 287
-rw-r--r--  arch/arm/include/asm/dma-contiguous.h | 15
-rw-r--r--  arch/arm/include/asm/dma-iommu.h | 14
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 273
-rw-r--r--  arch/arm/include/asm/dma.h | 18
-rw-r--r--  arch/arm/include/asm/dmi.h | 15
-rw-r--r--  arch/arm/include/asm/domain.h | 80
-rw-r--r--  arch/arm/include/asm/ecard.h | 3
-rw-r--r--  arch/arm/include/asm/edac.h | 18
-rw-r--r--  arch/arm/include/asm/efi.h | 94
-rw-r--r--  arch/arm/include/asm/elf.h | 56
-rw-r--r--  arch/arm/include/asm/entry-macro-multi.S | 39
-rw-r--r--  arch/arm/include/asm/exception.h | 8
-rw-r--r--  arch/arm/include/asm/fb.h | 19
-rw-r--r--  arch/arm/include/asm/fiq.h | 1
-rw-r--r--  arch/arm/include/asm/firmware.h | 23
-rw-r--r--  arch/arm/include/asm/fixmap.h | 97
-rw-r--r--  arch/arm/include/asm/flat.h | 16
-rw-r--r--  arch/arm/include/asm/floppy.h | 99
-rw-r--r--  arch/arm/include/asm/fncpy.h | 14
-rw-r--r--  arch/arm/include/asm/fpstate.h | 16
-rw-r--r--  arch/arm/include/asm/fpu.h | 15
-rw-r--r--  arch/arm/include/asm/ftrace.h | 49
-rw-r--r--  arch/arm/include/asm/futex.h | 81
-rw-r--r--  arch/arm/include/asm/glue-cache.h | 43
-rw-r--r--  arch/arm/include/asm/glue-df.h | 13
-rw-r--r--  arch/arm/include/asm/glue-pf.h | 5
-rw-r--r--  arch/arm/include/asm/glue-proc.h | 23
-rw-r--r--  arch/arm/include/asm/glue.h | 5
-rw-r--r--  arch/arm/include/asm/gpio.h | 32
-rw-r--r--  arch/arm/include/asm/hardirq.h | 28
-rw-r--r--  arch/arm/include/asm/hardware/arm_timer.h | 35
-rw-r--r--  arch/arm/include/asm/hardware/cache-aurora-l2.h | 100
-rw-r--r--  arch/arm/include/asm/hardware/cache-b15-rac.h | 10
-rw-r--r--  arch/arm/include/asm/hardware/cache-feroceon-l2.h | 9
-rw-r--r--  arch/arm/include/asm/hardware/cache-l2x0.h | 148
-rw-r--r--  arch/arm/include/asm/hardware/cache-tauros2.h | 5
-rw-r--r--  arch/arm/include/asm/hardware/cache-uniphier.h | 21
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h | 157
-rw-r--r--  arch/arm/include/asm/hardware/cp14.h | 534
-rw-r--r--  arch/arm/include/asm/hardware/debug-8250.S | 29
-rw-r--r--  arch/arm/include/asm/hardware/debug-pl01x.S | 29
-rw-r--r--  arch/arm/include/asm/hardware/dec21285.h | 25
-rw-r--r--  arch/arm/include/asm/hardware/entry-macro-iomd.S | 131
-rw-r--r--  arch/arm/include/asm/hardware/icst.h | 59
-rw-r--r--  arch/arm/include/asm/hardware/ioc.h | 5
-rw-r--r--  arch/arm/include/asm/hardware/iomd.h | 5
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-adma.h | 962
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-gpio.h | 75
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h | 324
-rw-r--r--  arch/arm/include/asm/hardware/iop_adma.h | 123
-rw-r--r--  arch/arm/include/asm/hardware/it8152.h | 115
-rw-r--r--  arch/arm/include/asm/hardware/locomo.h | 10
-rw-r--r--  arch/arm/include/asm/hardware/memc.h | 5
-rw-r--r--  arch/arm/include/asm/hardware/sa1111.h | 49
-rw-r--r--  arch/arm/include/asm/hardware/scoop.h | 6
-rw-r--r--  arch/arm/include/asm/hardware/ssp.h | 5
-rw-r--r--  arch/arm/include/asm/hardware/timer-sp.h | 23
-rw-r--r--  arch/arm/include/asm/highmem.h | 49
-rw-r--r--  arch/arm/include/asm/hugetlb-3level.h | 50
-rw-r--r--  arch/arm/include/asm/hugetlb.h | 72
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h | 14
-rw-r--r--  arch/arm/include/asm/hw_irq.h | 8
-rw-r--r--  arch/arm/include/asm/hwcap.h | 4
-rw-r--r--  arch/arm/include/asm/hypervisor.h | 6
-rw-r--r--  arch/arm/include/asm/ide.h | 23
-rw-r--r--  arch/arm/include/asm/idmap.h | 5
-rw-r--r--  arch/arm/include/asm/insn.h | 47
-rw-r--r--  arch/arm/include/asm/io.h | 264
-rw-r--r--  arch/arm/include/asm/irq.h | 18
-rw-r--r--  arch/arm/include/asm/irq_work.h | 12
-rw-r--r--  arch/arm/include/asm/irqflags.h | 22
-rw-r--r--  arch/arm/include/asm/jump_label.h | 36
-rw-r--r--  arch/arm/include/asm/kasan.h | 33
-rw-r--r--  arch/arm/include/asm/kasan_def.h | 81
-rw-r--r--  arch/arm/include/asm/kexec-internal.h | 12
-rw-r--r--  arch/arm/include/asm/kexec.h | 35
-rw-r--r--  arch/arm/include/asm/kfence.h | 53
-rw-r--r--  arch/arm/include/asm/kgdb.h | 6
-rw-r--r--  arch/arm/include/asm/kmap_types.h | 9
-rw-r--r--  arch/arm/include/asm/kprobes.h | 68
-rw-r--r--  arch/arm/include/asm/krait-l2-accessors.h | 9
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 217
-rw-r--r--  arch/arm/include/asm/kvm_asm.h | 83
-rw-r--r--  arch/arm/include/asm/kvm_coproc.h | 47
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 160
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 232
-rw-r--r--  arch/arm/include/asm/kvm_mmio.h | 56
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 135
-rw-r--r--  arch/arm/include/asm/kvm_psci.h | 23
-rw-r--r--  arch/arm/include/asm/limits.h | 11
-rw-r--r--  arch/arm/include/asm/linkage.h | 1
-rw-r--r--  arch/arm/include/asm/localtimer.h | 34
-rw-r--r--  arch/arm/include/asm/mach-types.h | 1
-rw-r--r--  arch/arm/include/asm/mach/arch.h | 29
-rw-r--r--  arch/arm/include/asm/mach/dma.h | 10
-rw-r--r--  arch/arm/include/asm/mach/flash.h | 7
-rw-r--r--  arch/arm/include/asm/mach/irda.h | 20
-rw-r--r--  arch/arm/include/asm/mach/irq.h | 9
-rw-r--r--  arch/arm/include/asm/mach/map.h | 35
-rw-r--r--  arch/arm/include/asm/mach/pci.h | 29
-rw-r--r--  arch/arm/include/asm/mach/sharpsl_param.h | 6
-rw-r--r--  arch/arm/include/asm/mach/time.h | 13
-rw-r--r--  arch/arm/include/asm/mc146818rtc.h | 1
-rw-r--r--  arch/arm/include/asm/mcpm.h | 203
-rw-r--r--  arch/arm/include/asm/mcs_spinlock.h | 24
-rw-r--r--  arch/arm/include/asm/memblock.h | 5
-rw-r--r--  arch/arm/include/asm/memory.h | 298
-rw-r--r--  arch/arm/include/asm/mman.h | 14
-rw-r--r--  arch/arm/include/asm/mmu.h | 19
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 80
-rw-r--r--  arch/arm/include/asm/module.h | 68
-rw-r--r--  arch/arm/include/asm/module.lds.h | 7
-rw-r--r--  arch/arm/include/asm/mpu.h | 113
-rw-r--r--  arch/arm/include/asm/mtd-xip.h | 5
-rw-r--r--  arch/arm/include/asm/mutex.h | 21
-rw-r--r--  arch/arm/include/asm/neon.h | 33
-rw-r--r--  arch/arm/include/asm/nwflash.h | 2
-rw-r--r--  arch/arm/include/asm/opcodes-sec.h | 9
-rw-r--r--  arch/arm/include/asm/opcodes-virt.h | 15
-rw-r--r--  arch/arm/include/asm/opcodes.h | 14
-rw-r--r--  arch/arm/include/asm/outercache.h | 90
-rw-r--r--  arch/arm/include/asm/page-nommu.h | 14
-rw-r--r--  arch/arm/include/asm/page.h | 47
-rw-r--r--  arch/arm/include/asm/paravirt.h | 22
-rw-r--r--  arch/arm/include/asm/paravirt_api_clock.h | 1
-rw-r--r--  arch/arm/include/asm/pci.h | 50
-rw-r--r--  arch/arm/include/asm/percpu.h | 50
-rw-r--r--  arch/arm/include/asm/perf_event.h | 27
-rw-r--r--  arch/arm/include/asm/pgalloc.h | 87
-rw-r--r--  arch/arm/include/asm/pgtable-2level-hwdef.h | 9
-rw-r--r--  arch/arm/include/asm/pgtable-2level-types.h | 14
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h | 85
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h | 51
-rw-r--r--  arch/arm/include/asm/pgtable-3level-types.h | 14
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h | 121
-rw-r--r--  arch/arm/include/asm/pgtable-hwdef.h | 5
-rw-r--r--  arch/arm/include/asm/pgtable-nommu.h | 37
-rw-r--r--  arch/arm/include/asm/pgtable.h | 240
-rw-r--r--  arch/arm/include/asm/pmu.h | 111
-rw-r--r--  arch/arm/include/asm/probes.h | 49
-rw-r--r--  arch/arm/include/asm/proc-fns.h | 95
-rw-r--r--  arch/arm/include/asm/processor.h | 95
-rw-r--r--  arch/arm/include/asm/procinfo.h | 5
-rw-r--r--  arch/arm/include/asm/prom.h | 14
-rw-r--r--  arch/arm/include/asm/psci.h | 33
-rw-r--r--  arch/arm/include/asm/ptdump.h | 41
-rw-r--r--  arch/arm/include/asm/ptrace.h | 77
-rw-r--r--  arch/arm/include/asm/scatterlist.h | 12
-rw-r--r--  arch/arm/include/asm/sched_clock.h | 4
-rw-r--r--  arch/arm/include/asm/seccomp.h | 16
-rw-r--r--  arch/arm/include/asm/sections.h | 26
-rw-r--r--  arch/arm/include/asm/secure_cntvoff.h | 8
-rw-r--r--  arch/arm/include/asm/semihost.h | 30
-rw-r--r--  arch/arm/include/asm/set_memory.h | 22
-rw-r--r--  arch/arm/include/asm/setup.h | 55
-rw-r--r--  arch/arm/include/asm/shmparam.h | 1
-rw-r--r--  arch/arm/include/asm/signal.h | 8
-rw-r--r--  arch/arm/include/asm/simd.h | 22
-rw-r--r--  arch/arm/include/asm/smp.h | 48
-rw-r--r--  arch/arm/include/asm/smp_plat.h | 29
-rw-r--r--  arch/arm/include/asm/smp_scu.h | 16
-rw-r--r--  arch/arm/include/asm/smp_twd.h | 17
-rw-r--r--  arch/arm/include/asm/sparsemem.h | 10
-rw-r--r--  arch/arm/include/asm/spectre.h | 42
-rw-r--r--  arch/arm/include/asm/spinlock.h | 126
-rw-r--r--  arch/arm/include/asm/spinlock_types.h | 7
-rw-r--r--  arch/arm/include/asm/stackprotector.h | 18
-rw-r--r--  arch/arm/include/asm/stacktrace.h | 48
-rw-r--r--  arch/arm/include/asm/string.h | 53
-rw-r--r--  arch/arm/include/asm/suspend.h | 6
-rw-r--r--  arch/arm/include/asm/swab.h | 1
-rw-r--r--  arch/arm/include/asm/switch_to.h | 17
-rw-r--r--  arch/arm/include/asm/sync_bitops.h | 31
-rw-r--r--  arch/arm/include/asm/syscall.h | 91
-rw-r--r--  arch/arm/include/asm/syscalls.h | 51
-rw-r--r--  arch/arm/include/asm/system.h | 7
-rw-r--r--  arch/arm/include/asm/system_info.h | 2
-rw-r--r--  arch/arm/include/asm/system_misc.h | 17
-rw-r--r--  arch/arm/include/asm/tcm.h | 22
-rw-r--r--  arch/arm/include/asm/text-patching.h | 18
-rw-r--r--  arch/arm/include/asm/therm.h | 1
-rw-r--r--  arch/arm/include/asm/thread_info.h | 136
-rw-r--r--  arch/arm/include/asm/thread_notify.h | 5
-rw-r--r--  arch/arm/include/asm/timex.h | 12
-rw-r--r--  arch/arm/include/asm/tlb.h | 186
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 246
-rw-r--r--  arch/arm/include/asm/tls.h | 96
-rw-r--r--  arch/arm/include/asm/topology.h | 37
-rw-r--r--  arch/arm/include/asm/traps.h | 37
-rw-r--r--  arch/arm/include/asm/uaccess-asm.h | 161
-rw-r--r--  arch/arm/include/asm/uaccess.h | 649
-rw-r--r--  arch/arm/include/asm/ucontext.h | 20
-rw-r--r--  arch/arm/include/asm/unified.h | 109
-rw-r--r--  arch/arm/include/asm/unistd.h | 36
-rw-r--r--  arch/arm/include/asm/unwind.h | 22
-rw-r--r--  arch/arm/include/asm/uprobes.h | 42
-rw-r--r--  arch/arm/include/asm/user.h | 5
-rw-r--r--  arch/arm/include/asm/v7m.h | 58
-rw-r--r--  arch/arm/include/asm/vdso.h | 33
-rw-r--r--  arch/arm/include/asm/vdso/clocksource.h | 8
-rw-r--r--  arch/arm/include/asm/vdso/cp15.h | 38
-rw-r--r--  arch/arm/include/asm/vdso/gettimeofday.h | 140
-rw-r--r--  arch/arm/include/asm/vdso/processor.h | 22
-rw-r--r--  arch/arm/include/asm/vdso/vsyscall.h | 22
-rw-r--r--  arch/arm/include/asm/vermagic.h | 31
-rw-r--r--  arch/arm/include/asm/vfp.h | 27
-rw-r--r--  arch/arm/include/asm/vfpmacros.h | 25
-rw-r--r--  arch/arm/include/asm/vga.h | 2
-rw-r--r--  arch/arm/include/asm/virt.h | 44
-rw-r--r--  arch/arm/include/asm/vmalloc.h | 4
-rw-r--r--  arch/arm/include/asm/vmlinux.lds.h | 177
-rw-r--r--  arch/arm/include/asm/word-at-a-time.h | 24
-rw-r--r--  arch/arm/include/asm/xen/events.h | 9
-rw-r--r--  arch/arm/include/asm/xen/hypercall.h | 72
-rw-r--r--  arch/arm/include/asm/xen/hypervisor.h | 20
-rw-r--r--  arch/arm/include/asm/xen/interface.h | 81
-rw-r--r--  arch/arm/include/asm/xen/page.h | 92
-rw-r--r--  arch/arm/include/asm/xen/swiotlb-xen.h | 1
-rw-r--r--  arch/arm/include/asm/xen/xen-ops.h | 2
-rw-r--r--  arch/arm/include/asm/xor.h | 110
255 files changed, 8066 insertions(+), 7811 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index d3db39860b9c..03657ff8fbe3 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,35 +1,8 @@
-
-
-generic-y += auxvec.h
-generic-y += bitsperlong.h
-generic-y += cputime.h
-generic-y += current.h
-generic-y += emergency-restart.h
-generic-y += errno.h
-generic-y += exec.h
-generic-y += ioctl.h
-generic-y += ipcbuf.h
-generic-y += irq_regs.h
-generic-y += kdebug.h
-generic-y += local.h
-generic-y += local64.h
-generic-y += msgbuf.h
-generic-y += param.h
+# SPDX-License-Identifier: GPL-2.0
+generic-y += early_ioremap.h
+generic-y += extable.h
+generic-y += flat.h
generic-y += parport.h
-generic-y += poll.h
-generic-y += resource.h
-generic-y += sections.h
-generic-y += segment.h
-generic-y += sembuf.h
-generic-y += serial.h
-generic-y += shmbuf.h
-generic-y += siginfo.h
-generic-y += sizes.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += timex.h
-generic-y += trace_clock.h
-generic-y += types.h
-generic-y += unaligned.h
+
+generated-y += mach-types.h
+generated-y += unistd-nr.h
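[Annotation] Each surviving "generic-y" entry asks Kbuild to emit a one-line wrapper under arch/arm/include/generated/asm/ instead of carrying a hand-written header in-tree. A sketch of what such a wrapper looks like for the remaining parport.h entry; the exact output path and any comment banner come from scripts/Makefile.asm-generic, so treat the details here as illustrative:

    /* illustrative wrapper emitted for 'generic-y += parport.h' */
    #include <asm-generic/parport.h>

The "generated-y" entries (mach-types.h, unistd-nr.h) are instead produced by arch-specific generator scripts, not by this wrapper rule.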
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
deleted file mode 100644
index 92f10cb5c70c..000000000000
--- a/arch/arm/include/asm/a.out-core.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_A_OUT_CORE_H
-#define _ASM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-#include <linux/elfcore.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
-{
- struct task_struct *tsk = current;
-
- dump->magic = CMAGIC;
- dump->start_code = tsk->mm->start_code;
- dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
-
- dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
- dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
- dump->u_ssize = 0;
-
- memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
-
- if (dump->start_stack < 0x04000000)
- dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
-
- dump->regs = *regs;
- dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_A_OUT_CORE_H */
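[Annotation] The deleted aout_dump_thread() sized the dump segments in whole pages: u_dsize rounds the data segment up before shifting by PAGE_SHIFT. A standalone userspace sketch of that round-up arithmetic, assuming the usual 4 KiB pages (PAGE_SHIFT = 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Round a byte span up to whole pages, as u_dsize did above. */
    static unsigned long pages_spanned(unsigned long start, unsigned long end)
    {
            return (end - start + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
            /* 4097 bytes spans two 4 KiB pages. */
            printf("%lu\n", pages_spanned(0x8000, 0x8000 + 4097)); /* 2 */
            return 0;
    }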
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
new file mode 100644
index 000000000000..311e83038bdb
--- /dev/null
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/arch_gicv3.h
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ */
+#ifndef __ASM_ARCH_GICV3_H
+#define __ASM_ARCH_GICV3_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+
+#define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1)
+#define ICC_DIR __ACCESS_CP15(c12, 0, c11, 1)
+#define ICC_IAR1 __ACCESS_CP15(c12, 0, c12, 0)
+#define ICC_SGI1R __ACCESS_CP15_64(0, c12)
+#define ICC_PMR __ACCESS_CP15(c4, 0, c6, 0)
+#define ICC_CTLR __ACCESS_CP15(c12, 0, c12, 4)
+#define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5)
+#define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7)
+#define ICC_BPR1 __ACCESS_CP15(c12, 0, c12, 3)
+#define ICC_RPR __ACCESS_CP15(c12, 0, c11, 3)
+
+#define __ICC_AP0Rx(x) __ACCESS_CP15(c12, 0, c8, 4 | x)
+#define ICC_AP0R0 __ICC_AP0Rx(0)
+#define ICC_AP0R1 __ICC_AP0Rx(1)
+#define ICC_AP0R2 __ICC_AP0Rx(2)
+#define ICC_AP0R3 __ICC_AP0Rx(3)
+
+#define __ICC_AP1Rx(x) __ACCESS_CP15(c12, 0, c9, x)
+#define ICC_AP1R0 __ICC_AP1Rx(0)
+#define ICC_AP1R1 __ICC_AP1Rx(1)
+#define ICC_AP1R2 __ICC_AP1Rx(2)
+#define ICC_AP1R3 __ICC_AP1Rx(3)
+
+#define CPUIF_MAP(a32, a64) \
+static inline void write_ ## a64(u32 val) \
+{ \
+ write_sysreg(val, a32); \
+} \
+static inline u32 read_ ## a64(void) \
+{ \
+ return read_sysreg(a32); \
+} \
+
+CPUIF_MAP(ICC_EOIR1, ICC_EOIR1_EL1)
+CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
+CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
+CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)
+CPUIF_MAP(ICC_AP0R2, ICC_AP0R2_EL1)
+CPUIF_MAP(ICC_AP0R3, ICC_AP0R3_EL1)
+CPUIF_MAP(ICC_AP1R0, ICC_AP1R0_EL1)
+CPUIF_MAP(ICC_AP1R1, ICC_AP1R1_EL1)
+CPUIF_MAP(ICC_AP1R2, ICC_AP1R2_EL1)
+CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1)
+
+#define read_gicreg(r) read_##r()
+#define write_gicreg(v, r) write_##r(v)
+
+/* Low-level accessors */
+
+static inline void gic_write_dir(u32 val)
+{
+ write_sysreg(val, ICC_DIR);
+ isb();
+}
+
+static inline u32 gic_read_iar(void)
+{
+ u32 irqstat = read_sysreg(ICC_IAR1);
+
+ dsb(sy);
+
+ return irqstat;
+}
+
+static inline void gic_write_ctlr(u32 val)
+{
+ write_sysreg(val, ICC_CTLR);
+ isb();
+}
+
+static inline u32 gic_read_ctlr(void)
+{
+ return read_sysreg(ICC_CTLR);
+}
+
+static inline void gic_write_grpen1(u32 val)
+{
+ write_sysreg(val, ICC_IGRPEN1);
+ isb();
+}
+
+static inline void gic_write_sgi1r(u64 val)
+{
+ write_sysreg(val, ICC_SGI1R);
+}
+
+static inline u32 gic_read_sre(void)
+{
+ return read_sysreg(ICC_SRE);
+}
+
+static inline void gic_write_sre(u32 val)
+{
+ write_sysreg(val, ICC_SRE);
+ isb();
+}
+
+static inline void gic_write_bpr1(u32 val)
+{
+ write_sysreg(val, ICC_BPR1);
+}
+
+static inline u32 gic_read_pmr(void)
+{
+ return read_sysreg(ICC_PMR);
+}
+
+static inline void gic_write_pmr(u32 val)
+{
+ write_sysreg(val, ICC_PMR);
+}
+
+static inline u32 gic_read_rpr(void)
+{
+ return read_sysreg(ICC_RPR);
+}
+
+/*
+ * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
+ * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
+ * make much sense.
+ * Moreover, 64bit I/O emulation is extremely difficult to implement on
+ * AArch32, since the syndrome register doesn't provide any information
+ * about such accesses.
+ * Consequently, the following IO helpers use 32bit accesses.
+ */
+static inline void __gic_writeq_nonatomic(u64 val, volatile void __iomem *addr)
+{
+ writel_relaxed((u32)val, addr);
+ writel_relaxed((u32)(val >> 32), addr + 4);
+}
+
+static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ val = readl_relaxed(addr);
+ val |= (u64)readl_relaxed(addr + 4) << 32;
+ return val;
+}
+
+#define gic_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
+
+/*
+ * GICD_IROUTERn contain the affinity values associated with each interrupt.
+ * The upper-word (aff3) will always be 0, so there is no need for a lock.
+ */
+#define gic_write_irouter(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gic_read_typer(c) __gic_readq_nonatomic(c)
+
+/*
+ * GITS_BASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_baser(c) __gic_readq_nonatomic(c)
+#define gits_write_baser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_PENDBASER and GICR_PROPBASER are changed with LPIs disabled, so they
+ * won't be in use during any updates and can be changed non-atomically.
+ */
+#define gicr_read_propbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_propbaser(v, c) __gic_writeq_nonatomic(v, c)
+#define gicr_read_pendbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_pendbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_xLPIR - only the lower bits are significant
+ */
+#define gic_read_lpir(c) readl_relaxed(c)
+#define gic_write_lpir(v, c) writel_relaxed(lower_32_bits(v), c)
+
+/*
+ * GITS_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gits_read_typer(c) __gic_readq_nonatomic(c)
+
+/*
+ * GITS_CBASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_cbaser(c) __gic_readq_nonatomic(c)
+#define gits_write_cbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_CWRITER - hi and lo bits may be accessed independently.
+ */
+#define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_VPROPBASER - hi and lo bits may be accessed independently.
+ */
+#define gicr_read_vpropbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_VPENDBASER - the Valid bit must be cleared before changing
+ * anything else.
+ */
+static inline void gicr_write_vpendbaser(u64 val, void __iomem *addr)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(addr + 4);
+ if (tmp & (GICR_VPENDBASER_Valid >> 32)) {
+ tmp &= ~(GICR_VPENDBASER_Valid >> 32);
+ writel_relaxed(tmp, addr + 4);
+ }
+
+ /*
+ * Use the fact that __gic_writeq_nonatomic writes the second
+ * half of the 64bit quantity after the first.
+ */
+ __gic_writeq_nonatomic(val, addr);
+}
+
+#define gicr_read_vpendbaser(c) __gic_readq_nonatomic(c)
+
+static inline bool gic_prio_masking_enabled(void)
+{
+ return false;
+}
+
+static inline void gic_pmr_mask_irqs(void)
+{
+ /* Should not get called. */
+ WARN_ON_ONCE(true);
+}
+
+static inline void gic_arch_enable_irqs(void)
+{
+ /* Should not get called. */
+ WARN_ON_ONCE(true);
+}
+
+static inline bool gic_has_relaxed_pmr_sync(void)
+{
+ return false;
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* !__ASM_ARCH_GICV3_H */
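[Annotation] The non-atomic helpers above split every 64-bit GIC register access into two 32-bit ones, low word first; gicr_write_vpendbaser() relies on that ordering so the Valid bit, which lives in the high word, lands last. A minimal userspace sketch of the same split, with a plain two-word array standing in for the __iomem register pair:

    #include <stdint.h>
    #include <stdio.h>

    /* Mimic __gic_writeq_nonatomic: low 32 bits first, then the high 32. */
    static void writeq_split(uint64_t val, uint32_t *reg)
    {
            reg[0] = (uint32_t)val;
            reg[1] = (uint32_t)(val >> 32);
    }

    static uint64_t readq_split(const uint32_t *reg)
    {
            return (uint64_t)reg[0] | ((uint64_t)reg[1] << 32);
    }

    int main(void)
    {
            uint32_t reg[2];

            writeq_split(0x00000001deadbeefULL, reg);
            printf("%#llx\n", (unsigned long long)readq_split(reg));
            /* prints 0x1deadbeef */
            return 0;
    }

The kernel helpers use writel_relaxed()/readl_relaxed() on MMIO rather than plain stores; the array here is only a stand-in for the register pair.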
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index accefe099182..bb129b6d2366 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -1,15 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H
#include <asm/barrier.h>
#include <asm/errno.h>
+#include <asm/hwcap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>
#include <clocksource/arm_arch_timer.h>
#ifdef CONFIG_ARM_ARCH_TIMER
+/* 32bit ARM doesn't know anything about timer errata... */
+#define has_erratum_handler(h) (false)
+#define erratum_handler(h) (arch_timer_##h)
+
int arch_timer_arch_init(void);
/*
@@ -17,34 +24,40 @@ int arch_timer_arch_init(void);
* nicely work out which register we want, and chuck away the rest of
* the code. At least it does so with a recent GCC (4.6.3).
*/
-static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+ asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" ((u32)val));
+ isb();
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+ case ARCH_TIMER_REG_CVAL:
+ asm volatile("mcrr p15, 2, %Q0, %R0, c14" : : "r" (val));
break;
+ default:
+ BUILD_BUG();
}
- }
-
- if (access == ARCH_TIMER_VIRT_ACCESS) {
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
+ asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" ((u32)val));
+ isb();
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
+ case ARCH_TIMER_REG_CVAL:
+ asm volatile("mcrr p15, 3, %Q0, %R0, c14" : : "r" (val));
break;
+ default:
+ BUILD_BUG();
}
+ } else {
+ BUILD_BUG();
}
-
- isb();
}
-static inline u32 arch_timer_reg_read(const int access, const int reg)
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
u32 val = 0;
@@ -53,21 +66,19 @@ static inline u32 arch_timer_reg_read(const int access, const int reg)
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
- break;
+ default:
+ BUILD_BUG();
}
- }
-
- if (access == ARCH_TIMER_VIRT_ACCESS) {
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
- break;
+ default:
+ BUILD_BUG();
}
+ } else {
+ BUILD_BUG();
}
return val;
@@ -80,7 +91,21 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline u64 arch_counter_get_cntvct(void)
+static inline u64 __arch_counter_get_cntpct(void)
+{
+ u64 cval;
+
+ isb();
+ asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+ return cval;
+}
+
+static inline u64 __arch_counter_get_cntpct_stable(void)
+{
+ return __arch_counter_get_cntpct();
+}
+
+static inline u64 __arch_counter_get_cntvct(void)
{
u64 cval;
@@ -89,16 +114,32 @@ static inline u64 arch_counter_get_cntvct(void)
return cval;
}
-static inline void __cpuinit arch_counter_set_user_access(void)
+static inline u64 __arch_counter_get_cntvct_stable(void)
{
- u32 cntkctl;
+ return __arch_counter_get_cntvct();
+}
+static inline u32 arch_timer_get_cntkctl(void)
+{
+ u32 cntkctl;
asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
+ return cntkctl;
+}
- /* disable user access to everything */
- cntkctl &= ~((3 << 8) | (7 << 0));
-
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
+ isb();
+}
+
+static inline void arch_timer_set_evtstrm_feature(void)
+{
+ elf_hwcap |= HWCAP_EVTSTRM;
+}
+
+static inline bool arch_timer_have_evtstrm_feature(void)
+{
+ return elf_hwcap & HWCAP_EVTSTRM;
}
#endif
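[Annotation] With the TVAL accessors gone, a timer event is programmed through the absolute comparator (CVAL) instead: read the counter, add the delta converted to ticks, write CVAL. A hedged sketch of that arithmetic; the three externs are hypothetical stand-ins for the CP15 accessors defined above, not real kernel APIs:

    #include <stdint.h>

    extern uint64_t read_cntvct(void);          /* __arch_counter_get_cntvct() */
    extern uint32_t read_cntfrq(void);          /* arch_timer_get_cntfrq() */
    extern void write_cval_ctrl(uint64_t cval); /* CVAL write + CTRL enable */

    /* Program a virtual-timer event 'usecs' microseconds from now. */
    static void timer_fire_in_us(uint32_t usecs)
    {
            uint64_t ticks = (uint64_t)usecs * read_cntfrq() / 1000000u;

            /* CVAL is absolute: the timer fires once CNTVCT >= CVAL. */
            write_cval_ctrl(read_cntvct() + ticks);
    }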
diff --git a/arch/arm/include/asm/archrandom.h b/arch/arm/include/asm/archrandom.h
new file mode 100644
index 000000000000..cc4714eb1a75
--- /dev/null
+++ b/arch/arm/include/asm/archrandom.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+static inline bool __init smccc_probe_trng(void)
+{
+ return false;
+}
+
+#include <asm-generic/archrandom.h>
+
+#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm/include/asm/arm-cci.h b/arch/arm/include/asm/arm-cci.h
new file mode 100644
index 000000000000..7537bd790657
--- /dev/null
+++ b/arch/arm/include/asm/arm-cci.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/arm-cci.h
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ */
+
+#ifndef __ASM_ARM_CCI_H
+#define __ASM_ARM_CCI_H
+
+#ifdef CONFIG_MCPM
+#include <asm/mcpm.h>
+
+/*
+ * We don't have a reliable way of detecting whether we
+ * have access to secure-only registers, unless mcpm is
+ * registered.
+ */
+static inline bool platform_has_secure_cci_access(void)
+{
+ return mcpm_is_available();
+}
+
+#else
+static inline bool platform_has_secure_cci_access(void)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
new file mode 100644
index 000000000000..2ec0e5e83fc9
--- /dev/null
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __ASM_PMUV3_H
+#define __ASM_PMUV3_H
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+
+#define PMCCNTR __ACCESS_CP15_64(0, c9)
+
+#define PMCR __ACCESS_CP15(c9, 0, c12, 0)
+#define PMCNTENSET __ACCESS_CP15(c9, 0, c12, 1)
+#define PMCNTENCLR __ACCESS_CP15(c9, 0, c12, 2)
+#define PMOVSR __ACCESS_CP15(c9, 0, c12, 3)
+#define PMSELR __ACCESS_CP15(c9, 0, c12, 5)
+#define PMCEID0 __ACCESS_CP15(c9, 0, c12, 6)
+#define PMCEID1 __ACCESS_CP15(c9, 0, c12, 7)
+#define PMXEVTYPER __ACCESS_CP15(c9, 0, c13, 1)
+#define PMXEVCNTR __ACCESS_CP15(c9, 0, c13, 2)
+#define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
+#define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
+#define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
+#define PMCEID2 __ACCESS_CP15(c9, 0, c14, 4)
+#define PMCEID3 __ACCESS_CP15(c9, 0, c14, 5)
+#define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
+#define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
+
+#define PMEVCNTR0 __ACCESS_CP15(c14, 0, c8, 0)
+#define PMEVCNTR1 __ACCESS_CP15(c14, 0, c8, 1)
+#define PMEVCNTR2 __ACCESS_CP15(c14, 0, c8, 2)
+#define PMEVCNTR3 __ACCESS_CP15(c14, 0, c8, 3)
+#define PMEVCNTR4 __ACCESS_CP15(c14, 0, c8, 4)
+#define PMEVCNTR5 __ACCESS_CP15(c14, 0, c8, 5)
+#define PMEVCNTR6 __ACCESS_CP15(c14, 0, c8, 6)
+#define PMEVCNTR7 __ACCESS_CP15(c14, 0, c8, 7)
+#define PMEVCNTR8 __ACCESS_CP15(c14, 0, c9, 0)
+#define PMEVCNTR9 __ACCESS_CP15(c14, 0, c9, 1)
+#define PMEVCNTR10 __ACCESS_CP15(c14, 0, c9, 2)
+#define PMEVCNTR11 __ACCESS_CP15(c14, 0, c9, 3)
+#define PMEVCNTR12 __ACCESS_CP15(c14, 0, c9, 4)
+#define PMEVCNTR13 __ACCESS_CP15(c14, 0, c9, 5)
+#define PMEVCNTR14 __ACCESS_CP15(c14, 0, c9, 6)
+#define PMEVCNTR15 __ACCESS_CP15(c14, 0, c9, 7)
+#define PMEVCNTR16 __ACCESS_CP15(c14, 0, c10, 0)
+#define PMEVCNTR17 __ACCESS_CP15(c14, 0, c10, 1)
+#define PMEVCNTR18 __ACCESS_CP15(c14, 0, c10, 2)
+#define PMEVCNTR19 __ACCESS_CP15(c14, 0, c10, 3)
+#define PMEVCNTR20 __ACCESS_CP15(c14, 0, c10, 4)
+#define PMEVCNTR21 __ACCESS_CP15(c14, 0, c10, 5)
+#define PMEVCNTR22 __ACCESS_CP15(c14, 0, c10, 6)
+#define PMEVCNTR23 __ACCESS_CP15(c14, 0, c10, 7)
+#define PMEVCNTR24 __ACCESS_CP15(c14, 0, c11, 0)
+#define PMEVCNTR25 __ACCESS_CP15(c14, 0, c11, 1)
+#define PMEVCNTR26 __ACCESS_CP15(c14, 0, c11, 2)
+#define PMEVCNTR27 __ACCESS_CP15(c14, 0, c11, 3)
+#define PMEVCNTR28 __ACCESS_CP15(c14, 0, c11, 4)
+#define PMEVCNTR29 __ACCESS_CP15(c14, 0, c11, 5)
+#define PMEVCNTR30 __ACCESS_CP15(c14, 0, c11, 6)
+
+#define PMEVTYPER0 __ACCESS_CP15(c14, 0, c12, 0)
+#define PMEVTYPER1 __ACCESS_CP15(c14, 0, c12, 1)
+#define PMEVTYPER2 __ACCESS_CP15(c14, 0, c12, 2)
+#define PMEVTYPER3 __ACCESS_CP15(c14, 0, c12, 3)
+#define PMEVTYPER4 __ACCESS_CP15(c14, 0, c12, 4)
+#define PMEVTYPER5 __ACCESS_CP15(c14, 0, c12, 5)
+#define PMEVTYPER6 __ACCESS_CP15(c14, 0, c12, 6)
+#define PMEVTYPER7 __ACCESS_CP15(c14, 0, c12, 7)
+#define PMEVTYPER8 __ACCESS_CP15(c14, 0, c13, 0)
+#define PMEVTYPER9 __ACCESS_CP15(c14, 0, c13, 1)
+#define PMEVTYPER10 __ACCESS_CP15(c14, 0, c13, 2)
+#define PMEVTYPER11 __ACCESS_CP15(c14, 0, c13, 3)
+#define PMEVTYPER12 __ACCESS_CP15(c14, 0, c13, 4)
+#define PMEVTYPER13 __ACCESS_CP15(c14, 0, c13, 5)
+#define PMEVTYPER14 __ACCESS_CP15(c14, 0, c13, 6)
+#define PMEVTYPER15 __ACCESS_CP15(c14, 0, c13, 7)
+#define PMEVTYPER16 __ACCESS_CP15(c14, 0, c14, 0)
+#define PMEVTYPER17 __ACCESS_CP15(c14, 0, c14, 1)
+#define PMEVTYPER18 __ACCESS_CP15(c14, 0, c14, 2)
+#define PMEVTYPER19 __ACCESS_CP15(c14, 0, c14, 3)
+#define PMEVTYPER20 __ACCESS_CP15(c14, 0, c14, 4)
+#define PMEVTYPER21 __ACCESS_CP15(c14, 0, c14, 5)
+#define PMEVTYPER22 __ACCESS_CP15(c14, 0, c14, 6)
+#define PMEVTYPER23 __ACCESS_CP15(c14, 0, c14, 7)
+#define PMEVTYPER24 __ACCESS_CP15(c14, 0, c15, 0)
+#define PMEVTYPER25 __ACCESS_CP15(c14, 0, c15, 1)
+#define PMEVTYPER26 __ACCESS_CP15(c14, 0, c15, 2)
+#define PMEVTYPER27 __ACCESS_CP15(c14, 0, c15, 3)
+#define PMEVTYPER28 __ACCESS_CP15(c14, 0, c15, 4)
+#define PMEVTYPER29 __ACCESS_CP15(c14, 0, c15, 5)
+#define PMEVTYPER30 __ACCESS_CP15(c14, 0, c15, 6)
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(PMEVCNTR##n)
+static inline unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, PMEVCNTR##n)
+static inline void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, PMEVTYPER##n)
+static inline void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
+static inline unsigned long read_pmmir(void)
+{
+ return read_sysreg(PMMIR);
+}
+
+static inline u32 read_pmuver(void)
+{
+ /* PMUVers is not a signed field */
+ u32 dfr0 = read_cpuid_ext(CPUID_EXT_DFR0);
+
+ return (dfr0 >> 24) & 0xf;
+}
+
+static inline bool pmuv3_has_icntr(void)
+{
+ /* FEAT_PMUv3_ICNTR not accessible for 32-bit */
+ return false;
+}
+
+static inline void write_pmcr(u32 val)
+{
+ write_sysreg(val, PMCR);
+}
+
+static inline u32 read_pmcr(void)
+{
+ return read_sysreg(PMCR);
+}
+
+static inline void write_pmselr(u32 val)
+{
+ write_sysreg(val, PMSELR);
+}
+
+static inline void write_pmccntr(u64 val)
+{
+ write_sysreg(val, PMCCNTR);
+}
+
+static inline u64 read_pmccntr(void)
+{
+ return read_sysreg(PMCCNTR);
+}
+
+static inline void write_pmicntr(u64 val) {}
+
+static inline u64 read_pmicntr(void)
+{
+ return 0;
+}
+
+static inline void write_pmcntenset(u32 val)
+{
+ write_sysreg(val, PMCNTENSET);
+}
+
+static inline void write_pmcntenclr(u32 val)
+{
+ write_sysreg(val, PMCNTENCLR);
+}
+
+static inline void write_pmintenset(u32 val)
+{
+ write_sysreg(val, PMINTENSET);
+}
+
+static inline void write_pmintenclr(u32 val)
+{
+ write_sysreg(val, PMINTENCLR);
+}
+
+static inline void write_pmccfiltr(u32 val)
+{
+ write_sysreg(val, PMCCFILTR);
+}
+
+static inline void write_pmicfiltr(u64 val) {}
+
+static inline u64 read_pmicfiltr(void)
+{
+ return 0;
+}
+
+static inline void write_pmovsclr(u32 val)
+{
+ write_sysreg(val, PMOVSR);
+}
+
+static inline u32 read_pmovsclr(void)
+{
+ return read_sysreg(PMOVSR);
+}
+
+static inline void write_pmuserenr(u32 val)
+{
+ write_sysreg(val, PMUSERENR);
+}
+
+static inline void write_pmuacr(u64 val) {}
+
+static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
+static inline void kvm_clr_pmu_events(u32 clr) {}
+static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
+{
+ return false;
+}
+
+static inline bool kvm_set_pmuserenr(u64 val)
+{
+ return false;
+}
+
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
+/* PMU Version in DFR Register */
+#define ARMV8_PMU_DFR_VER_NI 0
+#define ARMV8_PMU_DFR_VER_V3P1 0x4
+#define ARMV8_PMU_DFR_VER_V3P4 0x5
+#define ARMV8_PMU_DFR_VER_V3P5 0x6
+#define ARMV8_PMU_DFR_VER_V3P9 0x9
+#define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
+
+static inline bool pmuv3_implemented(int pmuver)
+{
+ return !(pmuver == ARMV8_PMU_DFR_VER_IMP_DEF ||
+ pmuver == ARMV8_PMU_DFR_VER_NI);
+}
+
+static inline bool is_pmuv3p4(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P4;
+}
+
+static inline bool is_pmuv3p5(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
+}
+
+static inline bool is_pmuv3p9(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P9;
+}
+
+static inline u64 read_pmceid0(void)
+{
+ u64 val = read_sysreg(PMCEID0);
+
+ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
+ val |= (u64)read_sysreg(PMCEID2) << 32;
+
+ return val;
+}
+
+static inline u64 read_pmceid1(void)
+{
+ u64 val = read_sysreg(PMCEID1);
+
+ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
+ val |= (u64)read_sysreg(PMCEID3) << 32;
+
+ return val;
+}
+
+#endif
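[Annotation] Taken together, the accessors above are enough to program one event counter: set its event type, zero it, then set its enable bit. A kernel-context sketch, not the actual armv8pmu driver code, under the assumption that PMCNTENSET carries one enable bit per counter index (the PMUv3 layout):

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Sketch: program event counter 'idx' to count event 'evt'. */
    static void pmu_setup_counter(int idx, u32 evt)
    {
            write_pmevtypern(idx, evt);     /* what to count */
            write_pmevcntrn(idx, 0);        /* count from zero */
            write_pmcntenset(BIT(idx));     /* enable counter 'idx' */
    }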
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index a5fef710af32..d33c1e24e00b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/assembler.h
*
* Copyright (C) 1996-2000 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This file contains arm architecture specific defines
* for the different processors.
*
@@ -21,8 +18,12 @@
#endif
#include <asm/ptrace.h>
-#include <asm/domain.h>
#include <asm/opcodes-virt.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
#define IOMEM(x) (x)
@@ -30,8 +31,8 @@
* Endian independent macros for shifting bytes within registers.
*/
#ifndef __ARMEB__
-#define pull lsr
-#define push lsl
+#define lspull lsr
+#define lspush lsl
#define get_byte_0 lsl #0
#define get_byte_1 lsr #8
#define get_byte_2 lsr #16
@@ -41,8 +42,8 @@
#define put_byte_2 lsl #16
#define put_byte_3 lsl #24
#else
-#define pull lsl
-#define push lsr
+#define lspull lsl
+#define lspush lsr
#define get_byte_0 lsr #24
#define get_byte_1 lsr #16
#define get_byte_2 lsr #8
@@ -53,6 +54,13 @@
#define put_byte_3 lsl #0
#endif
+/* Select code for any configuration running in BE8 mode */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ARM_BE8(code...) code
+#else
+#define ARM_BE8(code...)
+#endif
+
/*
* Data preload for architectures that support it
*/
@@ -77,6 +85,12 @@
#define CALGN(code...)
#endif
+#define IMM12_MASK 0xfff
+
+/* the frame pointer used for stack unwinding */
+ARM( fpreg .req r11 )
+THUMB( fpreg .req r7 )
+
/*
* Enable and disable interrupts
*/
@@ -98,33 +112,47 @@
.endm
#endif
- .macro asm_trace_hardirqs_off
+#if __LINUX_ARM_ARCH__ < 7
+ .macro dsb, args
+ mcr p15, 0, r0, c7, c10, 4
+ .endm
+
+ .macro isb, args
+ mcr p15, 0, r0, c7, c5, 4
+ .endm
+#endif
+
+ .macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
+ .if \save
stmdb sp!, {r0-r3, ip, lr}
+ .endif
bl trace_hardirqs_off
+ .if \save
ldmia sp!, {r0-r3, ip, lr}
+ .endif
#endif
.endm
- .macro asm_trace_hardirqs_on_cond, cond
+ .macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
/*
* actually the registers should be pushed and pop'd conditionally, but
* after bl the flags are certainly clobbered
*/
+ .if \save
stmdb sp!, {r0-r3, ip, lr}
+ .endif
bl\cond trace_hardirqs_on
+ .if \save
ldmia sp!, {r0-r3, ip, lr}
+ .endif
#endif
.endm
- .macro asm_trace_hardirqs_on
- asm_trace_hardirqs_on_cond al
- .endm
-
- .macro disable_irq
+ .macro disable_irq, save=1
disable_irq_notrace
- asm_trace_hardirqs_off
+ asm_trace_hardirqs_off \save
.endm
.macro enable_irq
@@ -145,7 +173,11 @@
.endm
.macro save_and_disable_irqs_notrace, oldcpsr
+#ifdef CONFIG_CPU_V7M
+ mrs \oldcpsr, primask
+#else
mrs \oldcpsr, cpsr
+#endif
disable_irq_notrace
.endm
@@ -163,17 +195,65 @@
.macro restore_irqs, oldcpsr
tst \oldcpsr, #PSR_I_BIT
- asm_trace_hardirqs_on_cond eq
+ asm_trace_hardirqs_on cond=eq
restore_irqs_notrace \oldcpsr
.endm
-#define USER(x...) \
+/*
+ * Assembly version of "adr rd, BSYM(sym)". This should only be used to
+ * reference local symbols in the same assembly file which are to be
+ * resolved by the assembler. Other usage is undefined.
+ */
+ .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+ .macro badr\c, rd, sym
+#ifdef CONFIG_THUMB2_KERNEL
+ adr\c \rd, \sym + 1
+#else
+ adr\c \rd, \sym
+#endif
+ .endm
+ .endr
+
+/*
+ * Get current thread_info.
+ */
+ .macro get_thread_info, rd
+ /* thread_info is the first member of struct task_struct */
+ get_current \rd
+ .endm
+
+/*
+ * Increment/decrement the preempt count.
+ */
+#ifdef CONFIG_PREEMPT_COUNT
+ .macro inc_preempt_count, ti, tmp
+ ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
+ add \tmp, \tmp, #1 @ increment it
+ str \tmp, [\ti, #TI_PREEMPT]
+ .endm
+
+ .macro dec_preempt_count, ti, tmp
+ ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
+ sub \tmp, \tmp, #1 @ decrement it
+ str \tmp, [\ti, #TI_PREEMPT]
+ .endm
+#else
+ .macro inc_preempt_count, ti, tmp
+ .endm
+
+ .macro dec_preempt_count, ti, tmp
+ .endm
+#endif
+
+#define USERL(l, x...) \
9999: x; \
.pushsection __ex_table,"a"; \
.align 3; \
- .long 9999b,9001f; \
+ .long 9999b,l; \
.popsection
+#define USER(x...) USERL(9001f, x)
+
#ifdef CONFIG_SMP
#define ALT_SMP(instr...) \
9998: instr
@@ -184,17 +264,21 @@
*/
#define ALT_UP(instr...) \
.pushsection ".alt.smp.init", "a" ;\
- .long 9998b ;\
+ .align 2 ;\
+ .long 9998b - . ;\
9997: instr ;\
+ .if . - 9997b == 2 ;\
+ nop ;\
+ .endif ;\
.if . - 9997b != 4 ;\
.error "ALT_UP() content must assemble to exactly 4 bytes";\
.endif ;\
.popsection
#define ALT_UP_B(label) \
- .equ up_b_offset, label - 9998b ;\
.pushsection ".alt.smp.init", "a" ;\
- .long 9998b ;\
- W(b) . + up_b_offset ;\
+ .align 2 ;\
+ .long 9998b - . ;\
+ W(b) . + (label - 9998b) ;\
.popsection
#else
#define ALT_SMP(instr...)
@@ -202,6 +286,80 @@
#define ALT_UP_B(label) b label
#endif
+ /*
+ * this_cpu_offset - load the per-CPU offset of this CPU into
+ * register 'rd'
+ */
+ .macro this_cpu_offset, rd:req
+#ifdef CONFIG_SMP
+ALT_SMP(mrc p15, 0, \rd, c13, c0, 4)
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L1_\@)
+.L0_\@:
+ .subsection 1
+.L1_\@: ldr_va \rd, __per_cpu_offset
+ b .L0_\@
+ .previous
+#endif
+#else
+ mov \rd, #0
+#endif
+ .endm
+
+ /*
+ * set_current - store the task pointer of this CPU's current task
+ */
+ .macro set_current, rn:req, tmp:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+9998: mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L0_\@)
+ .subsection 1
+.L0_\@: str_va \rn, __current, \tmp
+ b .L1_\@
+ .previous
+.L1_\@:
+#endif
+#else
+ str_va \rn, __current, \tmp
+#endif
+ .endm
+
+ /*
+ * get_current - load the task pointer of this CPU's current task
+ */
+ .macro get_current, rd:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+9998: mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L0_\@)
+ .subsection 1
+.L0_\@: ldr_va \rd, __current
+ b .L1_\@
+ .previous
+.L1_\@:
+#endif
+#else
+ ldr_va \rd, __current
+#endif
+ .endm
+
+ /*
+ * reload_current - reload the task pointer of this CPU's current task
+ * into the TLS register
+ */
+ .macro reload_current, t1:req, t2:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+#ifdef CONFIG_CPU_V6
+ALT_SMP(nop)
+ALT_UP_B(.L0_\@)
+#endif
+ ldr_this_cpu \t1, __entry_task, \t1, \t2
+ mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO
+.L0_\@:
+#endif
+ .endm
+
/*
* Instruction barrier
*/
@@ -220,9 +378,9 @@
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
.ifeqs "\mode","arm"
- ALT_SMP(dmb)
+ ALT_SMP(dmb ish)
.else
- ALT_SMP(W(dmb))
+ ALT_SMP(W(dmb) ish)
.endif
#elif __LINUX_ARM_ARCH__ == 6
ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
@@ -237,6 +395,23 @@
#endif
.endm
+/*
+ * Raw SMP data memory barrier
+ */
+ .macro __smp_dmb mode
+#if __LINUX_ARM_ARCH__ >= 7
+ .ifeqs "\mode","arm"
+ dmb ish
+ .else
+ W(dmb) ish
+ .endif
+#elif __LINUX_ARM_ARCH__ == 6
+ mcr p15, 0, r0, c7, c10, 5 @ dmb
+#else
+ .error "Incompatible SMP platform"
+#endif
+ .endm
+
#if defined(CONFIG_CPU_V7M)
/*
* setmode is used to assert to be in svc mode during boot. For v7-M
@@ -263,7 +438,7 @@
* you cannot return to the original mode.
*/
.macro safe_svcmode_maskall reg:req
-#if __LINUX_ARM_ARCH__ >= 6
+#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
mrs \reg , cpsr
eor \reg, \reg, #HYP_MODE
tst \reg, #MODE_MASK
@@ -272,7 +447,7 @@
THUMB( orr \reg , \reg , #PSR_T_BIT )
bne 1f
orr \reg, \reg, #PSR_A_BIT
- adr lr, BSYM(2f)
+ badr lr, 2f
msr spsr_cxsf, \reg
__MSR_ELR_HYP(14)
__ERET
@@ -295,9 +470,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
.if \inc == 1
- \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
+ \instr\()b\t\cond\().w \reg, [\ptr, #\off]
.elseif \inc == 4
- \instr\cond\()\t\().w \reg, [\ptr, #\off]
+ \instr\t\cond\().w \reg, [\ptr, #\off]
.else
.error "Unsupported inc macro argument"
.endif
@@ -336,9 +511,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.rept \rept
9999:
.if \inc == 1
- \instr\cond\()b\()\t \reg, [\ptr], #\inc
+ \instr\()b\t\cond \reg, [\ptr], #\inc
.elseif \inc == 4
- \instr\cond\()\t \reg, [\ptr], #\inc
+ \instr\t\cond \reg, [\ptr], #\inc
.else
.error "Unsupported inc macro argument"
.endif
@@ -368,12 +543,253 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.size \name , . - \name
.endm
- .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
-#ifndef CONFIG_CPU_USE_DOMAINS
- adds \tmp, \addr, #\size - 1
- sbcccs \tmp, \tmp, \limit
- bcs \bad
+ .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+ .macro ret\c, reg
+#if __LINUX_ARM_ARCH__ < 6
+ mov\c pc, \reg
+#else
+ .ifeqs "\reg", "lr"
+ bx\c \reg
+ .else
+ mov\c pc, \reg
+ .endif
+#endif
+ .endm
+ .endr
+
+ .macro ret.w, reg
+ ret \reg
+#ifdef CONFIG_THUMB2_KERNEL
+ nop
+#endif
+ .endm
+
+ .macro bug, msg, line
+#ifdef CONFIG_THUMB2_KERNEL
+1: .inst 0xde02
+#else
+1: .inst 0xe7f001f2
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ .pushsection .rodata.str, "aMS", %progbits, 1
+2: .asciz "\msg"
+ .popsection
+ .pushsection __bug_table, "aw"
+ .align 2
+ .word 1b, 2b
+ .hword \line
+ .popsection
+#endif
+ .endm
+
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry) \
+ .pushsection "_kprobe_blacklist", "aw" ; \
+ .balign 4 ; \
+ .long entry; \
+ .popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
+ .macro __adldst_l, op, reg, sym, tmp, c
+ .if __LINUX_ARM_ARCH__ < 7
+ ldr\c \tmp, .La\@
+ .subsection 1
+ .align 2
+.La\@: .long \sym - .Lpc\@
+ .previous
+ .else
+ .ifnb \c
+ THUMB( ittt \c )
+ .endif
+ movw\c \tmp, #:lower16:\sym - .Lpc\@
+ movt\c \tmp, #:upper16:\sym - .Lpc\@
+ .endif
+
+#ifndef CONFIG_THUMB2_KERNEL
+ .set .Lpc\@, . + 8 // PC bias
+ .ifc \op, add
+ add\c \reg, \tmp, pc
+ .else
+ \op\c \reg, [pc, \tmp]
+ .endif
+#else
+.Lb\@: add\c \tmp, \tmp, pc
+ /*
+ * In Thumb-2 builds, the PC bias depends on whether we are currently
+ * emitting into a .arm or a .thumb section. The size of the add opcode
+ * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
+ * emitting in ARM mode, so let's use this to account for the bias.
+ */
+ .set .Lpc\@, . + (. - .Lb\@)
+
+ .ifnc \op, add
+ \op\c \reg, [\tmp]
+ .endif
+#endif
+ .endm
+
+ /*
+ * mov_l - move a constant value or [relocated] address into a register
+ */
+ .macro mov_l, dst:req, imm:req, cond
+ .if __LINUX_ARM_ARCH__ < 7
+ ldr\cond \dst, =\imm
+ .else
+ movw\cond \dst, #:lower16:\imm
+ movt\cond \dst, #:upper16:\imm
+ .endif
+ .endm
+
+ /*
+ * adr_l - adr pseudo-op with unlimited range
+ *
+ * @dst: destination register
+ * @sym: name of the symbol
+ * @cond: conditional opcode suffix
+ */
+ .macro adr_l, dst:req, sym:req, cond
+ __adldst_l add, \dst, \sym, \dst, \cond
+ .endm
+
+ /*
+ * ldr_l - ldr <literal> pseudo-op with unlimited range
+ *
+ * @dst: destination register
+ * @sym: name of the symbol
+ * @cond: conditional opcode suffix
+ */
+ .macro ldr_l, dst:req, sym:req, cond
+ __adldst_l ldr, \dst, \sym, \dst, \cond
+ .endm
+
+ /*
+ * str_l - str <literal> pseudo-op with unlimited range
+ *
+ * @src: source register
+ * @sym: name of the symbol
+ * @tmp: mandatory scratch register
+ * @cond: conditional opcode suffix
+ */
+ .macro str_l, src:req, sym:req, tmp:req, cond
+ __adldst_l str, \src, \sym, \tmp, \cond
+ .endm
+
+ .macro __ldst_va, op, reg, tmp, sym, cond, offset
+#if __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ mov_l \tmp, \sym, \cond
+#else
+ /*
+ * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
+ * with the appropriate relocations. The combined sequence has a range
+ * of -/+ 256 MiB, which should be sufficient for the core kernel and
+ * for modules loaded into the module region.
+ */
+ .globl \sym
+ .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
+ .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
+ .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
+.L0_\@: sub\cond \tmp, pc, #8 - \offset
+.L1_\@: sub\cond \tmp, \tmp, #4 - \offset
+.L2_\@:
#endif
+ \op\cond \reg, [\tmp, #\offset]
+ .endm
+
+ /*
+ * ldr_va - load a 32-bit word from the virtual address of \sym
+ */
+ .macro ldr_va, rd:req, sym:req, cond, tmp, offset=0
+ .ifnb \tmp
+ __ldst_va ldr, \rd, \tmp, \sym, \cond, \offset
+ .else
+ __ldst_va ldr, \rd, \rd, \sym, \cond, \offset
+ .endif
+ .endm
+
+ /*
+ * str_va - store a 32-bit word to the virtual address of \sym
+ */
+ .macro str_va, rn:req, sym:req, tmp:req, cond
+ __ldst_va str, \rn, \tmp, \sym, \cond, 0
+ .endm
+
+ /*
+ * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
+ * without using a temp register. Supported in ARM mode
+ * only.
+ */
+ .macro ldr_this_cpu_armv6, rd:req, sym:req
+ this_cpu_offset \rd
+ .globl \sym
+ .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
+ .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
+ .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
+ add \rd, \rd, pc
+.L0_\@: sub \rd, \rd, #4
+.L1_\@: sub \rd, \rd, #0
+.L2_\@: ldr \rd, [\rd, #4]
+ .endm
+
+ /*
+ * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
+ * into register 'rd', which may be the stack pointer,
+ * using 't1' and 't2' as general temp registers. These
+ * are permitted to overlap with 'rd' if != sp
+ */
+ .macro ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
+#ifndef CONFIG_SMP
+ ldr_va \rd, \sym, tmp=\t1
+#elif __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ this_cpu_offset \t1
+ mov_l \t2, \sym
+ ldr \rd, [\t1, \t2]
+#else
+ ldr_this_cpu_armv6 \rd, \sym
+#endif
+ .endm
+
+ /*
+ * rev_l - byte-swap a 32-bit value
+ *
+ * @val: source/destination register
+ * @tmp: scratch register
+ */
+ .macro rev_l, val:req, tmp:req
+ .if __LINUX_ARM_ARCH__ < 6
+ eor \tmp, \val, \val, ror #16
+ bic \tmp, \tmp, #0x00ff0000
+ mov \val, \val, ror #8
+ eor \val, \val, \tmp, lsr #8
+ .else
+ rev \val, \val
+ .endif
+ .endm
+
+ .if __LINUX_ARM_ARCH__ < 6
+ .set .Lrev_l_uses_tmp, 1
+ .else
+ .set .Lrev_l_uses_tmp, 0
+ .endif
+
+ /*
+ * bl_r - branch and link to register
+ *
+ * @dst: target to branch to
+ * @c: conditional opcode suffix
+ */
+ .macro bl_r, dst:req, c
+ .if __LINUX_ARM_ARCH__ < 6
+ mov\c lr, pc
+ mov\c pc, \dst
+ .else
+ blx\c \dst
+ .endif
.endm
#endif /* __ASM_ASSEMBLER_H__ */
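[Annotation] The pre-ARMv6 arm of the rev_l macro above byte-swaps a word in four data-processing instructions instead of a single rev. The same trick in C, with each line mapped back to its instruction:

    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t ror32(uint32_t x, unsigned int n)
    {
            return (x >> n) | (x << (32 - n));
    }

    /* C rendering of the pre-ARMv6 rev_l sequence. */
    static uint32_t rev_l_c(uint32_t val)
    {
            uint32_t tmp = val ^ ror32(val, 16); /* eor tmp, val, val, ror #16 */
            tmp &= ~0x00ff0000u;                 /* bic tmp, tmp, #0x00ff0000 */
            val = ror32(val, 8);                 /* mov val, val, ror #8 */
            return val ^ (tmp >> 8);             /* eor val, val, tmp, lsr #8 */
    }

    int main(void)
    {
            printf("%08x\n", rev_l_c(0x12345678)); /* prints 78563412 */
            return 0;
    }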
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..f0e3b01afa74 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -1,24 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/atomic.h
*
* Copyright (C) 1996 Russell King.
* Copyright (C) 2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H
#include <linux/compiler.h>
+#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
-#define ATOMIC_INIT(i) { (i) }
-
#ifdef __KERNEL__
/*
@@ -26,8 +22,8 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
-#define atomic_set(v,i) (((v)->counter) = (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#if __LINUX_ARM_ARCH__ >= 6
@@ -36,87 +32,83 @@
* store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens.
*/
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- __asm__ __volatile__("@ atomic_add\n"
-"1: ldrex %0, [%3]\n"
-" add %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- smp_mb();
-
- __asm__ __volatile__("@ atomic_add_return\n"
-"1: ldrex %0, [%3]\n"
-" add %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
- smp_mb();
-
- return result;
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ prefetchw(&v->counter); \
+ __asm__ __volatile__("@ atomic_" #op "\n" \
+"1: ldrex %0, [%3]\n" \
+" " #asm_op " %0, %0, %4\n" \
+" strex %1, %0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic_" #op "_return\n" \
+"1: ldrex %0, [%3]\n" \
+" " #asm_op " %0, %0, %4\n" \
+" strex %1, %0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+ \
+ return result; \
}
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- __asm__ __volatile__("@ atomic_sub\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result, val; \
+ \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic_fetch_" #op "\n" \
+"1: ldrex %0, [%4]\n" \
+" " #asm_op " %1, %0, %5\n" \
+" strex %2, %1, [%4]\n" \
+" teq %2, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+ \
+ return result; \
}
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- smp_mb();
-
- __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
-
- smp_mb();
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
- return result;
-}
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
- unsigned long oldval, res;
+ int oldval;
+ unsigned long res;
- smp_mb();
+ prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -129,25 +121,37 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
: "cc");
} while (res);
- smp_mb();
-
return oldval;
}
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
- unsigned long tmp, tmp2;
+ int oldval, newval;
+ unsigned long tmp;
- __asm__ __volatile__("@ atomic_clear_mask\n"
-"1: ldrex %0, [%3]\n"
-" bic %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
- : "r" (addr), "Ir" (mask)
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__ ("@ atomic_add_unless\n"
+"1: ldrex %0, [%4]\n"
+" teq %0, %5\n"
+" beq 2f\n"
+" add %1, %0, %6\n"
+" strex %2, %1, [%4]\n"
+" teq %2, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
: "cc");
+
+ if (oldval != u)
+ smp_mb();
+
+ return oldval;
}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#else /* ARM_ARCH_6 */
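[Annotation] arch_atomic_fetch_add_unless() above is the ldrex/strex rendering of a compare-and-swap retry loop; note that the trailing barrier is skipped when the value already equals 'u' and no store happens. A portable C11 sketch of the same semantics, built on compare_exchange rather than on exclusives:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Add 'a' to *v unless it currently equals 'u'; return the old value. */
    static int fetch_add_unless(atomic_int *v, int a, int u)
    {
            int old = atomic_load(v);

            while (old != u &&
                   !atomic_compare_exchange_weak(v, &old, old + a))
                    ;       /* failed CAS reloads 'old'; retry */

            return old;
    }

    int main(void)
    {
            atomic_int v = 5;

            printf("%d %d\n", fetch_add_unless(&v, 1, 5), atomic_load(&v)); /* 5 5 */
            v = 4;
            printf("%d %d\n", fetch_add_unless(&v, 1, 5), atomic_load(&v)); /* 4 5 */
            return 0;
    }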
@@ -155,35 +159,55 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#error SMP not supported on pre-ARMv6 CPUs
#endif
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int val;
-
- raw_local_irq_save(flags);
- val = v->counter;
- v->counter = val += i;
- raw_local_irq_restore(flags);
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int val; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ val = v->counter; \
+ raw_local_irq_restore(flags); \
+ \
+ return val; \
+}
- return val;
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int val; \
+ \
+ raw_local_irq_save(flags); \
+ val = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ \
+ return val; \
}
-#define atomic_add(i, v) (void) atomic_add_return(i, v)
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int val;
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
- raw_local_irq_save(flags);
- val = v->counter;
- v->counter = val -= i;
- raw_local_irq_restore(flags);
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
- return val;
-}
-#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -196,57 +220,46 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags);
- *addr &= ~mask;
- raw_local_irq_restore(flags);
-}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#endif /* __LINUX_ARM_ARCH__ */
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
-
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
- c = old;
- return c;
-}
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
+#define arch_atomic_andnot arch_atomic_andnot
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v) (atomic_add_return(1, v))
-#define atomic_dec_return(v) (atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, orr)
+ATOMIC_OPS(xor, ^=, eor)
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
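
For reference, on this pre-ARMv6 (UP-only) path ATOMIC_OPS(add, +=, add) expands to roughly the pair below (the fetch variant is analogous), with atomicity provided purely by masking interrupts:

	static inline void arch_atomic_add(int i, atomic_t *v)
	{
		unsigned long flags;

		raw_local_irq_save(flags);	/* no SMP here; IRQ-off suffices */
		v->counter += i;
		raw_local_irq_restore(flags);
	}

	static inline int arch_atomic_add_return(int i, atomic_t *v)
	{
		unsigned long flags;
		int val;

		raw_local_irq_save(flags);
		v->counter += i;
		val = v->counter;	/* new value, per _return semantics */
		raw_local_irq_restore(flags);
		return val;
	}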
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
- u64 __aligned(8) counter;
+ s64 counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
- u64 result;
+ s64 result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrd %0, %H0, [%1]"
@@ -257,7 +270,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
@@ -266,9 +279,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
);
}
#else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
- u64 result;
+ s64 result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
@@ -279,10 +292,11 @@ static inline u64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
- u64 tmp;
+ s64 tmp;
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
@@ -294,92 +308,112 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
}
#endif
-static inline void atomic64_add(u64 i, atomic64_t *v)
-{
- u64 result;
- unsigned long tmp;
-
- __asm__ __volatile__("@ atomic64_add\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" adds %0, %0, %4\n"
-" adc %H0, %H0, %H4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (i)
- : "cc");
+#define ATOMIC64_OP(op, op1, op2) \
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
+{ \
+ s64 result; \
+ unsigned long tmp; \
+ \
+ prefetchw(&v->counter); \
+ __asm__ __volatile__("@ atomic64_" #op "\n" \
+"1: ldrexd %0, %H0, [%3]\n" \
+" " #op1 " %Q0, %Q0, %Q4\n" \
+" " #op2 " %R0, %R0, %R4\n" \
+" strexd %1, %0, %H0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+} \
+
+#define ATOMIC64_OP_RETURN(op, op1, op2) \
+static inline s64 \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
+{ \
+ s64 result; \
+ unsigned long tmp; \
+ \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic64_" #op "_return\n" \
+"1: ldrexd %0, %H0, [%3]\n" \
+" " #op1 " %Q0, %Q0, %Q4\n" \
+" " #op2 " %R0, %R0, %R4\n" \
+" strexd %1, %0, %H0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+ \
+ return result; \
}
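
The %Q/%R operand modifiers used in these macros are what make them endian-safe: for a 64-bit operand held in a register pair, %Q names the register holding the least-significant 32 bits and %R the most-significant 32 bits, whichever of the pair that is for the current endianness, while %H simply names the higher-numbered register as ldrexd/strexd require. For a value allocated to r2/r3 on a little-endian kernel:

	/*   %0  -> r2   (first register of the pair, as allocated)
	 *   %H0 -> r3   (always the next register: what ldrexd/strexd need)
	 *   %Q0 -> r2   (low half)
	 *   %R0 -> r3   (high half; %Q/%R swap roles on big-endian)
	 */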
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
-{
- u64 result;
- unsigned long tmp;
-
- smp_mb();
-
- __asm__ __volatile__("@ atomic64_add_return\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" adds %0, %0, %4\n"
-" adc %H0, %H0, %H4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (i)
- : "cc");
-
- smp_mb();
-
- return result;
+#define ATOMIC64_FETCH_OP(op, op1, op2) \
+static inline s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
+{ \
+ s64 result, val; \
+ unsigned long tmp; \
+ \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
+"1: ldrexd %0, %H0, [%4]\n" \
+" " #op1 " %Q1, %Q0, %Q5\n" \
+" " #op2 " %R1, %R0, %R5\n" \
+" strexd %2, %1, %H1, [%4]\n" \
+" teq %2, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+ \
+ return result; \
}
-static inline void atomic64_sub(u64 i, atomic64_t *v)
-{
- u64 result;
- unsigned long tmp;
+#define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_OP_RETURN(op, op1, op2) \
+ ATOMIC64_FETCH_OP(op, op1, op2)
- __asm__ __volatile__("@ atomic64_sub\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (i)
- : "cc");
-}
+ATOMIC64_OPS(add, adds, adc)
+ATOMIC64_OPS(sub, subs, sbc)
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
-{
- u64 result;
- unsigned long tmp;
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
- smp_mb();
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_FETCH_OP(op, op1, op2)
- __asm__ __volatile__("@ atomic64_sub_return\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (i)
- : "cc");
+#define arch_atomic64_andnot arch_atomic64_andnot
- smp_mb();
+ATOMIC64_OPS(and, and, and)
+ATOMIC64_OPS(andnot, bic, bic)
+ATOMIC64_OPS(or, orr, orr)
+ATOMIC64_OPS(xor, eor, eor)
- return result;
-}
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
- u64 oldval;
+ s64 oldval;
unsigned long res;
- smp_mb();
+ prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -393,17 +427,16 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
: "cc");
} while (res);
- smp_mb();
-
return oldval;
}
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
- u64 result;
+ s64 result;
unsigned long tmp;
- smp_mb();
+ prefetchw(&ptr->counter);
__asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
@@ -414,23 +447,23 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
: "r" (&ptr->counter), "r" (new)
: "cc");
- smp_mb();
-
return result;
}
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
- u64 result;
+ s64 result;
unsigned long tmp;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, #1\n"
-" sbc %H0, %H0, #0\n"
-" teq %H0, #0\n"
+" subs %Q0, %Q0, #1\n"
+" sbc %R0, %R0, #0\n"
+" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
@@ -444,46 +477,37 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
return result;
}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
- u64 val;
+ s64 oldval, newval;
unsigned long tmp;
- int ret = 1;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
-" moveq %1, #0\n"
" beq 2f\n"
-" adds %0, %0, %6\n"
-" adc %H0, %H0, %H6\n"
-" strexd %2, %0, %H0, [%4]\n"
+" adds %Q1, %Q0, %Q6\n"
+" adc %R1, %R0, %R6\n"
+" strexd %2, %1, %H1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
- : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
- if (ret)
+ if (oldval != u)
smp_mb();
- return ret;
+ return oldval;
}
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h
new file mode 100644
index 000000000000..fbd388c46299
--- /dev/null
+++ b/arch/arm/include/asm/auxvec.h
@@ -0,0 +1 @@
+#include <uapi/asm/auxvec.h>
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644
index 000000000000..45a75d9381eb
--- /dev/null
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+ bL_switch_completion_handler completer,
+ void *completer_cookie);
+static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+{
+ return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE 0
+#define BL_NOTIFY_POST_ENABLE 1
+#define BL_NOTIFY_PRE_DISABLE 2
+#define BL_NOTIFY_POST_DISABLE 3
+
+#ifdef CONFIG_BL_SWITCHER
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled. Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled(). These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
+
+int bL_switcher_trace_trigger(void);
+int bL_switcher_get_logical_index(u32 mpidr);
+
+#else
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
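
A hypothetical client of the notifier API declared above (the callback name and its bodies are illustrative; only the BL_NOTIFY_* actions and registration calls come from this header):

	#include <linux/notifier.h>
	#include <asm/bL_switcher.h>

	static int my_bl_notify(struct notifier_block *nb,
				unsigned long action, void *data)
	{
		switch (action) {
		case BL_NOTIFY_PRE_ENABLE:
			/* quiesce per-cluster state before switching starts */
			break;
		case BL_NOTIFY_POST_DISABLE:
			/* switcher gone; per-CPU assumptions hold again */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_bl_nb = {
		.notifier_call = my_bl_notify,
	};

	/* from an initcall: bL_switcher_register_notifier(&my_bl_nb); */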
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 8dcd9c702d90..83ae97c049d9 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -1,8 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
#ifndef __ASSEMBLY__
-#include <asm/outercache.h>
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
@@ -11,58 +11,93 @@
#define sev() __asm__ __volatile__ ("sev" : : : "memory")
#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+#else
+#define wfe() do { } while (0)
#endif
#if __LINUX_ARM_ARCH__ >= 7
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
+#ifdef CONFIG_THUMB2_KERNEL
+#define CSDB ".inst.w 0xf3af8014"
+#else
+#define CSDB ".inst 0xe320f014"
+#endif
+#define csdb() __asm__ __volatile__(CSDB : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
: : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#else
-#define isb() __asm__ __volatile__ ("" : : : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define isb(x) __asm__ __volatile__ ("" : : : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
+#endif
+
+#ifndef CSDB
+#define CSDB
+#endif
+#ifndef csdb
+#define csdb()
+#endif
+
+#ifdef CONFIG_ARM_HEAVY_MB
+extern void (*soc_mb)(void);
+extern void arm_heavy_mb(void);
+#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
+#else
+#define __arm_heavy_mb(x...) dsb(x)
#endif
-#ifdef CONFIG_ARCH_HAS_BARRIERS
-#include <mach/barriers.h>
-#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb() do { dsb(); outer_sync(); } while (0)
+#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
+#define mb() __arm_heavy_mb()
#define rmb() dsb()
-#define wmb() mb()
+#define wmb() __arm_heavy_mb(st)
+#define dma_rmb() dmb(osh)
+#define dma_wmb() dmb(oshst)
#else
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
+#define dma_rmb() barrier()
+#define dma_wmb() barrier()
#endif
-#ifndef CONFIG_SMP
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#else
-#define smp_mb() dmb()
-#define smp_rmb() dmb()
-#define smp_wmb() dmb()
-#endif
+#define __smp_mb() dmb(ish)
+#define __smp_rmb() __smp_mb()
+#define __smp_wmb() dmb(ishst)
-#define read_barrier_depends() do { } while(0)
-#define smp_read_barrier_depends() do { } while(0)
+#ifdef CONFIG_CPU_SPECTRE
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+ unsigned long sz)
+{
+ unsigned long mask;
+
+ asm volatile(
+ "cmp %1, %2\n"
+ " sbc %0, %1, %1\n"
+ CSDB
+ : "=r" (mask)
+ : "r" (idx), "Ir" (sz)
+ : "cc");
+
+ return mask;
+}
+#define array_index_mask_nospec array_index_mask_nospec
+#endif
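
A sketch of the intended use, mirroring the generic array_index_nospec() pattern (the function and names are illustrative):

	static unsigned long read_entry(const unsigned long *table,
					unsigned long idx,
					unsigned long nents)
	{
		if (idx >= nents)
			return 0;
		/* the bounds check above may be bypassed speculatively;
		 * the mask is all-ones when idx < nents and zero otherwise,
		 * so a speculated access is clamped to element 0 */
		return table[idx & array_index_mask_nospec(idx, nents)];
	}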
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_BARRIER_H */
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index e691ec91e4d3..714440fa2fc6 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 1995, Russell King.
* Various bits and pieces copyrights include:
@@ -25,9 +26,7 @@
#include <linux/compiler.h>
#include <linux/irqflags.h>
-
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
+#include <asm/barrier.h>
/*
* These functions are the basis of our bit ops.
@@ -37,9 +36,9 @@
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p |= mask;
@@ -49,9 +48,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *
static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p &= ~mask;
@@ -61,9 +60,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long
static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p ^= mask;
@@ -75,9 +74,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
@@ -92,9 +91,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
@@ -109,9 +108,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
@@ -161,18 +160,20 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/
-extern int _find_first_zero_bit_le(const void * p, unsigned size);
-extern int _find_next_zero_bit_le(const void * p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *p,
+ unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);
/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/
-extern int _find_first_zero_bit_be(const void * p, unsigned size);
-extern int _find_next_zero_bit_be(const void * p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_be(const unsigned long *p,
+ unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);
#ifndef CONFIG_SMP
/*
@@ -216,7 +217,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#if __LINUX_ARM_ARCH__ < 5
-#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
@@ -224,59 +224,20 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#else
-static inline int constant_fls(int x)
-{
- int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-
/*
- * On ARMv5 and above those functions can be implemented around
- * the clz instruction for much better code efficiency.
+ * On ARMv5 and above, the gcc built-ins may rely on the clz instruction
+ * and produce optimal inlined code in all cases. On ARMv7 it is even
+ * better by also using the rbit instruction.
*/
-
-static inline int fls(int x)
-{
- int ret;
-
- if (__builtin_constant_p(x))
- return constant_fls(x);
-
- asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
- ret = 32 - ret;
- return ret;
-}
-
-#define __fls(x) (fls(x) - 1)
-#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
-#define __ffs(x) (ffs(x) - 1)
-#define ffz(x) __ffs( ~(x) )
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-ffs.h>
#endif
+#include <asm-generic/bitops/ffz.h>
+
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
diff --git a/arch/arm/include/asm/bitrev.h b/arch/arm/include/asm/bitrev.h
new file mode 100644
index 000000000000..84ad8dde62d6
--- /dev/null
+++ b/arch/arm/include/asm/bitrev.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BITREV_H
+#define __ASM_BITREV_H
+
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+ __asm__ ("rbit %0, %1" : "=r" (x) : "r" (x));
+ return x;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+ return __arch_bitrev32((u32)x) >> 16;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+ return __arch_bitrev32((u32)x) >> 24;
+}
+
+#endif
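
Since rbit reverses all 32 bits, the 16- and 8-bit helpers shift the result down so the reversed narrow value lands in the low bits. Worked examples, easy to check by hand:

	/*   __arch_bitrev8(0x01)    == 0x80
	 *       (bitrev32(0x01) == 0x80000000, then >> 24)
	 *   __arch_bitrev16(0x0001) == 0x8000
	 *       (bitrev32(0x0001) == 0x80000000, then >> 16)
	 */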
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7af5c6c3653a..ba8d9d7d242b 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -1,9 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASMARM_BUG_H
#define _ASMARM_BUG_H
#include <linux/linkage.h>
-
-#ifdef CONFIG_BUG
+#include <linux/types.h>
+#include <asm/opcodes.h>
/*
* Use a suitable undefined instruction to use for ARM/Thumb2 bug handling.
@@ -12,10 +13,10 @@
*/
#ifdef CONFIG_THUMB2_KERNEL
#define BUG_INSTR_VALUE 0xde02
-#define BUG_INSTR_TYPE ".hword "
+#define BUG_INSTR(__value) __inst_thumb16(__value)
#else
#define BUG_INSTR_VALUE 0xe7f001f2
-#define BUG_INSTR_TYPE ".word "
+#define BUG_INSTR(__value) __inst_arm(__value)
#endif
@@ -33,36 +34,36 @@
#define __BUG(__file, __line, __value) \
do { \
- asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \
+ asm volatile("1:\t" BUG_INSTR(__value) "\n" \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
- ".pushsection __bug_table,\"a\"\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
+ ".align 2\n" \
"3:\t.word 1b, 2b\n" \
"\t.hword " #__line ", 0\n" \
".popsection"); \
unreachable(); \
} while (0)
-#else /* not CONFIG_DEBUG_BUGVERBOSE */
+#else
#define __BUG(__file, __line, __value) \
do { \
- asm volatile(BUG_INSTR_TYPE #__value); \
+ asm volatile(BUG_INSTR(__value) "\n"); \
unreachable(); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
#define HAVE_ARCH_BUG
-#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
struct pt_regs;
void die(const char *msg, struct pt_regs *regs, int err);
-struct siginfo;
-void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+void arm_notify_die(const char *str, struct pt_regs *regs,
+ int signo, int si_code, void __user *addr,
unsigned long err, unsigned long trap);
#ifdef CONFIG_ARM_LPAE
@@ -81,10 +82,12 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
int sig, int code, const char *name);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode,
+ const char *loglvl);
struct mm_struct;
-extern void show_pte(struct mm_struct *mm, unsigned long addr);
+void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
+extern void __show_regs_alloc_free(struct pt_regs *regs);
#endif
diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
index a97f1ea708d1..fe385551edec 100644
--- a/arch/arm/include/asm/bugs.h
+++ b/arch/arm/include/asm/bugs.h
@@ -1,21 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * arch/arm/include/asm/bugs.h
- *
* Copyright (C) 1995-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_BUGS_H
#define __ASM_BUGS_H
-#ifdef CONFIG_MMU
extern void check_writebuffer_bugs(void);
-#define check_bugs() check_writebuffer_bugs()
+#ifdef CONFIG_MMU
+extern void check_other_bugs(void);
#else
-#define check_bugs() do { } while (0)
+#define check_other_bugs() do { } while (0)
#endif
#endif
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66bc02b4..ecbc100d22a5 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/cache.h
*/
@@ -23,6 +24,12 @@
#define ARCH_SLAB_MINALIGN 8
#endif
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(".data..read_mostly")
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
+int cache_line_size(void);
+#endif
+#endif
#endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 17d0ae8672fa..8ed8b9a24efe 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/cacheflush.h
*
* Copyright (C) 1999-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H
@@ -35,7 +32,7 @@
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
- * See Documentation/cachetlb.txt for more information.
+ * See Documentation/core-api/cachetlb.rst for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
@@ -116,7 +113,7 @@ struct cpu_cache_fns {
void (*dma_unmap_area)(const void *, size_t, int);
void (*dma_flush_range)(const void *, const void *);
-};
+} __no_randomize_layout;
/*
* Select the calling method
@@ -140,8 +137,6 @@ extern struct cpu_cache_fns cpu_cache;
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-#define dmac_map_area cpu_cache.dma_map_area
-#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -161,8 +156,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
#endif
@@ -212,6 +205,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
+ dsb(ishst);
}
/*
@@ -237,14 +231,15 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
vma->vm_flags);
}
-static inline void
-vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
struct mm_struct *mm = vma->vm_mm;
if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
unsigned long addr = user_addr & PAGE_MASK;
- __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
+ __cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
+ vma->vm_flags);
}
}
@@ -253,23 +248,24 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
vivt_flush_cache_range(vma,start,end)
-#define flush_cache_page(vma,addr,pfn) \
- vivt_flush_cache_page(vma,addr,pfn)
+#define flush_cache_pages(vma, addr, pfn, nr) \
+ vivt_flush_cache_pages(vma, addr, pfn, nr)
#else
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
+ unsigned long pfn, unsigned int nr);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+#define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)
/*
- * flush_cache_user_range is used when we want to ensure that the
+ * flush_icache_user_range is used when we want to ensure that the
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
-#define flush_cache_user_range(start,end) \
- __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+#define flush_icache_user_range(s,e) __cpuc_coherent_user_range(s,e)
/*
* Perform necessary cache operations to ensure that data previously
@@ -287,7 +283,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* flush_dcache_page is used when the kernel has written to the page
* cache page at virtual address page->virtual.
*
- * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * If this page isn't mapped (ie, folio_mapping == NULL), or it might
* have userspace mappings, then we _must_ always clean + invalidate
* the dcache entries associated with the kernel mapping.
*
@@ -296,8 +292,11 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* See update_mmu_cache for the user space part.
*/
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *);
+void flush_dcache_page(struct page *);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
+#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
if ((cache_is_vivt() || cache_is_vipt_aliasing()))
@@ -319,22 +318,8 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
__flush_anon_page(vma, page, vmaddr);
}
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-extern void flush_kernel_dcache_page(struct page *);
-
-#define flush_dcache_mmap_lock(mapping) \
- spin_lock_irq(&(mapping)->tree_lock)
-#define flush_dcache_mmap_unlock(mapping) \
- spin_unlock_irq(&(mapping)->tree_lock)
-
-#define flush_icache_user_range(vma,page,addr,len) \
- flush_dcache_page(page)
-
-/*
- * We don't appear to need to do anything here. In fact, if we did, we'd
- * duplicate cache flushing elsewhere performed by flush_dcache_page().
- */
-#define flush_icache_page(vma,page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
@@ -352,9 +337,11 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
* set_pte_at() called from vmap_pte_range() does not
* have a DSB after cleaning the cache line.
*/
- dsb();
+ dsb(ishst);
}
+#define flush_cache_vmap_early(start, end) do { } while (0)
+
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
if (!cache_is_vipt_nonaliasing())
@@ -436,4 +423,54 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and so without any intervening memory access in between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ * however some exceptions may exist. Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ */
+#define v7_exit_coherency_flush(level) \
+ asm volatile( \
+ ".arch armv7-a \n\t" \
+ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
+ "bic r0, r0, #"__stringify(CR_C)" \n\t" \
+ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
+ "isb \n\t" \
+ "bl v7_flush_dcache_"__stringify(level)" \n\t" \
+ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
+ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
+ "isb \n\t" \
+ "dsb" \
+ : : : "r0","r1","r2","r3","r4","r5","r6", \
+ "r9","r10","ip","lr","memory" )
+
+void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
+ void *kaddr, unsigned long len);
+
+
+#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
+void check_cpu_icache_size(int cpuid);
+#else
+static inline void check_cpu_icache_size(int cpuid) { }
+#endif
+
#endif
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index 7ea78144ae22..b01c59076b84 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CACHETYPE_H
#define __ASM_ARM_CACHETYPE_H
@@ -19,6 +20,8 @@ extern unsigned int cacheid;
#define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING)
#define icache_is_pipt() cacheid_is(CACHEID_PIPT)
+#define cpu_dcache_is_aliasing() (cache_is_vivt() || cache_is_vipt_aliasing())
+
/*
* __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
* Mask out support which will never be present on newer CPUs.
@@ -56,4 +59,56 @@ static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
(~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
}
+#define CSSELR_ICACHE 1
+#define CSSELR_DCACHE 0
+
+#define CSSELR_L1 (0 << 1)
+#define CSSELR_L2 (1 << 1)
+#define CSSELR_L3 (2 << 1)
+#define CSSELR_L4 (3 << 1)
+#define CSSELR_L5 (4 << 1)
+#define CSSELR_L6 (5 << 1)
+#define CSSELR_L7 (6 << 1)
+
+#ifndef CONFIG_CPU_V7M
+static inline void set_csselr(unsigned int cache_selector)
+{
+ asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (cache_selector));
+}
+
+static inline unsigned int read_ccsidr(void)
+{
+ unsigned int val;
+
+ asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (val));
+ return val;
+}
+
+static inline unsigned int read_clidr(void)
+{
+ unsigned int val;
+
+ asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (val));
+ return val;
+}
+#else /* CONFIG_CPU_V7M */
+#include <linux/io.h>
+#include "asm/v7m.h"
+
+static inline void set_csselr(unsigned int cache_selector)
+{
+ writel(cache_selector, BASEADDR_V7M_SCB + V7M_SCB_CSSELR);
+}
+
+static inline unsigned int read_ccsidr(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CCSIDR);
+}
+
+static inline unsigned int read_clidr(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CLIDR);
+}
+#endif
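
A hypothetical helper built on the accessors above, extracting the L1 D-cache line size. It assumes isb() from asm/barrier.h and that CCSIDR.LineSize (bits [2:0]) holds log2(words per line) minus 2, per the ARMv7 ARM:

	static unsigned int l1_dcache_line_bytes(void)
	{
		unsigned int ccsidr;

		set_csselr(CSSELR_L1 | CSSELR_DCACHE);
		isb();				/* settle the selector write */
		ccsidr = read_ccsidr();
		return 4U << ((ccsidr & 0x7) + 2);	/* LineSize -> bytes */
	}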
+
#endif
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 6dcc16430868..d8a13959bff0 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/checksum.h
*
@@ -10,6 +11,7 @@
#define __ASM_ARM_CHECKSUM_H
#include <linux/in6.h>
+#include <linux/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
@@ -34,10 +36,21 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+csum_partial_copy_nocheck(const void *src, void *dst, int len);
__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+csum_partial_copy_from_user(const void __user *src, void *dst, int len);
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
+{
+ if (!access_ok(src, len))
+ return 0;
+
+ return csum_partial_copy_from_user(src, dst, len);
+}
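
In this interface a return of 0 doubles as the error signal: the access_ok() failure above returns 0 directly, and callers arrange a non-zero initial sum so a successful checksum is never 0. An illustrative caller:

	__wsum sum = csum_and_copy_from_user(ubuf, kbuf, len);
	if (sum == 0)
		return -EFAULT;	/* access_ok failed or the copy faulted */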
/*
* Fold a partial checksum without adding pseudo headers
@@ -84,22 +97,36 @@ ip_fast_csum(const void *iph, unsigned int ihl)
}
static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
- __asm__(
- "adds %0, %1, %2 @ csum_tcpudp_nofold \n\
- adcs %0, %0, %3 \n"
+ u32 lenprot = len + proto;
+ if (__builtin_constant_p(sum) && sum == 0) {
+ __asm__(
+ "adds %0, %1, %2 @ csum_tcpudp_nofold0 \n\t"
#ifdef __ARMEB__
- "adcs %0, %0, %4 \n"
+ "adcs %0, %0, %3 \n\t"
#else
- "adcs %0, %0, %4, lsl #8 \n"
+ "adcs %0, %0, %3, ror #8 \n\t"
#endif
- "adcs %0, %0, %5 \n\
- adc %0, %0, #0"
- : "=&r"(sum)
- : "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
- : "cc");
+ "adc %0, %0, #0"
+ : "=&r" (sum)
+ : "r" (daddr), "r" (saddr), "r" (lenprot)
+ : "cc");
+ } else {
+ __asm__(
+ "adds %0, %1, %2 @ csum_tcpudp_nofold \n\t"
+ "adcs %0, %0, %3 \n\t"
+#ifdef __ARMEB__
+ "adcs %0, %0, %4 \n\t"
+#else
+ "adcs %0, %0, %4, ror #8 \n\t"
+#endif
+ "adc %0, %0, #0"
+ : "=&r"(sum)
+ : "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
+ : "cc");
+ }
return sum;
}
/*
@@ -107,8 +134,8 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
* returns a 16-bit checksum, already complemented
*/
static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
@@ -130,8 +157,8 @@ __csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __
__be32 proto, __wsum sum);
static inline __sum16
-csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len,
- unsigned short proto, __wsum sum)
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum sum)
{
return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
htonl(proto), sum));
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
deleted file mode 100644
index 80751c15c300..000000000000
--- a/arch/arm/include/asm/clkdev.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * arch/arm/include/asm/clkdev.h
- *
- * Copyright (C) 2008 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Helper for the clk API to assist looking up a struct clk.
- */
-#ifndef __ASM_CLKDEV_H
-#define __ASM_CLKDEV_H
-
-#include <linux/slab.h>
-
-#ifdef CONFIG_HAVE_MACH_CLKDEV
-#include <mach/clkdev.h>
-#else
-#define __clk_get(clk) ({ 1; })
-#define __clk_put(clk) do { } while (0)
-#endif
-
-static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
-{
- return kzalloc(size, GFP_KERNEL);
-}
-
-#endif
diff --git a/arch/arm/include/asm/clocksource.h b/arch/arm/include/asm/clocksource.h
new file mode 100644
index 000000000000..13651c731a81
--- /dev/null
+++ b/arch/arm/include/asm/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_CLOCKSOURCE_H
+#define _ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif /* _ASM_CLOCKSOURCE_H */
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c10540d..9beb64d30586 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -1,8 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H
#include <linux/irqflags.h>
+#include <linux/prefetch.h>
#include <asm/barrier.h>
+#include <linux/cmpxchg-emu.h>
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
@@ -23,7 +26,8 @@
#define swp_is_buggy
#endif
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
extern void __bad_xchg(volatile void *, int);
unsigned long ret;
@@ -34,10 +38,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
unsigned int tmp;
#endif
- smp_mb();
+ prefetchw((const void *)ptr);
switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
+#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
case 1:
asm volatile("@ __xchg1\n"
"1: ldrexb %0, [%3]\n"
@@ -48,6 +53,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
: "r" (x), "r" (ptr)
: "memory", "cc");
break;
+ case 2:
+ asm volatile("@ __xchg2\n"
+ "1: ldrexh %0, [%3]\n"
+ " strexh %1, %2, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+#endif
case 4:
asm volatile("@ __xchg4\n"
"1: ldrex %0, [%3]\n"
@@ -92,16 +108,18 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
break;
#endif
default:
+ /* Cause a link-time error, the xchg() size is not supported */
__bad_xchg(ptr, size), ret = 0;
break;
}
- smp_mb();
return ret;
}
-#define xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define arch_xchg_relaxed(ptr, x) ({ \
+ (__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr), \
+ sizeof(*(ptr))); \
+})
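
Only the sizes with cases above are supported: bytes and 32-bit words everywhere, halfwords only where ldrexh exists (ARMv6K and later); anything else resolves to the deliberately undefined __bad_xchg() and fails at link time. An illustrative call (struct work and the variables are hypothetical):

	/* hand off a pointer with no implied ordering */
	struct work *prev = arch_xchg_relaxed(&pending_work, next_work);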
#include <asm-generic/cmpxchg-local.h>
@@ -112,18 +130,22 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error "SMP is not supported on this platform"
#endif
+#define arch_xchg arch_xchg_relaxed
+
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof(*ptr))__generic_cmpxchg_local((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))); \
+})
+
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
-#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
-#endif
#else /* min ARCH >= ARMv6 */
@@ -138,8 +160,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
{
unsigned long oldval, res;
+ prefetchw((const void *)ptr);
+
switch (size) {
-#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
+#ifdef CONFIG_CPU_V6 /* ARCH == ARMv6 */
+ case 1:
+ oldval = cmpxchg_emu_u8((volatile u8 *)ptr, old, new);
+ break;
+#else /* min ARCH > ARMv6 */
case 1:
do {
asm volatile("@ __cmpxchg1\n"
@@ -185,23 +213,12 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long ret;
-
- smp_mb();
- ret = __cmpxchg(ptr, old, new, size);
- smp_mb();
-
- return ret;
-}
-
-#define cmpxchg(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
+#define arch_cmpxchg_relaxed(ptr,o,n) ({ \
+ (__typeof__(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))); \
+})
static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
@@ -213,7 +230,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
- ret = __cmpxchg_local_generic(ptr, old, new, size);
+ ret = __generic_cmpxchg_local(ptr, old, new, size);
break;
#endif
default:
@@ -223,25 +240,45 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
-#define cmpxchg_local(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
-
-#define cmpxchg64(ptr, o, n) \
- ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
- atomic64_t, \
- counter), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n) \
- ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
- local64_t, \
- a), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof(*ptr))__cmpxchg_local((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))); \
+})
+
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long oldval;
+ unsigned long res;
+
+ prefetchw(ptr);
+
+ __asm__ __volatile__(
+"1: ldrexd %1, %H1, [%3]\n"
+" teq %1, %4\n"
+" teqeq %H1, %H4\n"
+" bne 2f\n"
+" strexd %0, %5, %H5, [%3]\n"
+" teq %0, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+ : "r" (ptr), "r" (old), "r" (new)
+ : "cc");
+
+ return oldval;
+}
+
+#define arch_cmpxchg64_relaxed(ptr, o, n) ({ \
+ (__typeof__(*(ptr)))__cmpxchg64((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)); \
+})
+
+#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
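
A sketch of a caller, assuming a hypothetical 64-bit generation counter; ldrexd/strexd require the location to be naturally (8-byte) aligned:

	static void gen_bump(unsigned long long *gen)
	{
		unsigned long long old, new;

		do {
			old = READ_ONCE(*gen);	/* racy read; the CAS validates */
			new = old + 1;
		} while (arch_cmpxchg64_relaxed(gen, old, new) != old);
	}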
#endif /* __LINUX_ARM_ARCH__ >= 6 */
diff --git a/arch/arm/include/asm/compiler.h b/arch/arm/include/asm/compiler.h
index 8155db2f7fa1..5e94e67d1083 100644
--- a/arch/arm/include/asm/compiler.h
+++ b/arch/arm/include/asm/compiler.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_COMPILER_H
#define __ASM_ARM_COMPILER_H
@@ -8,8 +9,21 @@
* This string is meant to be concatenated with the inline asm string and
* will cause compilation to stop on mismatch.
* (for details, see gcc PR 15089)
+ * For compatibility with clang, we have to specifically take the equivalence
+ * of 'r11' <-> 'fp' and 'r12' <-> 'ip' into account as well.
*/
-#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
+#define __asmeq(x, y) \
+ ".ifnc " x "," y "; " \
+ ".ifnc " x y ",fpr11; " \
+ ".ifnc " x y ",r11fp; " \
+ ".ifnc " x y ",ipr12; " \
+ ".ifnc " x y ",r12ip; " \
+ ".err; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif\n\t"
#endif /* __ASM_ARM_COMPILER_H */
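
A sketch of how __asmeq is used, mirroring the PSCI/SMCCC invocation wrappers (the function name is illustrative): the .ifnc directives make assembly fail if the compiler did not actually allocate the operand to the register the asm body hard-codes.

	static unsigned long invoke_fn_sketch(unsigned long function_id)
	{
		register unsigned long r0 asm("r0") = function_id;

		asm volatile(
				__asmeq("%0", "r0")
				"smc	#0"
			: "+r" (r0)
			:
			: "memory");

		return r0;
	}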
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 6493802f880a..a54230e65647 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CP15_H
#define __ASM_ARM_CP15_H
@@ -42,24 +43,25 @@
#ifndef __ASSEMBLY__
#if __LINUX_ARM_ARCH__ >= 4
-#define vectors_high() (cr_alignment & CR_V)
+#define vectors_high() (get_cr() & CR_V)
#else
#define vectors_high() (0)
#endif
#ifdef CONFIG_CPU_CP15
-extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
+#include <asm/vdso/cp15.h>
+
extern unsigned long cr_alignment; /* defined in entry-armv.S */
-static inline unsigned int get_cr(void)
+static inline unsigned long get_cr(void)
{
- unsigned int val;
+ unsigned long val;
asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
return val;
}
-static inline void set_cr(unsigned int val)
+static inline void set_cr(unsigned long val)
{
asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
: : "r" (val) : "cc");
@@ -80,10 +82,6 @@ static inline void set_auxcr(unsigned int val)
isb();
}
-#ifndef CONFIG_SMP
-extern void adjust_cr(unsigned long mask, unsigned long set);
-#endif
-
#define CPACC_FULL(n) (3 << (n * 2))
#define CPACC_SVC(n) (1 << (n * 2))
#define CPACC_DISABLE(n) (0 << (n * 2))
@@ -106,13 +104,17 @@ static inline void set_copro_access(unsigned int val)
#else /* ifdef CONFIG_CPU_CP15 */
/*
- * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
- * minds of the developers). Yielding 0 for machines without a cp15 (and making
- * it read-only) is fine for most cases and saves quite some #ifdeffery.
+ * cr_alignment is tightly coupled to cp15 (at least in the minds of the
+ * developers). Yielding 0 for machines without a cp15 (and making it
+ * read-only) is fine for most cases and saves quite some #ifdeffery.
*/
-#define cr_no_alignment UL(0)
#define cr_alignment UL(0)
+static inline unsigned long get_cr(void)
+{
+ return 0;
+}
+
#endif /* ifdef CONFIG_CPU_CP15 / else */
#endif /* ifndef __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index 2744f0602550..9d8863537aa5 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/cpu.h
*
* Copyright (C) 2004-2005 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_CPU_H
#define __ASM_ARM_CPU_H
@@ -14,7 +11,6 @@
#include <linux/cpu.h>
struct cpuinfo_arm {
- struct cpu cpu;
u32 cpuid;
#ifdef CONFIG_SMP
unsigned int loops_per_jiffy;
diff --git a/arch/arm/include/asm/cpufeature.h b/arch/arm/include/asm/cpufeature.h
new file mode 100644
index 000000000000..16c161b3ff4d
--- /dev/null
+++ b/arch/arm/include/asm/cpufeature.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <linux/log2.h>
+#include <asm/hwcap.h>
+
+/*
+ * Due to the fact that ELF_HWCAP is a 32-bit type on ARM, and given the number
+ * of optional CPU features it defines, ARM's CPU hardware capability bits have
+ * been distributed over separate elf_hwcap and elf_hwcap2 variables, each of
+ * which covers a subset of the available CPU features.
+ *
+ * Currently, only a few of those are suitable for automatic module loading
+ * (which is the primary use case of this facility) and those happen to be all
+ * covered by HWCAP2. So let's only cover those via the cpu_feature()
+ * convenience macro for now (which is used by module_cpu_feature_match()).
+ * However, all capabilities are exposed via the modalias, and can be matched
+ * using an explicit MODULE_DEVICE_TABLE() that uses __hwcap_feature() directly.
+ */
+#define MAX_CPU_FEATURES 64
+#define __hwcap_feature(x) ilog2(HWCAP_ ## x)
+#define __hwcap2_feature(x) (32 + ilog2(HWCAP2_ ## x))
+#define cpu_feature(x) __hwcap2_feature(x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ return num < 32 ? elf_hwcap & BIT(num) : elf_hwcap2 & BIT(num - 32);
+}
+
+#endif
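
A hypothetical module sketch of the autoloading use case described in the comment above, assuming module_cpu_feature_match() from linux/cpufeature.h and the HWCAP2 AES bit:

	#include <linux/module.h>
	#include <linux/cpufeature.h>

	static int __init aes_ce_mod_init(void)
	{
		/* register the accelerated algorithms here */
		return 0;
	}

	/* Builds a cpu: modalias from cpu_feature(AES), so udev loads the
	 * module only on CPUs that advertise the AES instructions. */
	module_cpu_feature_match(AES, aes_ce_mod_init);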
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 2fca60ab513a..397be5ed30e7 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -1,12 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CPUIDLE_H
#define __ASM_ARM_CPUIDLE_H
+#include <asm/proc-fns.h>
+
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
#else
static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
#endif
/* Common ARM WFI state */
@@ -15,7 +20,6 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
.exit_latency = 1,\
.target_residency = 1,\
.power_usage = p,\
- .flags = CPUIDLE_FLAG_TIME_VALID,\
.name = "WFI",\
.desc = "ARM WFI",\
}
@@ -26,4 +30,29 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
*/
#define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX)
+struct device_node;
+
+struct cpuidle_ops {
+ int (*suspend)(unsigned long arg);
+ int (*init)(struct device_node *, int cpu);
+};
+
+struct of_cpuidle_method {
+ const char *method;
+ const struct cpuidle_ops *ops;
+};
+
+#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
+ static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
+ __cpuidle_method_section = { .method = _method, .ops = _ops }
+
+extern int arm_cpuidle_suspend(int index);
+
+extern int arm_cpuidle_init(int cpu);
+
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c) (void)c
+#define arm_cpuidle_restore_irq_context(c) (void)c
+
#endif
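
A hypothetical backend sketching the registration macro above; the method string and callback bodies are illustrative (the PSCI backend, matching enable-method = "psci", is a real in-tree user of this pattern):

	static int my_idle_suspend(unsigned long state_index)
	{
		return 0;	/* enter the state; return on wakeup */
	}

	static int my_idle_init(struct device_node *cpu_node, int cpu)
	{
		return 0;	/* parse DT, map registers, etc. */
	}

	static const struct cpuidle_ops my_idle_ops = {
		.suspend = my_idle_suspend,
		.init	 = my_idle_init,
	};

	CPUIDLE_METHOD_OF_DECLARE(my_idle, "vendor,my-method", &my_idle_ops);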
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 8c25dc4e9851..0163c3e78a67 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -1,15 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CPUTYPE_H
#define __ASM_ARM_CPUTYPE_H
-#include <linux/stringify.h>
-#include <linux/kernel.h>
-
#define CPUID_ID 0
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
#define CPUID_MPUIR 4
#define CPUID_MPIDR 5
+#define CPUID_REVIDR 6
#ifdef CONFIG_CPU_V7M
#define CPUID_EXT_PFR0 0x40
@@ -26,6 +25,8 @@
#define CPUID_EXT_ISAR3 0x6c
#define CPUID_EXT_ISAR4 0x70
#define CPUID_EXT_ISAR5 0x74
+#define CPUID_EXT_ISAR6 0x7c
+#define CPUID_EXT_PFR2 0x90
#else
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
@@ -41,6 +42,8 @@
#define CPUID_EXT_ISAR3 "c2, 3"
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
+#define CPUID_EXT_ISAR6 "c2, 7"
+#define CPUID_EXT_PFR2 "c3, 4"
#endif
#define MPIDR_SMP_BITMASK (0x3 << 30)
@@ -54,29 +57,65 @@
#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_BRCM 0x42
+#define ARM_CPU_IMP_DEC 0x44
#define ARM_CPU_IMP_INTEL 0x69
-#define ARM_CPU_PART_ARM1136 0xB360
-#define ARM_CPU_PART_ARM1156 0xB560
-#define ARM_CPU_PART_ARM1176 0xB760
-#define ARM_CPU_PART_ARM11MPCORE 0xB020
-#define ARM_CPU_PART_CORTEX_A8 0xC080
-#define ARM_CPU_PART_CORTEX_A9 0xC090
-#define ARM_CPU_PART_CORTEX_A5 0xC050
-#define ARM_CPU_PART_CORTEX_A15 0xC0F0
-#define ARM_CPU_PART_CORTEX_A7 0xC070
+/* ARM implemented processors */
+#define ARM_CPU_PART_ARM1136 0x4100b360
+#define ARM_CPU_PART_ARM1156 0x4100b560
+#define ARM_CPU_PART_ARM1176 0x4100b760
+#define ARM_CPU_PART_ARM11MPCORE 0x4100b020
+#define ARM_CPU_PART_CORTEX_A8 0x4100c080
+#define ARM_CPU_PART_CORTEX_A9 0x4100c090
+#define ARM_CPU_PART_CORTEX_A5 0x4100c050
+#define ARM_CPU_PART_CORTEX_A7 0x4100c070
+#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
+#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
+#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
+#define ARM_CPU_PART_CORTEX_A53 0x4100d030
+#define ARM_CPU_PART_CORTEX_A57 0x4100d070
+#define ARM_CPU_PART_CORTEX_A72 0x4100d080
+#define ARM_CPU_PART_CORTEX_A73 0x4100d090
+#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0
+#define ARM_CPU_PART_MASK 0xff00fff0
+
+/* Broadcom implemented processors */
+#define ARM_CPU_PART_BRAHMA_B15 0x420000f0
+#define ARM_CPU_PART_BRAHMA_B53 0x42001000
+
+/* DEC implemented cores */
+#define ARM_CPU_PART_SA1100 0x4400a110
+
+/* Intel implemented cores */
+#define ARM_CPU_PART_SA1110 0x6900b110
+#define ARM_CPU_REV_SA1110_A0 0
+#define ARM_CPU_REV_SA1110_B0 4
+#define ARM_CPU_REV_SA1110_B1 5
+#define ARM_CPU_REV_SA1110_B2 6
+#define ARM_CPU_REV_SA1110_B4 8
#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
#define ARM_CPU_XSCALE_ARCH_V1 0x2000
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+/* Qualcomm implemented cores */
+#define ARM_CPU_PART_SCORPION 0x510002d0
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+
extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
@@ -89,13 +128,18 @@ extern unsigned int processor_id;
__val; \
})
+/*
+ * The memory clobber prevents gcc 4.5 from reordering the mrc before
+ * any is_smp() tests, which can cause undefined instruction aborts on
+ * ARM1136 r0 due to the missing extended CP15 registers.
+ */
#define read_cpuid_ext(ext_reg) \
({ \
unsigned int __val; \
asm("mrc p15, 0, %0, c0, " ext_reg \
: "=r" (__val) \
: \
- : "cc"); \
+ : "memory"); \
__val; \
})
@@ -142,6 +186,16 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return read_cpuid(CPUID_ID);
}
+static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+{
+ return read_cpuid(CPUID_CACHETYPE);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
+{
+ return read_cpuid(CPUID_MPUIR);
+}
+
#elif defined(CONFIG_CPU_V7M)
static inline unsigned int __attribute_const__ read_cpuid_id(void)
@@ -149,6 +203,16 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
}
+static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
+{
+ return readl(BASEADDR_V7M_SCB + MPU_TYPE);
+}
+
#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
static inline unsigned int __attribute_const__ read_cpuid_id(void)
@@ -163,19 +227,29 @@ static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
return (read_cpuid_id() & 0xFF000000) >> 24;
}
-static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+static inline unsigned int __attribute_const__ read_cpuid_revision(void)
{
- return read_cpuid_id() & 0xFFF0;
+ return read_cpuid_id() & 0x0000000f;
}
-static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
+/*
+ * The CPU part number is meaningless without referring to the CPU
+ * implementer: implementers are free to define their own part numbers
+ * which are permitted to clash with other implementer part numbers.
+ */
+static inline unsigned int __attribute_const__ read_cpuid_part(void)
{
- return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+ return read_cpuid_id() & ARM_CPU_PART_MASK;
}
-static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
{
- return read_cpuid(CPUID_CACHETYPE);
+ return read_cpuid_id() & 0xFFF0;
+}
+
+static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
+{
+ return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
}
static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
@@ -188,6 +262,10 @@ static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
return read_cpuid(CPUID_MPIDR);
}
+/* StrongARM-11x0 CPUs */
+#define cpu_is_sa1100() (read_cpuid_part() == ARM_CPU_PART_SA1100)
+#define cpu_is_sa1110() (read_cpuid_part() == ARM_CPU_PART_SA1110)
+
/*
* Intel's XScale3 core supports some v6 features (supersections, L2)
* but advertises itself as v5 as it does not support the v6 ISA. For
@@ -208,10 +286,63 @@ static inline int cpu_is_xsc3(void)
}
#endif
-#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
-#define cpu_is_xscale() 0
+#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3) && \
+ !defined(CONFIG_CPU_MOHAWK)
+#define cpu_is_xscale_family() 0
+#else
+static inline int cpu_is_xscale_family(void)
+{
+ unsigned int id;
+ id = read_cpuid_id() & 0xffffe000;
+
+ switch (id) {
+ case 0x69052000: /* Intel XScale 1 */
+ case 0x69054000: /* Intel XScale 2 */
+ case 0x69056000: /* Intel XScale 3 */
+ case 0x56056000: /* Marvell XScale 3 */
+ case 0x56158000: /* Marvell Mohawk */
+ return 1;
+ }
+
+ return 0;
+}
+#endif
+
+/*
+ * Marvell's PJ4 and PJ4B cores are based on the ARMv7 architecture,
+ * but require a special sequence for enabling coprocessors.
+ * For this reason, we need a way to distinguish them.
+ */
+#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
+static inline int cpu_is_pj4(void)
+{
+ unsigned int id;
+
+ id = read_cpuid_id();
+ if ((id & 0xff0fff00) == 0x560f5800)
+ return 1;
+
+ return 0;
+}
#else
-#define cpu_is_xscale() 1
+#define cpu_is_pj4() 0
#endif
+static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
+ int field)
+{
+ int feature = (features >> field) & 15;
+
+ /* feature registers are signed values */
+ if (feature > 7)
+ feature -= 16;
+
+ return feature;
+}
+
+#define cpuid_feature_extract(reg, field) \
+ cpuid_feature_extract_field(read_cpuid_ext(reg), field)
+
+#endif /* __ASSEMBLY__ */
+
#endif
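
A usage sketch of the new accessors (the demo_ wrapper is ours): read_cpuid_part()
compares the full implementer + part value, and cpuid_feature_extract() pulls a
signed 4-bit ID field, here ID_ISAR0[27:24], which arch setup code probes for
divide-instruction support.

static bool demo_cpu_checks(void)
{
        /* full implementer + part number comparison */
        if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15)
                return true;

        /* signed 4-bit field: > 0 means SDIV/UDIV are implemented */
        return cpuid_feature_extract(CPUID_EXT_ISAR0, 24) > 0;
}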
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
deleted file mode 100644
index 2381199acb7d..000000000000
--- a/arch/arm/include/asm/cti.h
+++ /dev/null
@@ -1,159 +0,0 @@
-#ifndef __ASMARM_CTI_H
-#define __ASMARM_CTI_H
-
-#include <asm/io.h>
-#include <asm/hardware/coresight.h>
-
-/* The registers' definition is from section 3.2 of
- * Embedded Cross Trigger Revision: r0p0
- */
-#define CTICONTROL 0x000
-#define CTISTATUS 0x004
-#define CTILOCK 0x008
-#define CTIPROTECTION 0x00C
-#define CTIINTACK 0x010
-#define CTIAPPSET 0x014
-#define CTIAPPCLEAR 0x018
-#define CTIAPPPULSE 0x01c
-#define CTIINEN 0x020
-#define CTIOUTEN 0x0A0
-#define CTITRIGINSTATUS 0x130
-#define CTITRIGOUTSTATUS 0x134
-#define CTICHINSTATUS 0x138
-#define CTICHOUTSTATUS 0x13c
-#define CTIPERIPHID0 0xFE0
-#define CTIPERIPHID1 0xFE4
-#define CTIPERIPHID2 0xFE8
-#define CTIPERIPHID3 0xFEC
-#define CTIPCELLID0 0xFF0
-#define CTIPCELLID1 0xFF4
-#define CTIPCELLID2 0xFF8
-#define CTIPCELLID3 0xFFC
-
-/* The below are from section 3.6.4 of
- * CoreSight v1.0 Architecture Specification
- */
-#define LOCKACCESS 0xFB0
-#define LOCKSTATUS 0xFB4
-
-/**
- * struct cti - cross trigger interface struct
- * @base: mapped virtual address for the cti base
- * @irq: irq number for the cti
- * @trig_out_for_irq: triger out number which will cause
- * the @irq happen
- *
- * cti struct used to operate cti registers.
- */
-struct cti {
- void __iomem *base;
- int irq;
- int trig_out_for_irq;
-};
-
-/**
- * cti_init - initialize the cti instance
- * @cti: cti instance
- * @base: mapped virtual address for the cti base
- * @irq: irq number for the cti
- * @trig_out: triger out number which will cause
- * the @irq happen
- *
- * called by machine code to pass the board dependent
- * @base, @irq and @trig_out to cti.
- */
-static inline void cti_init(struct cti *cti,
- void __iomem *base, int irq, int trig_out)
-{
- cti->base = base;
- cti->irq = irq;
- cti->trig_out_for_irq = trig_out;
-}
-
-/**
- * cti_map_trigger - use the @chan to map @trig_in to @trig_out
- * @cti: cti instance
- * @trig_in: trigger in number
- * @trig_out: trigger out number
- * @channel: channel number
- *
- * This function maps one trigger in of @trig_in to one trigger
- * out of @trig_out using the channel @chan.
- */
-static inline void cti_map_trigger(struct cti *cti,
- int trig_in, int trig_out, int chan)
-{
- void __iomem *base = cti->base;
- unsigned long val;
-
- val = __raw_readl(base + CTIINEN + trig_in * 4);
- val |= BIT(chan);
- __raw_writel(val, base + CTIINEN + trig_in * 4);
-
- val = __raw_readl(base + CTIOUTEN + trig_out * 4);
- val |= BIT(chan);
- __raw_writel(val, base + CTIOUTEN + trig_out * 4);
-}
-
-/**
- * cti_enable - enable the cti module
- * @cti: cti instance
- *
- * enable the cti module
- */
-static inline void cti_enable(struct cti *cti)
-{
- __raw_writel(0x1, cti->base + CTICONTROL);
-}
-
-/**
- * cti_disable - disable the cti module
- * @cti: cti instance
- *
- * enable the cti module
- */
-static inline void cti_disable(struct cti *cti)
-{
- __raw_writel(0, cti->base + CTICONTROL);
-}
-
-/**
- * cti_irq_ack - clear the cti irq
- * @cti: cti instance
- *
- * clear the cti irq
- */
-static inline void cti_irq_ack(struct cti *cti)
-{
- void __iomem *base = cti->base;
- unsigned long val;
-
- val = __raw_readl(base + CTIINTACK);
- val |= BIT(cti->trig_out_for_irq);
- __raw_writel(val, base + CTIINTACK);
-}
-
-/**
- * cti_unlock - unlock cti module
- * @cti: cti instance
- *
- * unlock the cti module, or else any writes to the cti
- * module is not allowed.
- */
-static inline void cti_unlock(struct cti *cti)
-{
- __raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
-}
-
-/**
- * cti_lock - lock cti module
- * @cti: cti instance
- *
- * lock the cti module, so any writes to the cti
- * module will be not allowed.
- */
-static inline void cti_lock(struct cti *cti)
-{
- __raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
-}
-#endif
diff --git a/arch/arm/include/asm/current.h b/arch/arm/include/asm/current.h
new file mode 100644
index 000000000000..5225cb1c803b
--- /dev/null
+++ b/arch/arm/include/asm/current.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Keith Packard <keithp@keithp.com>
+ * Copyright (c) 2021 Google, LLC <ardb@kernel.org>
+ */
+
+#ifndef _ASM_ARM_CURRENT_H
+#define _ASM_ARM_CURRENT_H
+
+#ifndef __ASSEMBLY__
+#include <asm/insn.h>
+
+struct task_struct;
+
+extern struct task_struct *__current;
+
+static __always_inline __attribute_const__ struct task_struct *get_current(void)
+{
+ struct task_struct *cur;
+
+#if __has_builtin(__builtin_thread_pointer) && defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO)
+ /*
+ * Use the __builtin helper when available - this results in better
+ * code, especially when using GCC in combination with the per-task
+ * stack protector, as the compiler will recognize that it needs to
+ * load the TLS register only once in every function.
+ */
+ cur = __builtin_thread_pointer();
+#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+ asm("0: mrc p15, 0, %0, c13, c0, 3 \n\t"
+#ifdef CONFIG_CPU_V6
+ "1: \n\t"
+ " .subsection 1 \n\t"
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+ !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ "2: " LOAD_SYM_ARMV6(%0, __current) " \n\t"
+ " b 1b \n\t"
+#else
+ "2: ldr %0, 3f \n\t"
+ " ldr %0, [%0] \n\t"
+ " b 1b \n\t"
+ "3: .long __current \n\t"
+#endif
+ " .previous \n\t"
+ " .pushsection \".alt.smp.init\", \"a\" \n\t"
+ " .long 0b - . \n\t"
+ " b . + (2b - 0b) \n\t"
+ " .popsection \n\t"
+#endif
+ : "=r"(cur));
+#elif __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ cur = __current;
+#else
+ asm(LOAD_SYM_ARMV6(%0, __current) : "=r"(cur));
+#endif
+ return cur;
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ARM_CURRENT_H */
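
A small illustrative consumer (ours): whichever of the code paths above gets
compiled in, callers simply dereference the current macro as usual.

#include <linux/printk.h>
#include <linux/sched.h>

static void demo_report_current(void)
{
        /* current expands to get_current() defined above */
        pr_info("running in %s (pid %d)\n",
                current->comm, task_pid_nr(current));
}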
diff --git a/arch/arm/include/asm/dcc.h b/arch/arm/include/asm/dcc.h
new file mode 100644
index 000000000000..d24c4be724e3
--- /dev/null
+++ b/arch/arm/include/asm/dcc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.
+ */
+
+#include <asm/barrier.h>
+
+static inline u32 __dcc_getstatus(void)
+{
+ u32 __ret;
+ asm volatile("mrc p14, 0, %0, c0, c1, 0 @ read comms ctrl reg"
+ : "=r" (__ret) : : "cc");
+
+ return __ret;
+}
+
+static inline char __dcc_getchar(void)
+{
+ char __c;
+
+ asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
+ : "=r" (__c));
+ isb();
+
+ return __c;
+}
+
+static inline void __dcc_putchar(char c)
+{
+ asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
+ : /* no output register */
+ : "r" (c));
+ isb();
+}
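
A hedged sketch of polled output over the helpers above; the TX-full flag is
bit 29 of the DCC status word on ARMv6/v7, and the DEMO_ names are ours.

#include <asm/processor.h>      /* cpu_relax() */

#define DEMO_DCC_STATUS_TX      (1 << 29)

static void demo_dcc_putc(char c)
{
        /* busy-wait until the attached debugger drains the TX register */
        while (__dcc_getstatus() & DEMO_DCC_STATUS_TX)
                cpu_relax();
        __dcc_putchar(c);
}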
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index dff714d886d5..1d069e558d8d 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 1995-2004 Russell King
*
@@ -6,12 +7,39 @@
#ifndef __ASM_ARM_DELAY_H
#define __ASM_ARM_DELAY_H
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/param.h> /* HZ */
+/*
+ * Loop (or tick) based delay:
+ *
+ * loops = loops_per_jiffy * jiffies_per_sec * delay_us / us_per_sec
+ *
+ * where:
+ *
+ * jiffies_per_sec = HZ
+ * us_per_sec = 1000000
+ *
+ * Therefore the constant part is HZ / 1000000 which is a small
+ * fractional number. To make this usable with integer math, we
+ * scale up this constant by 2^31, perform the actual multiplication,
+ * and scale the result back down by 2^31 with a simple shift:
+ *
+ * loops = (loops_per_jiffy * delay_us * UDELAY_MULT) >> 31
+ *
+ * where:
+ *
+ * UDELAY_MULT = 2^31 * HZ / 1000000
+ * = (2^31 / 1000000) * HZ
+ * = 2147.483648 * HZ
+ * = 2147 * HZ + 483648 * HZ / 1000000
+ *
+ * 31 is the biggest scale shift value that won't overflow 32 bits for
+ * delay_us * UDELAY_MULT assuming HZ <= 1000 and delay_us <= 2000.
+ */
#define MAX_UDELAY_MS 2
-#define UDELAY_MULT ((UL(2199023) * HZ) >> 11)
-#define UDELAY_SHIFT 30
+#define UDELAY_MULT UL(2147 * HZ + 483648 * HZ / 1000000)
+#define UDELAY_SHIFT 31
#ifndef __ASSEMBLY__
@@ -34,7 +62,7 @@ extern struct arm_delay_ops {
* it, it means that you're calling udelay() with an out of range value.
*
* With currently imposed limits, this means that we support a max delay
- * of 2000us. Further limits: HZ<=1000 and bogomips<=3355
+ * of 2000us. Further limits: HZ<=1000
*/
extern void __bad_udelay(void);
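
To make the fixed-point scaling above concrete, a small sketch with our own
names, assuming HZ == 100, so UDELAY_MULT = 2147 * 100 + 483648 * 100 / 1000000
= 214748.

/* With lpj = 1000000 (10^8 delay loops/s), a 10 us delay yields ~1000 loops. */
static inline unsigned int demo_udelay_loops(unsigned int lpj, unsigned int us)
{
        /* (1000000ULL * 10 * 214748) >> 31 == 999, within one loop of 10 us */
        return ((unsigned long long)lpj * us * 214748) >> 31;
}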
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index dc662fca9230..c6beb1708c64 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -1,22 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
*/
#ifndef ASMARM_DEVICE_H
#define ASMARM_DEVICE_H
struct dev_archdata {
- struct dma_map_ops *dma_ops;
-#ifdef CONFIG_DMABOUNCE
- struct dmabounce_device_info *dmabounce;
-#endif
-#ifdef CONFIG_IOMMU_API
- void *iommu; /* private IOMMU data */
-#endif
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
+ unsigned int dma_ops_setup:1;
};
struct omap_device;
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index 191ada6e4d2d..d3ef8e416b27 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_DIV64
#define __ASM_ARM_DIV64
@@ -5,9 +6,9 @@
#include <asm/compiler.h>
/*
- * The semantics of do_div() are:
+ * The semantics of __div64_32() are:
*
- * uint32_t do_div(uint64_t *n, uint32_t base)
+ * uint32_t __div64_32(uint64_t *n, uint32_t base)
* {
* uint32_t remainder = *n % base;
* *n = *n / base;
@@ -16,211 +17,101 @@
*
* In other words, a 64-bit dividend with a 32-bit divisor producing
* a 64-bit result and a 32-bit remainder. To accomplish this optimally
- * we call a special __do_div64 helper with completely non standard
- * calling convention for arguments and results (beware).
+ * we override the generic version in lib/div64.c to call our __do_div64
+ * assembly implementation with a completely non-standard calling convention
+ * for arguments and results (beware).
*/
+static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+ register unsigned int __base asm("r4") = base;
+ register unsigned long long __n asm("r0") = *n;
+ register unsigned long long __res asm("r2");
+ unsigned int __rem;
+ asm( __asmeq("%0", "r0")
+ __asmeq("%1", "r2")
+ __asmeq("%2", "r4")
+ "bl __do_div64"
+ : "+r" (__n), "=r" (__res)
+ : "r" (__base)
+ : "ip", "lr", "cc");
+ __rem = __n >> 32;
+ *n = __res;
+ return __rem;
+}
+#define __div64_32 __div64_32
-#ifdef __ARMEB__
-#define __xh "r0"
-#define __xl "r1"
-#else
-#define __xl "r0"
-#define __xh "r1"
-#endif
-
-#define __do_div_asm(n, base) \
-({ \
- register unsigned int __base asm("r4") = base; \
- register unsigned long long __n asm("r0") = n; \
- register unsigned long long __res asm("r2"); \
- register unsigned int __rem asm(__xh); \
- asm( __asmeq("%0", __xh) \
- __asmeq("%1", "r2") \
- __asmeq("%2", "r0") \
- __asmeq("%3", "r4") \
- "bl __do_div64" \
- : "=r" (__rem), "=r" (__res) \
- : "r" (__n), "r" (__base) \
- : "ip", "lr", "cc"); \
- n = __res; \
- __rem; \
-})
-
-#if __GNUC__ < 4 || !defined(CONFIG_AEABI)
+#if !defined(CONFIG_AEABI)
/*
- * gcc versions earlier than 4.0 are simply too problematic for the
- * optimized implementation below. First there is gcc PR 15089 that
- * tend to trig on more complex constructs, spurious .global __udivsi3
- * are inserted even if none of those symbols are referenced in the
- * generated code, and those gcc versions are not able to do constant
- * propagation on long long values anyway.
+ * In OABI configurations, some uses of the do_div function
+ * cause gcc to run out of registers. To work around that,
+ * we can force the use of the out-of-line version for
+ * configurations that build an OABI kernel.
*/
-#define do_div(n, base) __do_div_asm(n, base)
+#define do_div(n, base) __div64_32(&(n), base)
-#elif __GNUC__ >= 4
+#else
-#include <asm/bug.h>
+#ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+static __always_inline
+#else
+static inline
+#endif
+uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
+{
+ unsigned long long res;
+ register unsigned int tmp asm("ip") = 0;
+ bool no_ovf = __builtin_constant_p(m) &&
+ ((m >> 32) + (m & 0xffffffff) < 0x100000000);
-/*
- * If the divisor happens to be constant, we determine the appropriate
- * inverse at compile time to turn the division into a few inline
- * multiplications instead which is much faster. And yet only if compiling
- * for ARMv4 or higher (we need umull/umlal) and if the gcc version is
- * sufficiently recent to perform proper long long constant propagation.
- * (It is unfortunate that gcc doesn't perform all this internally.)
- */
-#define do_div(n, base) \
-({ \
- unsigned int __r, __b = (base); \
- if (!__builtin_constant_p(__b) || __b == 0 || \
- (__LINUX_ARM_ARCH__ < 4 && (__b & (__b - 1)) != 0)) { \
- /* non-constant divisor (or zero): slow path */ \
- __r = __do_div_asm(n, __b); \
- } else if ((__b & (__b - 1)) == 0) { \
- /* Trivial: __b is constant and a power of 2 */ \
- /* gcc does the right thing with this code. */ \
- __r = n; \
- __r &= (__b - 1); \
- n /= __b; \
- } else { \
- /* Multiply by inverse of __b: n/b = n*(p/b)/p */ \
- /* We rely on the fact that most of this code gets */ \
- /* optimized away at compile time due to constant */ \
- /* propagation and only a couple inline assembly */ \
- /* instructions should remain. Better avoid any */ \
- /* code construct that might prevent that. */ \
- unsigned long long __res, __x, __t, __m, __n = n; \
- unsigned int __c, __p, __z = 0; \
- /* preserve low part of n for reminder computation */ \
- __r = __n; \
- /* determine number of bits to represent __b */ \
- __p = 1 << __div64_fls(__b); \
- /* compute __m = ((__p << 64) + __b - 1) / __b */ \
- __m = (~0ULL / __b) * __p; \
- __m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b; \
- /* compute __res = __m*(~0ULL/__b*__b-1)/(__p << 64) */ \
- __x = ~0ULL / __b * __b - 1; \
- __res = (__m & 0xffffffff) * (__x & 0xffffffff); \
- __res >>= 32; \
- __res += (__m & 0xffffffff) * (__x >> 32); \
- __t = __res; \
- __res += (__x & 0xffffffff) * (__m >> 32); \
- __t = (__res < __t) ? (1ULL << 32) : 0; \
- __res = (__res >> 32) + __t; \
- __res += (__m >> 32) * (__x >> 32); \
- __res /= __p; \
- /* Now sanitize and optimize what we've got. */ \
- if (~0ULL % (__b / (__b & -__b)) == 0) { \
- /* those cases can be simplified with: */ \
- __n /= (__b & -__b); \
- __m = ~0ULL / (__b / (__b & -__b)); \
- __p = 1; \
- __c = 1; \
- } else if (__res != __x / __b) { \
- /* We can't get away without a correction */ \
- /* to compensate for bit truncation errors. */ \
- /* To avoid it we'd need an additional bit */ \
- /* to represent __m which would overflow it. */ \
- /* Instead we do m=p/b and n/b=(n*m+m)/p. */ \
- __c = 1; \
- /* Compute __m = (__p << 64) / __b */ \
- __m = (~0ULL / __b) * __p; \
- __m += ((~0ULL % __b + 1) * __p) / __b; \
- } else { \
- /* Reduce __m/__p, and try to clear bit 31 */ \
- /* of __m when possible otherwise that'll */ \
- /* need extra overflow handling later. */ \
- unsigned int __bits = -(__m & -__m); \
- __bits |= __m >> 32; \
- __bits = (~__bits) << 1; \
- /* If __bits == 0 then setting bit 31 is */ \
- /* unavoidable. Simply apply the maximum */ \
- /* possible reduction in that case. */ \
- /* Otherwise the MSB of __bits indicates the */ \
- /* best reduction we should apply. */ \
- if (!__bits) { \
- __p /= (__m & -__m); \
- __m /= (__m & -__m); \
- } else { \
- __p >>= __div64_fls(__bits); \
- __m >>= __div64_fls(__bits); \
- } \
- /* No correction needed. */ \
- __c = 0; \
- } \
- /* Now we have a combination of 2 conditions: */ \
- /* 1) whether or not we need a correction (__c), and */ \
- /* 2) whether or not there might be an overflow in */ \
- /* the cross product (__m & ((1<<63) | (1<<31))) */ \
- /* Select the best insn combination to perform the */ \
- /* actual __m * __n / (__p << 64) operation. */ \
- if (!__c) { \
- asm ( "umull %Q0, %R0, %1, %Q2\n\t" \
- "mov %Q0, #0" \
- : "=&r" (__res) \
- : "r" (__m), "r" (__n) \
- : "cc" ); \
- } else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \
- __res = __m; \
- asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t" \
- "mov %Q0, #0" \
- : "+&r" (__res) \
- : "r" (__m), "r" (__n) \
- : "cc" ); \
- } else { \
- asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
- "cmn %Q0, %Q1\n\t" \
- "adcs %R0, %R0, %R1\n\t" \
- "adc %Q0, %3, #0" \
- : "=&r" (__res) \
- : "r" (__m), "r" (__n), "r" (__z) \
- : "cc" ); \
- } \
- if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \
- asm ( "umlal %R0, %Q0, %R1, %Q2\n\t" \
- "umlal %R0, %Q0, %Q1, %R2\n\t" \
- "mov %R0, #0\n\t" \
- "umlal %Q0, %R0, %R1, %R2" \
- : "+&r" (__res) \
- : "r" (__m), "r" (__n) \
- : "cc" ); \
- } else { \
- asm ( "umlal %R0, %Q0, %R2, %Q3\n\t" \
- "umlal %R0, %1, %Q2, %R3\n\t" \
- "mov %R0, #0\n\t" \
- "adds %Q0, %1, %Q0\n\t" \
- "adc %R0, %R0, #0\n\t" \
- "umlal %Q0, %R0, %R2, %R3" \
- : "+&r" (__res), "+&r" (__z) \
- : "r" (__m), "r" (__n) \
- : "cc" ); \
- } \
- __res /= __p; \
- /* The reminder can be computed with 32-bit regs */ \
- /* only, and gcc is good at that. */ \
- { \
- unsigned int __res0 = __res; \
- unsigned int __b0 = __b; \
- __r -= __res0 * __b0; \
- } \
- /* BUG_ON(__r >= __b || __res * __b + __r != n); */ \
- n = __res; \
- } \
- __r; \
-})
+ if (!bias) {
+ asm ( "umull %Q0, %R0, %Q1, %Q2\n\t"
+ "mov %Q0, #0"
+ : "=&r" (res)
+ : "r" (m), "r" (n)
+ : "cc");
+ } else if (no_ovf) {
+ res = m;
+ asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t"
+ "mov %Q0, #0"
+ : "+&r" (res)
+ : "r" (m), "r" (n)
+ : "cc");
+ } else {
+ asm ( "umull %Q0, %R0, %Q2, %Q3\n\t"
+ "cmn %Q0, %Q2\n\t"
+ "adcs %R0, %R0, %R2\n\t"
+ "adc %Q0, %1, #0"
+ : "=&r" (res), "+&r" (tmp)
+ : "r" (m), "r" (n)
+ : "cc");
+ }
+
+ if (no_ovf) {
+ asm ( "umlal %R0, %Q0, %R1, %Q2\n\t"
+ "umlal %R0, %Q0, %Q1, %R2\n\t"
+ "mov %R0, #0\n\t"
+ "umlal %Q0, %R0, %R1, %R2"
+ : "+&r" (res)
+ : "r" (m), "r" (n)
+ : "cc");
+ } else {
+ asm ( "umlal %R0, %Q0, %R2, %Q3\n\t"
+ "umlal %R0, %1, %Q2, %R3\n\t"
+ "mov %R0, #0\n\t"
+ "adds %Q0, %1, %Q0\n\t"
+ "adc %R0, %R0, #0\n\t"
+ "umlal %Q0, %R0, %R2, %R3"
+ : "+&r" (res), "+&r" (tmp)
+ : "r" (m), "r" (n)
+ : "cc");
+ }
+
+ return res;
+}
+#define __arch_xprod_64 __arch_xprod_64
-/* our own fls implementation to make sure constant propagation is fine */
-#define __div64_fls(bits) \
-({ \
- unsigned int __left = (bits), __nr = 0; \
- if (__left & 0xffff0000) __nr += 16, __left >>= 16; \
- if (__left & 0x0000ff00) __nr += 8, __left >>= 8; \
- if (__left & 0x000000f0) __nr += 4, __left >>= 4; \
- if (__left & 0x0000000c) __nr += 2, __left >>= 2; \
- if (__left & 0x00000002) __nr += 1; \
- __nr; \
-})
+#include <asm-generic/div64.h>
#endif
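
As a usage reminder (sketch only, the demo name is ours): do_div(), built on
__div64_32() above, updates its 64-bit dividend in place and hands back the
32-bit remainder.

static u64 demo_bytes_per_sec(u64 bytes, u32 usecs)
{
        bytes *= 1000000;       /* still a 64-bit dividend */
        do_div(bytes, usecs);   /* quotient left in 'bytes', remainder returned */
        return bytes;
}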
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
deleted file mode 100644
index 3ed37b4d93da..000000000000
--- a/arch/arm/include/asm/dma-contiguous.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef ASMARM_DMA_CONTIGUOUS_H
-#define ASMARM_DMA_CONTIGUOUS_H
-
-#ifdef __KERNEL__
-#ifdef CONFIG_CMA
-
-#include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
-
-void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
-
-#endif
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index a8c56acc8c98..2ce4c5683e6d 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_DMA_IOMMU_H
#define ASMARM_DMA_IOMMU_H
@@ -5,17 +6,17 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <linux/kmemcheck.h>
#include <linux/kref.h>
struct dma_iommu_mapping {
/* iommu specific data */
struct iommu_domain *domain;
- void *bitmap;
- size_t bits;
- unsigned int order;
+ unsigned long **bitmaps; /* array of bitmaps */
+ unsigned int nr_bitmaps; /* nr of elements in array */
+ unsigned int extensions;
+ size_t bitmap_size; /* size of a single bitmap */
+ size_t bits; /* per bitmap */
dma_addr_t base;
spinlock_t lock;
@@ -23,8 +24,7 @@ struct dma_iommu_mapping {
};
struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
- int order);
+arm_iommu_create_mapping(struct device *dev, dma_addr_t base, u64 size);
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
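
A hedged sketch of the reworked allocation interface above; the base/size
values are placeholders, and arm_iommu_attach_device() is the companion API
declared alongside it in this header.

#include <linux/err.h>
#include <linux/sizes.h>

static int demo_setup_iommu(struct device *dev)
{
        struct dma_iommu_mapping *mapping;
        int ret;

        mapping = arm_iommu_create_mapping(dev, 0x80000000, SZ_256M);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);

        ret = arm_iommu_attach_device(dev, mapping);
        if (ret)
                arm_iommu_release_mapping(mapping);
        return ret;
}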
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
deleted file mode 100644
index 5b579b951503..000000000000
--- a/arch/arm/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,273 +0,0 @@
-#ifndef ASMARM_DMA_MAPPING_H
-#define ASMARM_DMA_MAPPING_H
-
-#ifdef __KERNEL__
-
-#include <linux/mm_types.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
-#include <linux/dma-debug.h>
-
-#include <asm-generic/dma-coherent.h>
-#include <asm/memory.h>
-
-#define DMA_ERROR_CODE (~0)
-extern struct dma_map_ops arm_dma_ops;
-extern struct dma_map_ops arm_coherent_dma_ops;
-
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
-{
- if (dev && dev->archdata.dma_ops)
- return dev->archdata.dma_ops;
- return &arm_dma_ops;
-}
-
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
- BUG_ON(!dev);
- dev->archdata.dma_ops = ops;
-}
-
-#include <asm-generic/dma-mapping-common.h>
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- return get_dma_ops(dev)->set_dma_mask(dev, mask);
-}
-
-#ifdef __arch_page_to_dma
-#error Please update to __arch_pfn_to_dma
-#endif
-
-/*
- * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
- * functions used internally by the DMA-mapping API to provide DMA
- * addresses. They must not be used by drivers.
- */
-#ifndef __arch_pfn_to_dma
-static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- return (dma_addr_t)__pfn_to_bus(pfn);
-}
-
-static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
-{
- return __bus_to_pfn(addr);
-}
-
-static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
-{
- return (void *)__bus_to_virt((unsigned long)addr);
-}
-
-static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
-{
- return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
-}
-#else
-static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- return __arch_pfn_to_dma(dev, pfn);
-}
-
-static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
-{
- return __arch_dma_to_pfn(dev, addr);
-}
-
-static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
-{
- return __arch_dma_to_virt(dev, addr);
-}
-
-static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
-{
- return __arch_virt_to_dma(dev, addr);
-}
-#endif
-
-/*
- * DMA errors are defined by all-bits-set in the DMA address.
- */
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- debug_dma_mapping_error(dev, dma_addr);
- return dma_addr == DMA_ERROR_CODE;
-}
-
-/*
- * Dummy noncoherent implementation. We don't provide a dma_cache_sync
- * function so drivers using this API are highlighted with build warnings.
- */
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp)
-{
- return NULL;
-}
-
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle)
-{
-}
-
-extern int dma_supported(struct device *dev, u64 mask);
-
-extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
-
-/**
- * arm_dma_alloc - allocate consistent memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- * @attrs: optinal attributes that specific mapping properties
- *
- * Allocate some memory for a device for performing DMA. This function
- * allocates pages, and will return the CPU-viewed address, and sets @handle
- * to be the device-viewed address.
- */
-extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, struct dma_attrs *attrs);
-
-#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- void *cpu_addr;
- BUG_ON(!ops);
-
- cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
- return cpu_addr;
-}
-
-/**
- * arm_dma_free - free memory allocated by arm_dma_alloc
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_coherent
- * @cpu_addr: CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * arm_dma_alloc().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * during and after this call executing are illegal.
- */
-extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs);
-
-#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
-
- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
-/**
- * arm_dma_mmap - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @size: size of memory originally requested in dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs);
-
-static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
-}
-
-static inline void dma_free_writecombine(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
-}
-
-/*
- * This can be called during early boot to increase the size of the atomic
- * coherent DMA pool above the default value of 256KiB. It must be called
- * before postcore_initcall.
- */
-extern void __init init_dma_coherent_pool_size(unsigned long size);
-
-/*
- * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- * @needs_bounce_fn: called to determine whether buffer needs bouncing
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- */
-extern int dmabounce_register_dev(struct device *, unsigned long,
- unsigned long, int (*)(struct device *, dma_addr_t, size_t));
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
- */
-extern void dmabounce_unregister_dev(struct device *);
-
-
-
-/*
- * The scatter list versions of the above methods.
- */
-extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, struct dma_attrs *attrs);
-extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, struct dma_attrs *attrs);
-extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs);
-
-#endif /* __KERNEL__ */
-#endif
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 58b8c6a0ab1f..e2a1916013e7 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_DMA_H
#define __ASM_ARM_DMA_H
@@ -8,9 +9,12 @@
#define MAX_DMA_ADDRESS 0xffffffffUL
#else
#define MAX_DMA_ADDRESS ({ \
- extern unsigned long arm_dma_zone_size; \
- arm_dma_zone_size ? \
+ extern phys_addr_t arm_dma_zone_size; \
+ arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
+
+extern phys_addr_t arm_dma_limit;
+#define ARCH_LOW_ADDRESS_LIMIT arm_dma_limit
#endif
#ifdef CONFIG_ISA_DMA_API
@@ -19,7 +23,7 @@
* It should not be re-used except for that purpose.
*/
#include <linux/spinlock.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
#include <mach/isa-dma.h>
@@ -105,7 +109,7 @@ extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);
*/
extern void __set_dma_addr(unsigned int chan, void *addr);
#define set_dma_addr(chan, addr) \
- __set_dma_addr(chan, (void *)__bus_to_virt(addr))
+ __set_dma_addr(chan, (void *)isa_bus_to_virt(addr))
/* Set the DMA byte count for this channel
*
@@ -142,10 +146,4 @@ extern int get_dma_residue(unsigned int chan);
#endif /* CONFIG_ISA_DMA_API */
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* __ASM_ARM_DMA_H */
diff --git a/arch/arm/include/asm/dmi.h b/arch/arm/include/asm/dmi.h
new file mode 100644
index 000000000000..32c95dad4cea
--- /dev/null
+++ b/arch/arm/include/asm/dmi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_DMI_H
+#define __ASM_DMI_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define dmi_early_remap(x, l) memremap(x, l, MEMREMAP_WB)
+#define dmi_early_unmap(x, l) memunmap(x)
+#define dmi_remap(x, l) memremap(x, l, MEMREMAP_WB)
+#define dmi_unmap(x) memunmap(x)
+#define dmi_alloc(l) kzalloc(l, GFP_KERNEL)
+
+#endif
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe446425e..d48859fdf32c 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -1,16 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/domain.h
*
* Copyright (C) 1999 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_DOMAIN_H
#define __ASM_PROC_DOMAIN_H
#ifndef __ASSEMBLY__
+#include <linux/thread_info.h>
#include <asm/barrier.h>
#endif
@@ -34,15 +32,14 @@
*/
#ifndef CONFIG_IO_36
#define DOMAIN_KERNEL 0
-#define DOMAIN_TABLE 0
#define DOMAIN_USER 1
#define DOMAIN_IO 2
#else
#define DOMAIN_KERNEL 2
-#define DOMAIN_TABLE 2
#define DOMAIN_USER 1
#define DOMAIN_IO 0
#endif
+#define DOMAIN_VECTORS 3
/*
* Domain types
@@ -55,31 +52,64 @@
#define DOMAIN_MANAGER 1
#endif
-#define domain_val(dom,type) ((type) << (2*(dom)))
+#define domain_mask(dom) ((3) << (2 * (dom)))
+#define domain_val(dom,type) ((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+ domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
#ifndef __ASSEMBLY__
-#ifdef CONFIG_CPU_USE_DOMAINS
-static inline void set_domain(unsigned val)
+#ifdef CONFIG_CPU_CP15_MMU
+static __always_inline unsigned int get_domain(void)
+{
+ unsigned int domain;
+
+ asm(
+ "mrc p15, 0, %0, c3, c0 @ get domain"
+ : "=r" (domain)
+ : "m" (current_thread_info()->cpu_domain));
+
+ return domain;
+}
+
+static __always_inline void set_domain(unsigned int val)
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
- : : "r" (val));
+ : : "r" (val) : "memory");
isb();
}
-
-#define modify_domain(dom,type) \
- do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
- } while (0)
-
#else
-static inline void set_domain(unsigned val) { }
-static inline void modify_domain(unsigned dom, unsigned type) { }
+static __always_inline unsigned int get_domain(void)
+{
+ return 0;
+}
+
+static __always_inline void set_domain(unsigned int val)
+{
+}
#endif
/*
@@ -87,9 +117,11 @@ static inline void modify_domain(unsigned dom, unsigned type) { }
* instructions (inline assembly)
*/
#ifdef CONFIG_CPU_USE_DOMAINS
-#define TUSER(instr) #instr "t"
+#define TUSER(instr) TUSERCOND(instr, )
+#define TUSERCOND(instr, cond) #instr "t" #cond
#else
-#define TUSER(instr) #instr
+#define TUSER(instr) TUSERCOND(instr, )
+#define TUSERCOND(instr, cond) #instr #cond
#endif
#else /* __ASSEMBLY__ */
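
A hedged sketch of how the DACR_UACCESS_* values above are meant to be cycled;
this mirrors the save/enable/restore pattern of the real uaccess helpers in
<asm/uaccess.h>, with demo_ names standing in for the real ones.

static __always_inline unsigned int demo_uaccess_save_and_enable(void)
{
        unsigned int old_domain = get_domain();

        /* grant kernel access to userspace: DOMAIN_USER -> CLIENT */
        set_domain(DACR_UACCESS_ENABLE);
        return old_domain;
}

static __always_inline void demo_uaccess_restore(unsigned int old_domain)
{
        set_domain(old_domain); /* typically back to DACR_UACCESS_DISABLE */
}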
diff --git a/arch/arm/include/asm/ecard.h b/arch/arm/include/asm/ecard.h
index eaea14676d57..7cbe001bf9cc 100644
--- a/arch/arm/include/asm/ecard.h
+++ b/arch/arm/include/asm/ecard.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/ecard.h
*
@@ -194,7 +195,7 @@ void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res,
unsigned long offset, unsigned long maxsize);
#define ecardm_iounmap(__ec, __addr) devm_iounmap(&(__ec)->dev, __addr)
-extern struct bus_type ecard_bus_type;
+extern const struct bus_type ecard_bus_type;
#define ECARD_DEV(_d) container_of((_d), struct expansion_card, dev)
diff --git a/arch/arm/include/asm/edac.h b/arch/arm/include/asm/edac.h
index 0df7a2c1fc3d..16f95dd37b24 100644
--- a/arch/arm/include/asm/edac.h
+++ b/arch/arm/include/asm/edac.h
@@ -1,28 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2011 Calxeda, Inc.
* Based on PPC version Copyright 2007 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef ASM_EDAC_H
#define ASM_EDAC_H
/*
* ECC atomic, DMA, SMP and interrupt safe scrub function.
- * Implements the per arch atomic_scrub() that EDAC use for software
+ * Implements the per-arch edac_atomic_scrub() that EDAC uses for software
* ECC scrubbing. It reads memory and then writes back the original
* value, allowing the hardware to detect and correct memory errors.
*/
-static inline void atomic_scrub(void *va, u32 size)
+
+static inline void edac_atomic_scrub(void *va, u32 size)
{
#if __LINUX_ARM_ARCH__ >= 6
unsigned int *virt_addr = va;
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
new file mode 100644
index 000000000000..e408399d5f0e
--- /dev/null
+++ b/arch/arm/include/asm/efi.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#ifndef __ASM_ARM_EFI_H
+#define __ASM_ARM_EFI_H
+
+#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
+#include <asm/early_ioremap.h>
+#include <asm/fixmap.h>
+#include <asm/highmem.h>
+#include <asm/mach/map.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_EFI
+void efi_init(void);
+void arm_efi_init(void);
+
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);
+
+#define arch_efi_call_virt_setup() efi_virtmap_load()
+#define arch_efi_call_virt_teardown() efi_virtmap_unload()
+
+#ifdef CONFIG_CPU_TTBR0_PAN
+#undef arch_efi_call_virt
+#define arch_efi_call_virt(p, f, args...) ({ \
+ unsigned int flags = uaccess_save_and_enable(); \
+ efi_status_t res = _Generic((p)->f(args), \
+ efi_status_t: (p)->f(args), \
+ default: ((p)->f(args), EFI_ABORTED)); \
+ uaccess_restore(flags); \
+ res; \
+})
+#endif
+
+#define ARCH_EFI_IRQ_FLAGS_MASK \
+ (PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
+ PSR_T_BIT | MODE_MASK)
+
+static inline void efi_set_pgd(struct mm_struct *mm)
+{
+ check_and_switch_context(mm, NULL);
+}
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
+#else
+#define arm_efi_init()
+#endif /* CONFIG_EFI */
+
+/* arch specific definitions used by the stub code */
+
+/*
+ * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
+ * so we will reserve that amount of memory. We have no easy way to tell how
+ * large the uncompressed kernel's code + data footprint will actually be.
+ * If this is insufficient, the decompressor will relocate itself out of the
+ * way before performing the decompression.
+ */
+#define MAX_UNCOMP_KERNEL_SIZE SZ_32M
+
+/*
+ * phys-to-virt patching requires that the physical to virtual offset is a
+ * multiple of 2 MiB. However, using an alignment smaller than TEXT_OFFSET
+ * here throws off the memory allocation logic, so let's use the lowest power
+ * of two greater than 2 MiB and greater than TEXT_OFFSET.
+ */
+#define EFI_PHYS_ALIGN max(UL(SZ_2M), roundup_pow_of_two(TEXT_OFFSET))
+
+/* on ARM, the initrd should be loaded in a lowmem region */
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
+{
+ return round_down(image_addr, SZ_4M) + SZ_512M;
+}
+
+struct efi_arm_entry_state {
+ u32 cpsr_before_ebs;
+ u32 sctlr_before_ebs;
+ u32 cpsr_after_ebs;
+ u32 sctlr_after_ebs;
+};
+
+static inline void efi_capsule_flush_cache_range(void *addr, int size)
+{
+ __cpuc_flush_dcache_area(addr, size);
+}
+
+#endif /* __ASM_ARM_EFI_H */
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1c4800..9f21e170320f 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_ELF_H
#define __ASMARM_ELF_H
+#include <asm/auxvec.h>
#include <asm/hwcap.h>
/*
@@ -19,8 +21,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fp elf_fpregset_t;
-#define EM_ARM 40
-
#define EF_ARM_EABI_MASK 0xff000000
#define EF_ARM_EABI_UNKNOWN 0x00000000
#define EF_ARM_EABI_VER1 0x01000000
@@ -50,17 +50,26 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_NONE 0
#define R_ARM_PC24 1
#define R_ARM_ABS32 2
+#define R_ARM_REL32 3
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
+#define R_ARM_TARGET1 38
#define R_ARM_V4BX 40
#define R_ARM_PREL31 42
#define R_ARM_MOVW_ABS_NC 43
#define R_ARM_MOVT_ABS 44
+#define R_ARM_MOVW_PREL_NC 45
+#define R_ARM_MOVT_PREL 46
+#define R_ARM_ALU_PC_G0_NC 57
+#define R_ARM_ALU_PC_G1_NC 59
+#define R_ARM_LDR_PC_G2 63
#define R_ARM_THM_CALL 10
#define R_ARM_THM_JUMP24 30
#define R_ARM_THM_MOVW_ABS_NC 47
#define R_ARM_THM_MOVT_ABS 48
+#define R_ARM_THM_MOVW_PREL_NC 49
+#define R_ARM_THM_MOVT_PREL 50
/*
* These are used to set parameters in the core dumps.
@@ -99,35 +108,48 @@ struct elf32_hdr;
extern int elf_check_arch(const struct elf32_hdr *);
#define elf_check_arch elf_check_arch
-#define vmcore_elf64_check_arch(x) (0)
+#define ELFOSABI_ARM_FDPIC 65 /* ARM FDPIC platform */
+#define elf_check_fdpic(x) ((x)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
+#define elf_check_const_displacement(x) ((x)->e_flags & EF_ARM_PIC)
+#define ELF_FDPIC_CORE_EFLAGS 0
-extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
-#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
+#define vmcore_elf64_check_arch(x) (0)
-struct task_struct;
-int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
-#define ELF_CORE_COPY_TASK_REGS dump_task_regs
+extern int arm_elf_read_implies_exec(int);
+#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(stk)
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+/* This is the base location for PIE (ET_DYN with INTERP) loads. */
+#define ELF_ET_DYN_BASE 0x400000UL
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
have no such handler. */
#define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0
+#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \
+ do { \
+ (_r)->ARM_r7 = _exec_map_addr; \
+ (_r)->ARM_r8 = _interp_map_addr; \
+ (_r)->ARM_r9 = dynamic_addr; \
+ } while (0)
+
extern void elf_set_personality(const struct elf32_hdr *);
#define SET_PERSONALITY(ex) elf_set_personality(&(ex))
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
+#ifdef CONFIG_MMU
+#ifdef CONFIG_VDSO
+#define ARCH_DLINFO \
+do { \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (elf_addr_t)current->mm->context.vdso); \
+} while (0)
+#endif
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+int arch_setup_additional_pages(struct linux_binprm *, int);
+#endif
#endif
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
deleted file mode 100644
index 88d61815f0c0..000000000000
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ /dev/null
@@ -1,39 +0,0 @@
-#include <asm/assembler.h>
-
-/*
- * Interrupt handling. Preserves r7, r8, r9
- */
- .macro arch_irq_handler_default
- get_irqnr_preamble r6, lr
-1: get_irqnr_and_base r0, r2, r6, lr
- movne r1, sp
- @
- @ routine called with r0 = irq number, r1 = struct pt_regs *
- @
- adrne lr, BSYM(1b)
- bne asm_do_IRQ
-
-#ifdef CONFIG_SMP
- /*
- * XXX
- *
- * this macro assumes that irqstat (r2) and base (r6) are
- * preserved from get_irqnr_and_base above
- */
- ALT_SMP(test_for_ipi r0, r2, r6, lr)
- ALT_UP_B(9997f)
- movne r1, sp
- adrne lr, BSYM(1b)
- bne do_IPI
-#endif
-9997:
- .endm
-
- .macro arch_irq_handler, symbol_name
- .align 5
- .global \symbol_name
-\symbol_name:
- mov r8, lr
- arch_irq_handler_default
- mov pc, r8
- .endm
diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
index 5abaf5bbd985..3c82975d46db 100644
--- a/arch/arm/include/asm/exception.h
+++ b/arch/arm/include/asm/exception.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Annotations for marking C functions as exception handlers.
*
@@ -7,13 +8,8 @@
#ifndef __ASM_ARM_EXCEPTION_H
#define __ASM_ARM_EXCEPTION_H
-#include <linux/ftrace.h>
+#include <linux/interrupt.h>
-#define __exception __attribute__((section(".exception.text")))
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
-#else
-#define __exception_irq_entry __exception
-#endif
#endif /* __ASM_ARM_EXCEPTION_H */
diff --git a/arch/arm/include/asm/fb.h b/arch/arm/include/asm/fb.h
deleted file mode 100644
index d92e99cd8c8a..000000000000
--- a/arch/arm/include/asm/fb.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-
-#include <linux/fb.h>
-#include <linux/fs.h>
-#include <asm/page.h>
-
-static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
- unsigned long off)
-{
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-}
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/arm/include/asm/fiq.h b/arch/arm/include/asm/fiq.h
index d493d0b742a1..6bdfb4a47322 100644
--- a/arch/arm/include/asm/fiq.h
+++ b/arch/arm/include/asm/fiq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/fiq.h
*
diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h
index 15631300c238..23fe0bd405c7 100644
--- a/arch/arm/include/asm/firmware.h
+++ b/arch/arm/include/asm/firmware.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Samsung Electronics.
* Kyungmin Park <kyungmin.park@samsung.com>
* Tomasz Figa <t.figa@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_FIRMWARE_H
@@ -22,14 +19,22 @@
*/
struct firmware_ops {
/*
+ * Inform the firmware we intend to enter CPU idle mode
+ */
+ int (*prepare_idle)(unsigned long mode);
+ /*
* Enters CPU idle mode
*/
- int (*do_idle)(void);
+ int (*do_idle)(unsigned long mode);
/*
* Sets boot address of specified physical CPU
*/
int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr);
/*
+ * Gets boot address of specified physical CPU
+ */
+ int (*get_cpu_boot_addr)(int cpu, unsigned long *boot_addr);
+ /*
* Boots specified physical CPU
*/
int (*cpu_boot)(int cpu);
@@ -37,6 +42,14 @@ struct firmware_ops {
* Initializes L2 cache
*/
int (*l2x0_init)(void);
+ /*
+ * Enter system-wide suspend.
+ */
+ int (*suspend)(void);
+ /*
+ * Restore state of privileged hardware after system-wide suspend.
+ */
+ int (*resume)(void);
};
/* Global pointer for current firmware_ops structure, can't be NULL. */
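
A hedged sketch of a platform filling in a subset of the ops above;
register_firmware_ops() is declared further down in this header, and every
demo_ symbol is a placeholder.

static int demo_do_idle(unsigned long mode)
{
        return -ENOSYS;         /* issue the firmware's idle call here */
}

static int demo_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
{
        return 0;               /* write boot_addr to a firmware mailbox for @cpu */
}

static const struct firmware_ops demo_firmware_ops = {
        .do_idle           = demo_do_idle,
        .set_cpu_boot_addr = demo_set_cpu_boot_addr,
};

/* Early machine code would then call: register_firmware_ops(&demo_firmware_ops); */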
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index bbae919bceb4..707068f852c2 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -1,41 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
-/*
- * Nothing too fancy for now.
- *
- * On ARM we already have well known fixed virtual addresses imposed by
- * the architecture such as the vector page which is located at 0xffff0000,
- * therefore a second level page table is already allocated covering
- * 0xfff00000 upwards.
- *
- * The cache flushing code in proc-xscale.S uses the virtual area between
- * 0xfffe0000 and 0xfffeffff.
- */
-
-#define FIXADDR_START 0xfff00000UL
-#define FIXADDR_TOP 0xfffe0000UL
-#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START)
-
-#define FIX_KMAP_BEGIN 0
-#define FIX_KMAP_END (FIXADDR_SIZE >> PAGE_SHIFT)
-
-#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
-
-extern void __this_fixmap_does_not_exist(void);
-
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
- if (idx >= FIX_KMAP_END)
- __this_fixmap_does_not_exist();
- return __fix_to_virt(idx);
-}
-
-static inline unsigned int virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
+#define FIXADDR_START 0xffc80000UL
+#define FIXADDR_END 0xfff00000UL
+#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
+#include <linux/pgtable.h>
+#include <asm/kmap_size.h>
+
+enum fixed_addresses {
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_permanent_fixed_addresses,
+
+ FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
+
+ /* Support writing RO kernel text via kprobes, jump labels, etc. */
+ FIX_TEXT_POKE0,
+ FIX_TEXT_POKE1,
+
+ __end_of_fixmap_region,
+
+ /*
+ * Share the kmap() region with early_ioremap(): this is guaranteed
+ * not to clash since early_ioremap() is only available before
+ * paging_init(), and kmap() only after.
+ */
+#define NR_FIX_BTMAPS 32
+#define FIX_BTMAPS_SLOTS 7
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+ __end_of_early_ioremap_region
+};
+
+static const enum fixed_addresses __end_of_fixed_addresses =
+ __end_of_fixmap_region > __end_of_early_ioremap_region ?
+ __end_of_fixmap_region : __end_of_early_ioremap_region;
+
+#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL (pgprot_kernel | L_PTE_XN)
+#define FIXMAP_PAGE_RO (FIXMAP_PAGE_NORMAL | L_PTE_RDONLY)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO
+
+#define __early_set_fixmap __set_fixmap
+
+#ifdef CONFIG_MMU
+
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
+
+#include <asm-generic/fixmap.h>
+
+#else
+
+static inline void early_fixmap_init(void) { }
+
+#endif
#endif
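
With the enum-driven layout above, the heavy lifting moves to asm-generic/fixmap.h, which derives fix_to_virt()/virt_to_fix() and the set_fixmap_*() helpers from FIXADDR_TOP and the index enum, so every slot's virtual address is a compile-time constant. A rough sketch, assuming the generic set_fixmap_io() helper (available here because FIXMAP_PAGE_IO is defined):

static void __iomem *map_early_uart(phys_addr_t phys)
{
	/* one page, device-typed, at the reserved early console slot */
	set_fixmap_io(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK);
	return (void __iomem *)(fix_to_virt(FIX_EARLYCON_MEM_BASE) +
				(phys & ~PAGE_MASK));
}
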
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
deleted file mode 100644
index e847d23351ed..000000000000
--- a/arch/arm/include/asm/flat.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * arch/arm/include/asm/flat.h -- uClinux flat-format executables
- */
-
-#ifndef __ARM_FLAT_H__
-#define __ARM_FLAT_H__
-
-#define flat_argvp_envp_on_stack() 1
-#define flat_old_ram_flag(flags) (flags)
-#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) ((void)persistent,get_unaligned(rp))
-#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
-#define flat_get_relocate_addr(rel) (rel)
-#define flat_set_persistent(relval, p) 0
-
-#endif /* __ARM_FLAT_H__ */
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index c9f03eccc9d8..e579f77162e9 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -1,31 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/floppy.h
*
* Copyright (C) 1996-2000 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Note that we don't touch FLOPPY_DMA nor FLOPPY_IRQ here
*/
#ifndef __ASM_ARM_FLOPPY_H
#define __ASM_ARM_FLOPPY_H
-#if 0
-#include <mach/floppy.h>
-#endif
-#define fd_outb(val,port) \
- do { \
- if ((port) == FD_DOR) \
- fd_setdor((val)); \
- else \
- outb((val),(port)); \
+#define fd_outb(val, base, reg) \
+ do { \
+ int new_val = (val); \
+ if ((reg) == FD_DOR) { \
+ if (new_val & 0xf0) \
+ new_val = (new_val & 0x0c) | \
+ floppy_selects[new_val & 3]; \
+ else \
+ new_val &= 0x0c; \
+ } \
+ outb(new_val, (base) + (reg)); \
} while(0)
-#define fd_inb(port) inb((port))
+#define fd_inb(base, reg) inb((base) + (reg))
#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
- IRQF_DISABLED,"floppy",NULL)
+ 0,"floppy",NULL)
#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL)
#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK)
#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK)
@@ -56,69 +55,7 @@ static inline int fd_dma_setup(void *data, unsigned int length,
* to a non-zero track, and then restoring it to track 0. If an error occurs,
* then there is no floppy drive present. [to be put back in again]
*/
-static unsigned char floppy_selects[2][4] =
-{
- { 0x10, 0x21, 0x23, 0x33 },
- { 0x10, 0x21, 0x23, 0x33 }
-};
-
-#define fd_setdor(dor) \
-do { \
- int new_dor = (dor); \
- if (new_dor & 0xf0) \
- new_dor = (new_dor & 0x0c) | floppy_selects[fdc][new_dor & 3]; \
- else \
- new_dor &= 0x0c; \
- outb(new_dor, FD_DOR); \
-} while (0)
-
-/*
- * Someday, we'll automatically detect which drives are present...
- */
-static inline void fd_scandrives (void)
-{
-#if 0
- int floppy, drive_count;
-
- fd_disable_irq();
- raw_cmd = &default_raw_cmd;
- raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_SEEK;
- raw_cmd->track = 0;
- raw_cmd->rate = ?;
- drive_count = 0;
- for (floppy = 0; floppy < 4; floppy ++) {
- current_drive = drive_count;
- /*
- * Turn on floppy motor
- */
- if (start_motor(redo_fd_request))
- continue;
- /*
- * Set up FDC
- */
- fdc_specify();
- /*
- * Tell FDC to recalibrate
- */
- output_byte(FD_RECALIBRATE);
- LAST_OUT(UNIT(floppy));
- /* wait for command to complete */
- if (!successful) {
- int i;
- for (i = drive_count; i < 3; i--)
- floppy_selects[fdc][i] = floppy_selects[fdc][i + 1];
- floppy_selects[fdc][3] = 0;
- floppy -= 1;
- } else
- drive_count++;
- }
-#else
- floppy_selects[0][0] = 0x10;
- floppy_selects[0][1] = 0x21;
- floppy_selects[0][2] = 0x23;
- floppy_selects[0][3] = 0x33;
-#endif
-}
+static unsigned char floppy_selects[4] = { 0x10, 0x21, 0x23, 0x33 };
#define FDC1 (0x3f0)
@@ -128,8 +65,6 @@ static inline void fd_scandrives (void)
#define N_FDC 1
#define N_DRIVE 4
-#define CROSS_64KB(a,s) (0)
-
/*
* This allows people to reverse the order of
* fd0 and fd1, in case their hardware is
@@ -138,9 +73,7 @@ static inline void fd_scandrives (void)
*/
static void driveswap(int *ints, int dummy, int dummy2)
{
- floppy_selects[0][0] ^= floppy_selects[0][1];
- floppy_selects[0][1] ^= floppy_selects[0][0];
- floppy_selects[0][0] ^= floppy_selects[0][1];
+ swap(floppy_selects[0], floppy_selects[1]);
}
#define EXTRA_FLOPPY_PARAMS ,{ "driveswap", &driveswap, NULL, 0, 0 }
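
A worked example of the DOR rewrite now folded into fd_outb(): writing 0x1d (control bits 0x0c plus drive 1 selected, with a motor bit set) takes the first branch since 0x1d & 0xf0 is non-zero, so the value reaching the hardware becomes

	(0x1d & 0x0c) | floppy_selects[0x1d & 3] = 0x0c | 0x21 = 0x2d

while a write with none of the 0xf0 bits set is masked down to just the 0x0c control bits. Flattening the old two-controller floppy_selects[2][4] table to a four-entry array matches N_FDC being 1 on this platform.
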
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de5354746924..78217b0e76e3 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/fncpy.h - helper macros for function body copying
*
* Copyright (C) 2011 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
diff --git a/arch/arm/include/asm/fpstate.h b/arch/arm/include/asm/fpstate.h
index 3ad4c10d0d84..e29d9c7a5238 100644
--- a/arch/arm/include/asm/fpstate.h
+++ b/arch/arm/include/asm/fpstate.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/fpstate.h
*
* Copyright (C) 1995 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_FPSTATE_H
@@ -49,9 +46,6 @@ union vfp_state {
struct vfp_hard_struct hard;
};
-extern void vfp_flush_thread(union vfp_state *);
-extern void vfp_release_thread(union vfp_state *);
-
#define FP_HARD_SIZE 35
struct fp_hard_struct {
@@ -80,14 +74,6 @@ union fp_state {
#define FP_SIZE (sizeof(union fp_state) / sizeof(int))
-struct crunch_state {
- unsigned int mvdx[16][2];
- unsigned int mvax[4][3];
- unsigned int dspsc[2];
-};
-
-#define CRUNCH_SIZE sizeof(struct crunch_state)
-
#endif
#endif
diff --git a/arch/arm/include/asm/fpu.h b/arch/arm/include/asm/fpu.h
new file mode 100644
index 000000000000..2ae50bdce59b
--- /dev/null
+++ b/arch/arm/include/asm/fpu.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 SiFive
+ */
+
+#ifndef __ASM_FPU_H
+#define __ASM_FPU_H
+
+#include <asm/neon.h>
+
+#define kernel_fpu_available() cpu_has_neon()
+#define kernel_fpu_begin() kernel_neon_begin()
+#define kernel_fpu_end() kernel_neon_end()
+
+#endif /* ! __ASM_FPU_H */
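
These three wrappers let generic kernel_fpu_*() users run on ARM by delegating to the NEON helpers from asm/neon.h. The usual constraints carry over: check availability first and only use the unit from contexts where kernel_neon_begin() is legal (process context, not hard IRQ). A minimal usage sketch:

static void do_simd_work(void)
{
	if (!kernel_fpu_available())
		return;		/* fall back to a scalar path */

	kernel_fpu_begin();
	/* NEON-using code (e.g. via compiler intrinsics) goes here */
	kernel_fpu_end();
}
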
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index f89515adac60..5be3ddc96a50 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -1,18 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
-extern void mcount(void);
extern void __gnu_mcount_nc(void);
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
-#ifdef CONFIG_OLD_MCOUNT
- bool old_mcount;
+#ifdef CONFIG_ARM_MODULE_PLTS
+ struct module *mod;
#endif
};
@@ -21,9 +27,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
/* With Thumb-2, the recorded addresses have the lsb set */
return addr & ~1;
}
-
-extern void ftrace_caller_old(void);
-extern void ftrace_call_old(void);
#endif
#endif
@@ -45,22 +48,36 @@ void *return_address(unsigned int);
#else
-extern inline void *return_address(unsigned int level)
+static inline void *return_address(unsigned int level)
{
- return NULL;
+ return NULL;
}
#endif
-#define HAVE_ARCH_CALLER_ADDR
+#define ftrace_return_address(n) return_address(n)
+
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+ const char *name)
+{
+ if (!strcmp(sym, "sys_mmap2"))
+ sym = "sys_mmap_pgoff";
+ else if (!strcmp(sym, "sys_statfs64_wrapper"))
+ sym = "sys_statfs64";
+ else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+ sym = "sys_fstatfs64";
+ else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+ sym = "sys_fadvise64_64";
+
+ /* Ignore case since sym may start with "SyS" instead of "sys" */
+ return !strcasecmp(sym, name);
+}
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 ((unsigned long)return_address(1))
-#define CALLER_ADDR2 ((unsigned long)return_address(2))
-#define CALLER_ADDR3 ((unsigned long)return_address(3))
-#define CALLER_ADDR4 ((unsigned long)return_address(4))
-#define CALLER_ADDR5 ((unsigned long)return_address(5))
-#define CALLER_ADDR6 ((unsigned long)return_address(6))
+void prepare_ftrace_return(unsigned long *parent, unsigned long self,
+ unsigned long frame_pointer,
+ unsigned long stack_pointer);
#endif /* ifndef __ASSEMBLY__ */
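
The new matcher exists because several ARM syscalls live behind differently named entry points. Ftrace compares the metadata name against the symbol found in kallsyms, so the symbol is canonicalised first:

/*
 * Example: the tracepoint metadata says "sys_mmap_pgoff", but the ARM
 * symbol table contains "sys_mmap2".  The matcher rewrites the symbol
 * before the final comparison, so
 *	arch_syscall_match_sym_name("sys_mmap2", "sys_mmap_pgoff")
 * ends up doing strcasecmp("sys_mmap_pgoff", "sys_mmap_pgoff") == 0
 * and reports a match.
 */
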
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf597f6e6..a9151884bc85 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_FUTEX_H
#define _ASM_ARM_FUTEX_H
#ifdef __KERNEL__
-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
-/* ARM doesn't provide unprivileged exclusive memory accessors */
-#include <asm-generic/futex.h>
-#else
-
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
@@ -18,7 +14,7 @@
" .align 3\n" \
" .long 1b, 4f, 2b, 4f\n" \
" .popsection\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, " err_reg "\n" \
" b 3b\n" \
@@ -27,7 +23,11 @@
#ifdef CONFIG_SMP
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags; \
smp_mb(); \
+ prefetchw(uaddr); \
+ __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
@@ -38,19 +38,25 @@
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret;
u32 val;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
smp_mb();
+ /* Prefetching cannot fault */
+ prefetchw(uaddr);
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
@@ -63,6 +69,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=&r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
smp_mb();
*uval = val;
@@ -75,6 +82,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: " TUSER(ldr) " %1, [%3]\n" \
" " insn "\n" \
@@ -83,50 +92,54 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret = 0;
u32 val;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
+ preempt_disable();
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ " .syntax unified\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
" it eq @ explicit IT needed for the 2b label\n"
- "2: " TUSER(streq) " %3, [%4]\n"
+ "2: " TUSERCOND(str, eq) " %3, [%4]\n"
__futex_atomic_ex_table("%5")
: "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
*uval = val;
+ preempt_enable();
+
return ret;
}
#endif /* !SMP */
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
- pagefault_disable(); /* implies preempt_disable() */
+#ifndef CONFIG_SMP
+ preempt_disable();
+#endif
switch (op) {
case FUTEX_OP_SET:
@@ -148,22 +161,20 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
ret = -ENOSYS;
}
- pagefault_enable(); /* subsumes preempt_enable() */
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+#ifndef CONFIG_SMP
+ preempt_enable();
+#endif
+
+ /*
+ * Store unconditionally. If ret != 0 the extra store is the least
+ * of the worries but GCC cannot figure out that __futex_atomic_op()
+ * is either setting ret to -EFAULT or storing the old value in
+	 * oldval, which results in an uninitialized warning at the call site.
+ */
+ *oval = oldval;
+
return ret;
}
-#endif /* !(CPU_USE_DOMAINS && SMP) */
#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
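
The common thread in the futex changes above is the user-access bracketing: every LDREX/STREX or LDRT/STRT sequence that touches user memory is now wrapped so the window is only open for the access itself, which is what keeps the code safe under SW-domain/PAN-style protection. The shape of the pattern, lifted out of the macros:

	unsigned int ua_flags;

	ua_flags = uaccess_save_and_enable();	/* open the user-access window */
	/* ...exclusive or unprivileged loads/stores to user memory... */
	uaccess_restore(ua_flags);		/* close it again */

The SMP variant also gains a prefetchw(uaddr) before the window opens: prefetches cannot fault, and warming the cache line shortens the LDREX/STREX critical section.
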
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index c81adc08b3fb..4186fbf7341f 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/glue-cache.h
*
* Copyright (C) 1999-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_CACHE_H
#define ASM_GLUE_CACHE_H
@@ -117,49 +114,37 @@
# endif
#endif
+#if defined(CONFIG_CACHE_B15_RAC)
+# define MULTI_CACHE 1
+#endif
+
+#ifdef CONFIG_CPU_CACHE_NOP
+# define MULTI_CACHE 1
+#endif
+
#if defined(CONFIG_CPU_V7M)
-# ifdef _CACHE
# define MULTI_CACHE 1
-# else
-# define _CACHE nop
-# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
-#ifndef __ASSEMBLER__
-extern inline void nop_flush_icache_all(void) { }
-extern inline void nop_flush_kern_cache_all(void) { }
-extern inline void nop_flush_kern_cache_louis(void) { }
-extern inline void nop_flush_user_cache_all(void) { }
-extern inline void nop_flush_user_cache_range(unsigned long a,
- unsigned long b, unsigned int c) { }
-
-extern inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
-extern inline int nop_coherent_user_range(unsigned long a,
- unsigned long b) { return 0; }
-extern inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
-
-extern inline void nop_dma_flush_range(const void *a, const void *b) { }
-
-extern inline void nop_dma_map_area(const void *s, size_t l, int f) { }
-extern inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
-#endif
-
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
+/* This function only has a dedicated assembly callback on the v7 cache */
+#ifdef CONFIG_CPU_CACHE_V7
#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis)
+#else
+#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_all)
+#endif
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
-#define dmac_map_area __glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
#endif
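
__glue() is plain token pasting, so on a single-cache build the names above collapse to direct calls at compile time; a sketch of the expansion, assuming a kernel where _CACHE is v7:

/*
 *	__cpuc_flush_kern_all
 *	  -> __glue(v7, _flush_kern_cache_all)
 *	  -> v7_flush_kern_cache_all()		(direct branch)
 *
 * With MULTI_CACHE defined instead, the same names are routed through
 * the cpu_cache function table and resolved at run time, which is why
 * the new B15 RAC, CACHE_NOP and V7M cases above simply force
 * MULTI_CACHE.
 */
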
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 6b70f1b46a6e..209e46c02ddd 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/glue-df.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_DF_H
#define ASM_GLUE_DF_H
@@ -31,14 +28,6 @@
#undef CPU_DABORT_HANDLER
#undef MULTI_DABORT
-#if defined(CONFIG_CPU_ARM710)
-# ifdef CPU_DABORT_HANDLER
-# define MULTI_DABORT 1
-# else
-# define CPU_DABORT_HANDLER cpu_arm7_data_abort
-# endif
-#endif
-
#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
diff --git a/arch/arm/include/asm/glue-pf.h b/arch/arm/include/asm/glue-pf.h
index d385f37c13f0..a033929fad3a 100644
--- a/arch/arm/include/asm/glue-pf.h
+++ b/arch/arm/include/asm/glue-pf.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/glue-pf.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PF_H
#define ASM_GLUE_PF_H
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index 74a8b84f3cb1..52df74aa3c2c 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/glue-proc.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PROC_H
#define ASM_GLUE_PROC_H
@@ -221,15 +218,6 @@
# endif
#endif
-#ifdef CONFIG_CPU_V7
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_v7
-# endif
-#endif
-
#ifdef CONFIG_CPU_V7M
# ifdef CPU_NAME
# undef MULTI_CPU
@@ -248,6 +236,15 @@
# endif
#endif
+#ifdef CONFIG_CPU_V7
+/*
+ * Cortex-A9 needs a different suspend/resume function, so we need
+ * multiple CPU support for ARMv7 anyway.
+ */
+# undef MULTI_CPU
+# define MULTI_CPU
+#endif
+
#ifndef MULTI_CPU
#define cpu_proc_init __glue(CPU_NAME,_proc_init)
#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h
index fbf71d75ec83..377fd4cfab01 100644
--- a/arch/arm/include/asm/glue.h
+++ b/arch/arm/include/asm/glue.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/glue.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This file provides the glue to stick the processor-specific bits
* into the kernel in an efficient manner. The idea is to use branches
* when we're only targeting one class of TLB, or indirect calls
diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h
deleted file mode 100644
index 477e0206e016..000000000000
--- a/arch/arm/include/asm/gpio.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef _ARCH_ARM_GPIO_H
-#define _ARCH_ARM_GPIO_H
-
-#if CONFIG_ARCH_NR_GPIO > 0
-#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
-#endif
-
-/* not all ARM platforms necessarily support this API ... */
-#ifdef CONFIG_NEED_MACH_GPIO_H
-#include <mach/gpio.h>
-#endif
-
-#ifndef __ARM_GPIOLIB_COMPLEX
-/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */
-#include <asm-generic/gpio.h>
-
-/* The trivial gpiolib dispatchers */
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-#endif
-
-/*
- * Provide a default gpio_to_irq() which should satisfy every case.
- * However, some platforms want to do this differently, so allow them
- * to override it.
- */
-#ifndef gpio_to_irq
-#define gpio_to_irq __gpio_to_irq
-#endif
-
-#endif /* _ARCH_ARM_GPIO_H */
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a2df63..706efafbf972 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -1,32 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
-#include <linux/cache.h>
-#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
-
-typedef struct {
- unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
- unsigned int ipi_irqs[NR_IPI];
-#endif
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
-
-#ifdef CONFIG_SMP
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#else
-#define smp_irq_stat_cpu(cpu) 0
-#endif
-
-#define arch_irq_stat_cpu smp_irq_stat_cpu
-
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
#endif /* __ASM_HARDIRQ_H */
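
The `#define ack_bad_irq ack_bad_irq` line is the standard opt-out handshake with asm-generic: the generic header only supplies a fallback when the architecture has not already claimed the name. Paraphrased shape of the guard on the asm-generic side (not part of this patch):

/* include/asm-generic/hardirq.h, roughly: */
#ifndef ack_bad_irq
static inline void ack_bad_irq(unsigned int irq)
{
	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
}
#endif
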
diff --git a/arch/arm/include/asm/hardware/arm_timer.h b/arch/arm/include/asm/hardware/arm_timer.h
deleted file mode 100644
index d6030ff599db..000000000000
--- a/arch/arm/include/asm/hardware/arm_timer.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __ASM_ARM_HARDWARE_ARM_TIMER_H
-#define __ASM_ARM_HARDWARE_ARM_TIMER_H
-
-/*
- * ARM timer implementation, found in Integrator, Versatile and Realview
- * platforms. Not all platforms support all registers and bits in these
- * registers, so we mark them with A for Integrator AP, C for Integrator
- * CP, V for Versatile and R for Realview.
- *
- * Integrator AP has 16-bit timers, Integrator CP, Versatile and Realview
- * can have 16-bit or 32-bit selectable via a bit in the control register.
- *
- * Every SP804 contains two identical timers.
- */
-#define TIMER_1_BASE 0x00
-#define TIMER_2_BASE 0x20
-
-#define TIMER_LOAD 0x00 /* ACVR rw */
-#define TIMER_VALUE 0x04 /* ACVR ro */
-#define TIMER_CTRL 0x08 /* ACVR rw */
-#define TIMER_CTRL_ONESHOT (1 << 0) /* CVR */
-#define TIMER_CTRL_32BIT (1 << 1) /* CVR */
-#define TIMER_CTRL_DIV1 (0 << 2) /* ACVR */
-#define TIMER_CTRL_DIV16 (1 << 2) /* ACVR */
-#define TIMER_CTRL_DIV256 (2 << 2) /* ACVR */
-#define TIMER_CTRL_IE (1 << 5) /* VR */
-#define TIMER_CTRL_PERIODIC (1 << 6) /* ACVR */
-#define TIMER_CTRL_ENABLE (1 << 7) /* ACVR */
-
-#define TIMER_INTCLR 0x0c /* ACVR wo */
-#define TIMER_RIS 0x10 /* CVR ro */
-#define TIMER_MIS 0x14 /* CVR ro */
-#define TIMER_BGLOAD 0x18 /* CVR rw */
-
-#endif
diff --git a/arch/arm/include/asm/hardware/cache-aurora-l2.h b/arch/arm/include/asm/hardware/cache-aurora-l2.h
new file mode 100644
index 000000000000..9694808ee97c
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-aurora-l2.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AURORA shared L2 cache controller support
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#ifndef __ASM_ARM_HARDWARE_AURORA_L2_H
+#define __ASM_ARM_HARDWARE_AURORA_L2_H
+
+#define AURORA_SYNC_REG 0x700
+#define AURORA_RANGE_BASE_ADDR_REG 0x720
+#define AURORA_FLUSH_PHY_ADDR_REG 0x7f0
+#define AURORA_INVAL_RANGE_REG 0x774
+#define AURORA_CLEAN_RANGE_REG 0x7b4
+#define AURORA_FLUSH_RANGE_REG 0x7f4
+
+#define AURORA_ACR_REPLACEMENT_OFFSET 27
+#define AURORA_ACR_REPLACEMENT_MASK \
+ (0x3 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_WAYRR \
+ (0 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_LFSR \
+ (1 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \
+ (3 << AURORA_ACR_REPLACEMENT_OFFSET)
+
+#define AURORA_ACR_PARITY_EN (1 << 21)
+#define AURORA_ACR_ECC_EN (1 << 20)
+
+#define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET 0
+#define AURORA_ACR_FORCE_WRITE_POLICY_MASK \
+ (0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_POLICY_DIS \
+ (0 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_BACK_POLICY \
+ (1 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+#define AURORA_ACR_FORCE_WRITE_THRO_POLICY \
+ (2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
+
+#define AURORA_ERR_CNT_REG 0x600
+#define AURORA_ERR_ATTR_CAP_REG 0x608
+#define AURORA_ERR_ADDR_CAP_REG 0x60c
+#define AURORA_ERR_WAY_CAP_REG 0x610
+#define AURORA_ERR_INJECT_CTL_REG 0x614
+#define AURORA_ERR_INJECT_MASK_REG 0x618
+
+#define AURORA_ERR_CNT_CLR_OFFSET 31
+#define AURORA_ERR_CNT_CLR \
+ (0x1 << AURORA_ERR_CNT_CLR_OFFSET)
+#define AURORA_ERR_CNT_UE_OFFSET 16
+#define AURORA_ERR_CNT_UE_MASK \
+ (0x7fff << AURORA_ERR_CNT_UE_OFFSET)
+#define AURORA_ERR_CNT_CE_OFFSET 0
+#define AURORA_ERR_CNT_CE_MASK \
+ (0xffff << AURORA_ERR_CNT_CE_OFFSET)
+
+#define AURORA_ERR_ATTR_SRC_OFF 16
+#define AURORA_ERR_ATTR_SRC_MSK \
+ (0x7 << AURORA_ERR_ATTR_SRC_OFF)
+#define AURORA_ERR_ATTR_TXN_OFF 12
+#define AURORA_ERR_ATTR_TXN_MSK \
+ (0xf << AURORA_ERR_ATTR_TXN_OFF)
+#define AURORA_ERR_ATTR_ERR_OFF 8
+#define AURORA_ERR_ATTR_ERR_MSK \
+ (0x3 << AURORA_ERR_ATTR_ERR_OFF)
+#define AURORA_ERR_ATTR_CAP_VALID_OFF 0
+#define AURORA_ERR_ATTR_CAP_VALID \
+ (0x1 << AURORA_ERR_ATTR_CAP_VALID_OFF)
+
+#define AURORA_ERR_ADDR_CAP_ADDR_MASK 0xffffffe0
+
+#define AURORA_ERR_WAY_IDX_OFF 8
+#define AURORA_ERR_WAY_IDX_MSK \
+ (0xfff << AURORA_ERR_WAY_IDX_OFF)
+#define AURORA_ERR_WAY_CAP_WAY_OFFSET 1
+#define AURORA_ERR_WAY_CAP_WAY_MASK \
+ (0xf << AURORA_ERR_WAY_CAP_WAY_OFFSET)
+
+#define AURORA_ERR_INJECT_CTL_ADDR_MASK 0xfffffff0
+#define AURORA_ERR_ATTR_TXN_OFF 12
+#define AURORA_ERR_INJECT_CTL_EN_MASK 0x3
+#define AURORA_ERR_INJECT_CTL_EN_PARITY 0x2
+#define AURORA_ERR_INJECT_CTL_EN_ECC 0x1
+
+#define AURORA_MAX_RANGE_SIZE 1024
+
+#define AURORA_WAY_SIZE_SHIFT 2
+
+#define AURORA_CTRL_FW 0x100
+
+/* choose a number outside L2X0_CACHE_ID_PART_MASK to be sure to make
+ * the distinction between a number coming from hardware and a number
+ * coming from the device tree */
+#define AURORA_CACHE_ID 0x100
+
+#endif /* __ASM_ARM_HARDWARE_AURORA_L2_H */
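
The OFFSET/MASK pairs above are meant for the usual read-modify-write composition. A sketch of forcing write-back allocation with semi-PLRU replacement, assuming Aurora exposes its auxiliary control register at the common L2X0_AUX_CTRL offset (as the l2x0 driver treats it) and that base is a hypothetical ioremapped controller base:

	u32 acr = readl_relaxed(base + L2X0_AUX_CTRL);

	acr &= ~(AURORA_ACR_REPLACEMENT_MASK |
		 AURORA_ACR_FORCE_WRITE_POLICY_MASK);
	acr |= AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU |
	       AURORA_ACR_FORCE_WRITE_BACK_POLICY;
	writel_relaxed(acr, base + L2X0_AUX_CTRL);
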
diff --git a/arch/arm/include/asm/hardware/cache-b15-rac.h b/arch/arm/include/asm/hardware/cache-b15-rac.h
new file mode 100644
index 000000000000..3d43ec06fd35
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-b15-rac.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_ARM_HARDWARE_CACHE_B15_RAC_H
+#define __ASM_ARM_HARDWARE_CACHE_B15_RAC_H
+
+#ifndef __ASSEMBLY__
+
+void b15_flush_kern_cache_all(void);
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/hardware/cache-feroceon-l2.h b/arch/arm/include/asm/hardware/cache-feroceon-l2.h
new file mode 100644
index 000000000000..eb2e7b7f70a8
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-feroceon-l2.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/hardware/cache-feroceon-l2.h
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ */
+
+extern void __init feroceon_l2_init(int l2_wt_override);
+extern int __init feroceon_of_init(void);
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 3b2c40b5bfa2..5a7ee70f561c 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -1,33 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/cache-l2x0.h
*
* Copyright (C) 2007 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_ARM_HARDWARE_L2X0_H
#define __ASM_ARM_HARDWARE_L2X0_H
#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
#define L2X0_CACHE_ID 0x000
#define L2X0_CACHE_TYPE 0x004
#define L2X0_CTRL 0x100
#define L2X0_AUX_CTRL 0x104
-#define L2X0_TAG_LATENCY_CTRL 0x108
-#define L2X0_DATA_LATENCY_CTRL 0x10C
+#define L310_TAG_LATENCY_CTRL 0x108
+#define L310_DATA_LATENCY_CTRL 0x10C
#define L2X0_EVENT_CNT_CTRL 0x200
#define L2X0_EVENT_CNT1_CFG 0x204
#define L2X0_EVENT_CNT0_CFG 0x208
@@ -54,53 +44,104 @@
#define L2X0_LOCKDOWN_WAY_D_BASE 0x900
#define L2X0_LOCKDOWN_WAY_I_BASE 0x904
#define L2X0_LOCKDOWN_STRIDE 0x08
-#define L2X0_ADDR_FILTER_START 0xC00
-#define L2X0_ADDR_FILTER_END 0xC04
+#define L310_ADDR_FILTER_START 0xC00
+#define L310_ADDR_FILTER_END 0xC04
#define L2X0_TEST_OPERATION 0xF00
#define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30
#define L2X0_DEBUG_CTRL 0xF40
-#define L2X0_PREFETCH_CTRL 0xF60
-#define L2X0_POWER_CTRL 0xF80
-#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1)
-#define L2X0_STNDBY_MODE_EN (1 << 0)
+#define L310_PREFETCH_CTRL 0xF60
+#define L310_POWER_CTRL 0xF80
+#define L310_DYNAMIC_CLK_GATING_EN (1 << 1)
+#define L310_STNDBY_MODE_EN (1 << 0)
/* Registers shifts and masks */
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
+#define L2X0_CACHE_ID_PART_L220 (2 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
#define L2X0_CACHE_ID_RTL_MASK 0x3f
-#define L2X0_CACHE_ID_RTL_R0P0 0x0
-#define L2X0_CACHE_ID_RTL_R1P0 0x2
-#define L2X0_CACHE_ID_RTL_R2P0 0x4
-#define L2X0_CACHE_ID_RTL_R3P0 0x5
-#define L2X0_CACHE_ID_RTL_R3P1 0x6
-#define L2X0_CACHE_ID_RTL_R3P2 0x8
-
-#define L2X0_AUX_CTRL_MASK 0xc0000fff
+#define L210_CACHE_ID_RTL_R0P2_02 0x00
+#define L210_CACHE_ID_RTL_R0P1 0x01
+#define L210_CACHE_ID_RTL_R0P2_01 0x02
+#define L210_CACHE_ID_RTL_R0P3 0x03
+#define L210_CACHE_ID_RTL_R0P4 0x0b
+#define L210_CACHE_ID_RTL_R0P5 0x0f
+#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06
+#define L310_CACHE_ID_RTL_R0P0 0x00
+#define L310_CACHE_ID_RTL_R1P0 0x02
+#define L310_CACHE_ID_RTL_R2P0 0x04
+#define L310_CACHE_ID_RTL_R3P0 0x05
+#define L310_CACHE_ID_RTL_R3P1 0x06
+#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07
+#define L310_CACHE_ID_RTL_R3P2 0x08
+#define L310_CACHE_ID_RTL_R3P3 0x09
+
+#define L2X0_EVENT_CNT_CTRL_ENABLE BIT(0)
+
+#define L2X0_EVENT_CNT_CFG_SRC_SHIFT 2
+#define L2X0_EVENT_CNT_CFG_SRC_MASK 0xf
+#define L2X0_EVENT_CNT_CFG_SRC_DISABLED 0
+#define L2X0_EVENT_CNT_CFG_INT_DISABLED 0
+#define L2X0_EVENT_CNT_CFG_INT_INCR 1
+#define L2X0_EVENT_CNT_CFG_INT_OVERFLOW 2
+
+/* L2C auxiliary control register - bits common to L2C-210/220/310 */
+#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
+#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
+#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17)
+#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20)
+#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21)
+#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22)
+/* L2C-210/220 common bits */
#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
-#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0)
#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
-#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3)
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3)
#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
-#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6)
+#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6)
#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
-#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9)
-#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
-#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
-#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
-#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
-#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
-#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
-#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28
-#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
-#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
-
-#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0
-#define L2X0_LATENCY_CTRL_RD_SHIFT 4
-#define L2X0_LATENCY_CTRL_WR_SHIFT 8
-
-#define L2X0_ADDR_FILTER_EN 1
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9)
+#define L2X0_AUX_CTRL_ASSOC_SHIFT 13
+#define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13)
+/* L2C-210 specific bits */
+#define L210_AUX_CTRL_WRAP_DISABLE BIT(12)
+#define L210_AUX_CTRL_WA_OVERRIDE BIT(23)
+#define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24)
+/* L2C-220 specific bits */
+#define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
+#define L220_AUX_CTRL_FWA_SHIFT 23
+#define L220_AUX_CTRL_FWA_MASK (3 << 23)
+#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26)
+#define L220_AUX_CTRL_NS_INT_CTRL BIT(27)
+/* L2C-310 specific bits */
+#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */
+#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10) /* R2P0+ */
+#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
+#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
+#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
+#define L310_AUX_CTRL_FWA_SHIFT 23
+#define L310_AUX_CTRL_FWA_MASK (3 << 23)
+#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
+#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
+#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
+#define L310_AUX_CTRL_DATA_PREFETCH BIT(28)
+#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29)
+#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */
+
+#define L310_LATENCY_CTRL_SETUP(n) ((n) << 0)
+#define L310_LATENCY_CTRL_RD(n) ((n) << 4)
+#define L310_LATENCY_CTRL_WR(n) ((n) << 8)
+
+#define L310_ADDR_FILTER_EN 1
+
+#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f
+#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23)
+#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24)
+#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27)
+#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28)
+#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29)
+#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30)
#define L2X0_CTRL_EN 1
@@ -117,6 +158,16 @@ static inline int l2x0_of_init(u32 aux_val, u32 aux_mask)
}
#endif
+#ifdef CONFIG_CACHE_L2X0_PMU
+void l2x0_pmu_register(void __iomem *base, u32 part);
+void l2x0_pmu_suspend(void);
+void l2x0_pmu_resume(void);
+#else
+static inline void l2x0_pmu_register(void __iomem *base, u32 part) {}
+static inline void l2x0_pmu_suspend(void) {}
+static inline void l2x0_pmu_resume(void) {}
+#endif
+
struct l2x0_regs {
unsigned long phy_base;
unsigned long aux_ctrl;
@@ -131,6 +182,7 @@ struct l2x0_regs {
unsigned long prefetch_ctrl;
unsigned long pwr_ctrl;
unsigned long ctrl;
+ unsigned long aux2_ctrl;
};
extern struct l2x0_regs l2x0_saved_regs;
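
The bare *_SHIFT constants for the L310 latency registers give way to composable helpers, which keeps board code readable. A sketch of programming the tag-RAM latencies (l2x0_base is a hypothetical ioremapped base; per the L2C-310 TRM each 3-bit field holds the latency in cycles minus one):

	u32 tag_lat = L310_LATENCY_CTRL_SETUP(1) |	/* 2-cycle setup */
		      L310_LATENCY_CTRL_RD(2) |		/* 3-cycle read  */
		      L310_LATENCY_CTRL_WR(2);		/* 3-cycle write */

	writel_relaxed(tag_lat, l2x0_base + L310_TAG_LATENCY_CTRL);
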
diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h
index 295e2e40151b..4e493facaa31 100644
--- a/arch/arm/include/asm/hardware/cache-tauros2.h
+++ b/arch/arm/include/asm/hardware/cache-tauros2.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/cache-tauros2.h
*
* Copyright (C) 2008 Marvell Semiconductor
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#define CACHE_TAUROS2_PREFETCH_ON (1 << 0)
diff --git a/arch/arm/include/asm/hardware/cache-uniphier.h b/arch/arm/include/asm/hardware/cache-uniphier.h
new file mode 100644
index 000000000000..b1fefca65d4d
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-uniphier.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ */
+
+#ifndef __CACHE_UNIPHIER_H
+#define __CACHE_UNIPHIER_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_CACHE_UNIPHIER
+int uniphier_cache_init(void);
+#else
+static inline int uniphier_cache_init(void)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __CACHE_UNIPHIER_H */
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
deleted file mode 100644
index 0cf7a6b842ff..000000000000
--- a/arch/arm/include/asm/hardware/coresight.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * linux/arch/arm/include/asm/hardware/coresight.h
- *
- * CoreSight components' registers
- *
- * Copyright (C) 2009 Nokia Corporation.
- * Alexander Shishkin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_HARDWARE_CORESIGHT_H
-#define __ASM_HARDWARE_CORESIGHT_H
-
-#define TRACER_ACCESSED_BIT 0
-#define TRACER_RUNNING_BIT 1
-#define TRACER_CYCLE_ACC_BIT 2
-#define TRACER_ACCESSED BIT(TRACER_ACCESSED_BIT)
-#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT)
-#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT)
-
-#define TRACER_TIMEOUT 10000
-
-#define etm_writel(t, v, x) \
- (__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
-
-/* CoreSight Management Registers */
-#define CSMR_LOCKACCESS 0xfb0
-#define CSMR_LOCKSTATUS 0xfb4
-#define CSMR_AUTHSTATUS 0xfb8
-#define CSMR_DEVID 0xfc8
-#define CSMR_DEVTYPE 0xfcc
-/* CoreSight Component Registers */
-#define CSCR_CLASS 0xff4
-
-#define CS_LAR_KEY 0xc5acce55
-
-/* ETM control register, "ETM Architecture", 3.3.1 */
-#define ETMR_CTRL 0
-#define ETMCTRL_POWERDOWN 1
-#define ETMCTRL_PROGRAM (1 << 10)
-#define ETMCTRL_PORTSEL (1 << 11)
-#define ETMCTRL_DO_CONTEXTID (3 << 14)
-#define ETMCTRL_PORTMASK1 (7 << 4)
-#define ETMCTRL_PORTMASK2 (1 << 21)
-#define ETMCTRL_PORTMASK (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
-#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21)
-#define ETMCTRL_DO_CPRT (1 << 1)
-#define ETMCTRL_DATAMASK (3 << 2)
-#define ETMCTRL_DATA_DO_DATA (1 << 2)
-#define ETMCTRL_DATA_DO_ADDR (1 << 3)
-#define ETMCTRL_DATA_DO_BOTH (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
-#define ETMCTRL_BRANCH_OUTPUT (1 << 8)
-#define ETMCTRL_CYCLEACCURATE (1 << 12)
-
-/* ETM configuration code register */
-#define ETMR_CONFCODE (0x04)
-
-/* ETM trace start/stop resource control register */
-#define ETMR_TRACESSCTRL (0x18)
-
-/* ETM trigger event register */
-#define ETMR_TRIGEVT (0x08)
-
-/* address access type register bits, "ETM architecture",
- * table 3-27 */
-/* - access type */
-#define ETMAAT_IFETCH 0
-#define ETMAAT_IEXEC 1
-#define ETMAAT_IEXECPASS 2
-#define ETMAAT_IEXECFAIL 3
-#define ETMAAT_DLOADSTORE 4
-#define ETMAAT_DLOAD 5
-#define ETMAAT_DSTORE 6
-/* - comparison access size */
-#define ETMAAT_JAVA (0 << 3)
-#define ETMAAT_THUMB (1 << 3)
-#define ETMAAT_ARM (3 << 3)
-/* - data value comparison control */
-#define ETMAAT_NOVALCMP (0 << 5)
-#define ETMAAT_VALMATCH (1 << 5)
-#define ETMAAT_VALNOMATCH (3 << 5)
-/* - exact match */
-#define ETMAAT_EXACTMATCH (1 << 7)
-/* - context id comparator control */
-#define ETMAAT_IGNCONTEXTID (0 << 8)
-#define ETMAAT_VALUE1 (1 << 8)
-#define ETMAAT_VALUE2 (2 << 8)
-#define ETMAAT_VALUE3 (3 << 8)
-/* - security level control */
-#define ETMAAT_IGNSECURITY (0 << 10)
-#define ETMAAT_NSONLY (1 << 10)
-#define ETMAAT_SONLY (2 << 10)
-
-#define ETMR_COMP_VAL(x) (0x40 + (x) * 4)
-#define ETMR_COMP_ACC_TYPE(x) (0x80 + (x) * 4)
-
-/* ETM status register, "ETM Architecture", 3.3.2 */
-#define ETMR_STATUS (0x10)
-#define ETMST_OVERFLOW BIT(0)
-#define ETMST_PROGBIT BIT(1)
-#define ETMST_STARTSTOP BIT(2)
-#define ETMST_TRIGGER BIT(3)
-
-#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
-#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
-#define etm_triggered(t) (etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER)
-
-#define ETMR_TRACEENCTRL2 0x1c
-#define ETMR_TRACEENCTRL 0x24
-#define ETMTE_INCLEXCL BIT(24)
-#define ETMR_TRACEENEVT 0x20
-#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \
- ETMCTRL_DATA_DO_ADDR | \
- ETMCTRL_BRANCH_OUTPUT | \
- ETMCTRL_DO_CONTEXTID)
-
-/* ETM management registers, "ETM Architecture", 3.5.24 */
-#define ETMMR_OSLAR 0x300
-#define ETMMR_OSLSR 0x304
-#define ETMMR_OSSRR 0x308
-#define ETMMR_PDSR 0x314
-
-/* ETB registers, "CoreSight Components TRM", 9.3 */
-#define ETBR_DEPTH 0x04
-#define ETBR_STATUS 0x0c
-#define ETBR_READMEM 0x10
-#define ETBR_READADDR 0x14
-#define ETBR_WRITEADDR 0x18
-#define ETBR_TRIGGERCOUNT 0x1c
-#define ETBR_CTRL 0x20
-#define ETBR_FORMATTERCTRL 0x304
-#define ETBFF_ENFTC 1
-#define ETBFF_ENFCONT BIT(1)
-#define ETBFF_FONFLIN BIT(4)
-#define ETBFF_MANUAL_FLUSH BIT(6)
-#define ETBFF_TRIGIN BIT(8)
-#define ETBFF_TRIGEVT BIT(9)
-#define ETBFF_TRIGFL BIT(10)
-
-#define etb_writel(t, v, x) \
- (__raw_writel((v), (t)->etb_regs + (x)))
-#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
-
-#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etm_unlock(t) \
- do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
-
-#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etb_unlock(t) \
- do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
-
-#endif /* __ASM_HARDWARE_CORESIGHT_H */
-
diff --git a/arch/arm/include/asm/hardware/cp14.h b/arch/arm/include/asm/hardware/cp14.h
new file mode 100644
index 000000000000..44f2bde379c5
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cp14.h
@@ -0,0 +1,534 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __ASM_HARDWARE_CP14_H
+#define __ASM_HARDWARE_CP14_H
+
+#include <linux/types.h>
+
+/* Accessors for CP14 registers */
+#define dbg_read(reg) RCP14_##reg()
+#define dbg_write(val, reg) WCP14_##reg(val)
+#define etm_read(reg) RCP14_##reg()
+#define etm_write(val, reg) WCP14_##reg(val)
+
+/* MRC14 and MCR14 */
+#define MRC14(op1, crn, crm, op2) \
+({ \
+u32 val; \
+asm volatile("mrc p14, "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \
+val; \
+})
+
+#define MCR14(val, op1, crn, crm, op2) \
+({ \
+asm volatile("mcr p14, "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/*
+ * Debug Registers
+ *
+ * Available only in DBGv7
+ * DBGECR, DBGDSCCR, DBGDSMCR, DBGDRCR
+ *
+ * Available only in DBGv7.1
+ * DBGBXVRm, DBGOSDLR, DBGDEVID2, DBGDEVID1
+ *
+ * Read only
+ * DBGDIDR, DBGDSCRint, DBGDTRRXint, DBGDRAR, DBGOSLSR, DBGOSSRR, DBGPRSR,
+ * DBGPRSR, DBGDSAR, DBGAUTHSTATUS, DBGDEVID2, DBGDEVID1, DBGDEVID
+ *
+ * Write only
+ * DBGDTRTXint, DBGOSLAR
+ */
+#define RCP14_DBGDIDR() MRC14(0, c0, c0, 0)
+#define RCP14_DBGDSCRint() MRC14(0, c0, c1, 0)
+#define RCP14_DBGDTRRXint() MRC14(0, c0, c5, 0)
+#define RCP14_DBGWFAR() MRC14(0, c0, c6, 0)
+#define RCP14_DBGVCR() MRC14(0, c0, c7, 0)
+#define RCP14_DBGECR() MRC14(0, c0, c9, 0)
+#define RCP14_DBGDSCCR() MRC14(0, c0, c10, 0)
+#define RCP14_DBGDSMCR() MRC14(0, c0, c11, 0)
+#define RCP14_DBGDTRRXext() MRC14(0, c0, c0, 2)
+#define RCP14_DBGDSCRext() MRC14(0, c0, c2, 2)
+#define RCP14_DBGDTRTXext() MRC14(0, c0, c3, 2)
+#define RCP14_DBGDRCR() MRC14(0, c0, c4, 2)
+#define RCP14_DBGBVR0() MRC14(0, c0, c0, 4)
+#define RCP14_DBGBVR1() MRC14(0, c0, c1, 4)
+#define RCP14_DBGBVR2() MRC14(0, c0, c2, 4)
+#define RCP14_DBGBVR3() MRC14(0, c0, c3, 4)
+#define RCP14_DBGBVR4() MRC14(0, c0, c4, 4)
+#define RCP14_DBGBVR5() MRC14(0, c0, c5, 4)
+#define RCP14_DBGBVR6() MRC14(0, c0, c6, 4)
+#define RCP14_DBGBVR7() MRC14(0, c0, c7, 4)
+#define RCP14_DBGBVR8() MRC14(0, c0, c8, 4)
+#define RCP14_DBGBVR9() MRC14(0, c0, c9, 4)
+#define RCP14_DBGBVR10() MRC14(0, c0, c10, 4)
+#define RCP14_DBGBVR11() MRC14(0, c0, c11, 4)
+#define RCP14_DBGBVR12() MRC14(0, c0, c12, 4)
+#define RCP14_DBGBVR13() MRC14(0, c0, c13, 4)
+#define RCP14_DBGBVR14() MRC14(0, c0, c14, 4)
+#define RCP14_DBGBVR15() MRC14(0, c0, c15, 4)
+#define RCP14_DBGBCR0() MRC14(0, c0, c0, 5)
+#define RCP14_DBGBCR1() MRC14(0, c0, c1, 5)
+#define RCP14_DBGBCR2() MRC14(0, c0, c2, 5)
+#define RCP14_DBGBCR3() MRC14(0, c0, c3, 5)
+#define RCP14_DBGBCR4() MRC14(0, c0, c4, 5)
+#define RCP14_DBGBCR5() MRC14(0, c0, c5, 5)
+#define RCP14_DBGBCR6() MRC14(0, c0, c6, 5)
+#define RCP14_DBGBCR7() MRC14(0, c0, c7, 5)
+#define RCP14_DBGBCR8() MRC14(0, c0, c8, 5)
+#define RCP14_DBGBCR9() MRC14(0, c0, c9, 5)
+#define RCP14_DBGBCR10() MRC14(0, c0, c10, 5)
+#define RCP14_DBGBCR11() MRC14(0, c0, c11, 5)
+#define RCP14_DBGBCR12() MRC14(0, c0, c12, 5)
+#define RCP14_DBGBCR13() MRC14(0, c0, c13, 5)
+#define RCP14_DBGBCR14() MRC14(0, c0, c14, 5)
+#define RCP14_DBGBCR15() MRC14(0, c0, c15, 5)
+#define RCP14_DBGWVR0() MRC14(0, c0, c0, 6)
+#define RCP14_DBGWVR1() MRC14(0, c0, c1, 6)
+#define RCP14_DBGWVR2() MRC14(0, c0, c2, 6)
+#define RCP14_DBGWVR3() MRC14(0, c0, c3, 6)
+#define RCP14_DBGWVR4() MRC14(0, c0, c4, 6)
+#define RCP14_DBGWVR5() MRC14(0, c0, c5, 6)
+#define RCP14_DBGWVR6() MRC14(0, c0, c6, 6)
+#define RCP14_DBGWVR7() MRC14(0, c0, c7, 6)
+#define RCP14_DBGWVR8() MRC14(0, c0, c8, 6)
+#define RCP14_DBGWVR9() MRC14(0, c0, c9, 6)
+#define RCP14_DBGWVR10() MRC14(0, c0, c10, 6)
+#define RCP14_DBGWVR11() MRC14(0, c0, c11, 6)
+#define RCP14_DBGWVR12() MRC14(0, c0, c12, 6)
+#define RCP14_DBGWVR13() MRC14(0, c0, c13, 6)
+#define RCP14_DBGWVR14() MRC14(0, c0, c14, 6)
+#define RCP14_DBGWVR15() MRC14(0, c0, c15, 6)
+#define RCP14_DBGWCR0() MRC14(0, c0, c0, 7)
+#define RCP14_DBGWCR1() MRC14(0, c0, c1, 7)
+#define RCP14_DBGWCR2() MRC14(0, c0, c2, 7)
+#define RCP14_DBGWCR3() MRC14(0, c0, c3, 7)
+#define RCP14_DBGWCR4() MRC14(0, c0, c4, 7)
+#define RCP14_DBGWCR5() MRC14(0, c0, c5, 7)
+#define RCP14_DBGWCR6() MRC14(0, c0, c6, 7)
+#define RCP14_DBGWCR7() MRC14(0, c0, c7, 7)
+#define RCP14_DBGWCR8() MRC14(0, c0, c8, 7)
+#define RCP14_DBGWCR9() MRC14(0, c0, c9, 7)
+#define RCP14_DBGWCR10() MRC14(0, c0, c10, 7)
+#define RCP14_DBGWCR11() MRC14(0, c0, c11, 7)
+#define RCP14_DBGWCR12() MRC14(0, c0, c12, 7)
+#define RCP14_DBGWCR13() MRC14(0, c0, c13, 7)
+#define RCP14_DBGWCR14() MRC14(0, c0, c14, 7)
+#define RCP14_DBGWCR15() MRC14(0, c0, c15, 7)
+#define RCP14_DBGDRAR() MRC14(0, c1, c0, 0)
+#define RCP14_DBGBXVR0() MRC14(0, c1, c0, 1)
+#define RCP14_DBGBXVR1() MRC14(0, c1, c1, 1)
+#define RCP14_DBGBXVR2() MRC14(0, c1, c2, 1)
+#define RCP14_DBGBXVR3() MRC14(0, c1, c3, 1)
+#define RCP14_DBGBXVR4() MRC14(0, c1, c4, 1)
+#define RCP14_DBGBXVR5() MRC14(0, c1, c5, 1)
+#define RCP14_DBGBXVR6() MRC14(0, c1, c6, 1)
+#define RCP14_DBGBXVR7() MRC14(0, c1, c7, 1)
+#define RCP14_DBGBXVR8() MRC14(0, c1, c8, 1)
+#define RCP14_DBGBXVR9() MRC14(0, c1, c9, 1)
+#define RCP14_DBGBXVR10() MRC14(0, c1, c10, 1)
+#define RCP14_DBGBXVR11() MRC14(0, c1, c11, 1)
+#define RCP14_DBGBXVR12() MRC14(0, c1, c12, 1)
+#define RCP14_DBGBXVR13() MRC14(0, c1, c13, 1)
+#define RCP14_DBGBXVR14() MRC14(0, c1, c14, 1)
+#define RCP14_DBGBXVR15() MRC14(0, c1, c15, 1)
+#define RCP14_DBGOSLSR() MRC14(0, c1, c1, 4)
+#define RCP14_DBGOSSRR() MRC14(0, c1, c2, 4)
+#define RCP14_DBGOSDLR() MRC14(0, c1, c3, 4)
+#define RCP14_DBGPRCR() MRC14(0, c1, c4, 4)
+#define RCP14_DBGPRSR() MRC14(0, c1, c5, 4)
+#define RCP14_DBGDSAR() MRC14(0, c2, c0, 0)
+#define RCP14_DBGITCTRL() MRC14(0, c7, c0, 4)
+#define RCP14_DBGCLAIMSET() MRC14(0, c7, c8, 6)
+#define RCP14_DBGCLAIMCLR() MRC14(0, c7, c9, 6)
+#define RCP14_DBGAUTHSTATUS() MRC14(0, c7, c14, 6)
+#define RCP14_DBGDEVID2() MRC14(0, c7, c0, 7)
+#define RCP14_DBGDEVID1() MRC14(0, c7, c1, 7)
+#define RCP14_DBGDEVID() MRC14(0, c7, c2, 7)
+
+#define WCP14_DBGDTRTXint(val) MCR14(val, 0, c0, c5, 0)
+#define WCP14_DBGWFAR(val) MCR14(val, 0, c0, c6, 0)
+#define WCP14_DBGVCR(val) MCR14(val, 0, c0, c7, 0)
+#define WCP14_DBGECR(val) MCR14(val, 0, c0, c9, 0)
+#define WCP14_DBGDSCCR(val) MCR14(val, 0, c0, c10, 0)
+#define WCP14_DBGDSMCR(val) MCR14(val, 0, c0, c11, 0)
+#define WCP14_DBGDTRRXext(val) MCR14(val, 0, c0, c0, 2)
+#define WCP14_DBGDSCRext(val) MCR14(val, 0, c0, c2, 2)
+#define WCP14_DBGDTRTXext(val) MCR14(val, 0, c0, c3, 2)
+#define WCP14_DBGDRCR(val) MCR14(val, 0, c0, c4, 2)
+#define WCP14_DBGBVR0(val) MCR14(val, 0, c0, c0, 4)
+#define WCP14_DBGBVR1(val) MCR14(val, 0, c0, c1, 4)
+#define WCP14_DBGBVR2(val) MCR14(val, 0, c0, c2, 4)
+#define WCP14_DBGBVR3(val) MCR14(val, 0, c0, c3, 4)
+#define WCP14_DBGBVR4(val) MCR14(val, 0, c0, c4, 4)
+#define WCP14_DBGBVR5(val) MCR14(val, 0, c0, c5, 4)
+#define WCP14_DBGBVR6(val) MCR14(val, 0, c0, c6, 4)
+#define WCP14_DBGBVR7(val) MCR14(val, 0, c0, c7, 4)
+#define WCP14_DBGBVR8(val) MCR14(val, 0, c0, c8, 4)
+#define WCP14_DBGBVR9(val) MCR14(val, 0, c0, c9, 4)
+#define WCP14_DBGBVR10(val) MCR14(val, 0, c0, c10, 4)
+#define WCP14_DBGBVR11(val) MCR14(val, 0, c0, c11, 4)
+#define WCP14_DBGBVR12(val) MCR14(val, 0, c0, c12, 4)
+#define WCP14_DBGBVR13(val) MCR14(val, 0, c0, c13, 4)
+#define WCP14_DBGBVR14(val) MCR14(val, 0, c0, c14, 4)
+#define WCP14_DBGBVR15(val) MCR14(val, 0, c0, c15, 4)
+#define WCP14_DBGBCR0(val) MCR14(val, 0, c0, c0, 5)
+#define WCP14_DBGBCR1(val) MCR14(val, 0, c0, c1, 5)
+#define WCP14_DBGBCR2(val) MCR14(val, 0, c0, c2, 5)
+#define WCP14_DBGBCR3(val) MCR14(val, 0, c0, c3, 5)
+#define WCP14_DBGBCR4(val) MCR14(val, 0, c0, c4, 5)
+#define WCP14_DBGBCR5(val) MCR14(val, 0, c0, c5, 5)
+#define WCP14_DBGBCR6(val) MCR14(val, 0, c0, c6, 5)
+#define WCP14_DBGBCR7(val) MCR14(val, 0, c0, c7, 5)
+#define WCP14_DBGBCR8(val) MCR14(val, 0, c0, c8, 5)
+#define WCP14_DBGBCR9(val) MCR14(val, 0, c0, c9, 5)
+#define WCP14_DBGBCR10(val) MCR14(val, 0, c0, c10, 5)
+#define WCP14_DBGBCR11(val) MCR14(val, 0, c0, c11, 5)
+#define WCP14_DBGBCR12(val) MCR14(val, 0, c0, c12, 5)
+#define WCP14_DBGBCR13(val) MCR14(val, 0, c0, c13, 5)
+#define WCP14_DBGBCR14(val) MCR14(val, 0, c0, c14, 5)
+#define WCP14_DBGBCR15(val) MCR14(val, 0, c0, c15, 5)
+#define WCP14_DBGWVR0(val) MCR14(val, 0, c0, c0, 6)
+#define WCP14_DBGWVR1(val) MCR14(val, 0, c0, c1, 6)
+#define WCP14_DBGWVR2(val) MCR14(val, 0, c0, c2, 6)
+#define WCP14_DBGWVR3(val) MCR14(val, 0, c0, c3, 6)
+#define WCP14_DBGWVR4(val) MCR14(val, 0, c0, c4, 6)
+#define WCP14_DBGWVR5(val) MCR14(val, 0, c0, c5, 6)
+#define WCP14_DBGWVR6(val) MCR14(val, 0, c0, c6, 6)
+#define WCP14_DBGWVR7(val) MCR14(val, 0, c0, c7, 6)
+#define WCP14_DBGWVR8(val) MCR14(val, 0, c0, c8, 6)
+#define WCP14_DBGWVR9(val) MCR14(val, 0, c0, c9, 6)
+#define WCP14_DBGWVR10(val) MCR14(val, 0, c0, c10, 6)
+#define WCP14_DBGWVR11(val) MCR14(val, 0, c0, c11, 6)
+#define WCP14_DBGWVR12(val) MCR14(val, 0, c0, c12, 6)
+#define WCP14_DBGWVR13(val) MCR14(val, 0, c0, c13, 6)
+#define WCP14_DBGWVR14(val) MCR14(val, 0, c0, c14, 6)
+#define WCP14_DBGWVR15(val) MCR14(val, 0, c0, c15, 6)
+#define WCP14_DBGWCR0(val) MCR14(val, 0, c0, c0, 7)
+#define WCP14_DBGWCR1(val) MCR14(val, 0, c0, c1, 7)
+#define WCP14_DBGWCR2(val) MCR14(val, 0, c0, c2, 7)
+#define WCP14_DBGWCR3(val) MCR14(val, 0, c0, c3, 7)
+#define WCP14_DBGWCR4(val) MCR14(val, 0, c0, c4, 7)
+#define WCP14_DBGWCR5(val) MCR14(val, 0, c0, c5, 7)
+#define WCP14_DBGWCR6(val) MCR14(val, 0, c0, c6, 7)
+#define WCP14_DBGWCR7(val) MCR14(val, 0, c0, c7, 7)
+#define WCP14_DBGWCR8(val) MCR14(val, 0, c0, c8, 7)
+#define WCP14_DBGWCR9(val) MCR14(val, 0, c0, c9, 7)
+#define WCP14_DBGWCR10(val) MCR14(val, 0, c0, c10, 7)
+#define WCP14_DBGWCR11(val) MCR14(val, 0, c0, c11, 7)
+#define WCP14_DBGWCR12(val) MCR14(val, 0, c0, c12, 7)
+#define WCP14_DBGWCR13(val) MCR14(val, 0, c0, c13, 7)
+#define WCP14_DBGWCR14(val) MCR14(val, 0, c0, c14, 7)
+#define WCP14_DBGWCR15(val) MCR14(val, 0, c0, c15, 7)
+#define WCP14_DBGBXVR0(val) MCR14(val, 0, c1, c0, 1)
+#define WCP14_DBGBXVR1(val) MCR14(val, 0, c1, c1, 1)
+#define WCP14_DBGBXVR2(val) MCR14(val, 0, c1, c2, 1)
+#define WCP14_DBGBXVR3(val) MCR14(val, 0, c1, c3, 1)
+#define WCP14_DBGBXVR4(val) MCR14(val, 0, c1, c4, 1)
+#define WCP14_DBGBXVR5(val) MCR14(val, 0, c1, c5, 1)
+#define WCP14_DBGBXVR6(val) MCR14(val, 0, c1, c6, 1)
+#define WCP14_DBGBXVR7(val) MCR14(val, 0, c1, c7, 1)
+#define WCP14_DBGBXVR8(val) MCR14(val, 0, c1, c8, 1)
+#define WCP14_DBGBXVR9(val) MCR14(val, 0, c1, c9, 1)
+#define WCP14_DBGBXVR10(val) MCR14(val, 0, c1, c10, 1)
+#define WCP14_DBGBXVR11(val) MCR14(val, 0, c1, c11, 1)
+#define WCP14_DBGBXVR12(val) MCR14(val, 0, c1, c12, 1)
+#define WCP14_DBGBXVR13(val) MCR14(val, 0, c1, c13, 1)
+#define WCP14_DBGBXVR14(val) MCR14(val, 0, c1, c14, 1)
+#define WCP14_DBGBXVR15(val) MCR14(val, 0, c1, c15, 1)
+#define WCP14_DBGOSLAR(val) MCR14(val, 0, c1, c0, 4)
+#define WCP14_DBGOSSRR(val) MCR14(val, 0, c1, c2, 4)
+#define WCP14_DBGOSDLR(val) MCR14(val, 0, c1, c3, 4)
+#define WCP14_DBGPRCR(val) MCR14(val, 0, c1, c4, 4)
+#define WCP14_DBGITCTRL(val) MCR14(val, 0, c7, c0, 4)
+#define WCP14_DBGCLAIMSET(val) MCR14(val, 0, c7, c8, 6)
+#define WCP14_DBGCLAIMCLR(val) MCR14(val, 0, c7, c9, 6)
+
+/*
+ * ETM Registers
+ *
+ * Available only in ETMv3.3, 3.4, 3.5
+ * ETMASICCR, ETMTECR2, ETMFFRR, ETMVDEVR, ETMVDCR1, ETMVDCR2, ETMVDCR3,
+ * ETMDCVRn, ETMDCMRn
+ *
+ * Available only in ETMv3.5 as read only
+ * ETMIDR2
+ *
+ * Available only in ETMv3.5, PFTv1.0, 1.1
+ * ETMTSEVR, ETMVMIDCVR, ETMPDCR
+ *
+ * Read only
+ * ETMCCR, ETMSCR, ETMIDR, ETMCCER, ETMOSLSR
+ * ETMLSR, ETMAUTHSTATUS, ETMDEVID, ETMDEVTYPE, ETMPIDR4, ETMPIDR5, ETMPIDR6,
+ * ETMPIDR7, ETMPIDR0, ETMPIDR1, ETMPIDR2, ETMPIDR2, ETMPIDR3, ETMCIDR0,
+ * ETMCIDR1, ETMCIDR2, ETMCIDR3
+ *
+ * Write only
+ * ETMOSLAR, ETMLAR
+ * Note: ETMCCER[11] controls the WO nature of certain regs. Refer to the ETM architecture spec.
+ */
+#define RCP14_ETMCR() MRC14(1, c0, c0, 0)
+#define RCP14_ETMCCR() MRC14(1, c0, c1, 0)
+#define RCP14_ETMTRIGGER() MRC14(1, c0, c2, 0)
+#define RCP14_ETMASICCR() MRC14(1, c0, c3, 0)
+#define RCP14_ETMSR() MRC14(1, c0, c4, 0)
+#define RCP14_ETMSCR() MRC14(1, c0, c5, 0)
+#define RCP14_ETMTSSCR() MRC14(1, c0, c6, 0)
+#define RCP14_ETMTECR2() MRC14(1, c0, c7, 0)
+#define RCP14_ETMTEEVR() MRC14(1, c0, c8, 0)
+#define RCP14_ETMTECR1() MRC14(1, c0, c9, 0)
+#define RCP14_ETMFFRR() MRC14(1, c0, c10, 0)
+#define RCP14_ETMFFLR() MRC14(1, c0, c11, 0)
+#define RCP14_ETMVDEVR() MRC14(1, c0, c12, 0)
+#define RCP14_ETMVDCR1() MRC14(1, c0, c13, 0)
+#define RCP14_ETMVDCR2() MRC14(1, c0, c14, 0)
+#define RCP14_ETMVDCR3() MRC14(1, c0, c15, 0)
+#define RCP14_ETMACVR0() MRC14(1, c0, c0, 1)
+#define RCP14_ETMACVR1() MRC14(1, c0, c1, 1)
+#define RCP14_ETMACVR2() MRC14(1, c0, c2, 1)
+#define RCP14_ETMACVR3() MRC14(1, c0, c3, 1)
+#define RCP14_ETMACVR4() MRC14(1, c0, c4, 1)
+#define RCP14_ETMACVR5() MRC14(1, c0, c5, 1)
+#define RCP14_ETMACVR6() MRC14(1, c0, c6, 1)
+#define RCP14_ETMACVR7() MRC14(1, c0, c7, 1)
+#define RCP14_ETMACVR8() MRC14(1, c0, c8, 1)
+#define RCP14_ETMACVR9() MRC14(1, c0, c9, 1)
+#define RCP14_ETMACVR10() MRC14(1, c0, c10, 1)
+#define RCP14_ETMACVR11() MRC14(1, c0, c11, 1)
+#define RCP14_ETMACVR12() MRC14(1, c0, c12, 1)
+#define RCP14_ETMACVR13() MRC14(1, c0, c13, 1)
+#define RCP14_ETMACVR14() MRC14(1, c0, c14, 1)
+#define RCP14_ETMACVR15() MRC14(1, c0, c15, 1)
+#define RCP14_ETMACTR0() MRC14(1, c0, c0, 2)
+#define RCP14_ETMACTR1() MRC14(1, c0, c1, 2)
+#define RCP14_ETMACTR2() MRC14(1, c0, c2, 2)
+#define RCP14_ETMACTR3() MRC14(1, c0, c3, 2)
+#define RCP14_ETMACTR4() MRC14(1, c0, c4, 2)
+#define RCP14_ETMACTR5() MRC14(1, c0, c5, 2)
+#define RCP14_ETMACTR6() MRC14(1, c0, c6, 2)
+#define RCP14_ETMACTR7() MRC14(1, c0, c7, 2)
+#define RCP14_ETMACTR8() MRC14(1, c0, c8, 2)
+#define RCP14_ETMACTR9() MRC14(1, c0, c9, 2)
+#define RCP14_ETMACTR10() MRC14(1, c0, c10, 2)
+#define RCP14_ETMACTR11() MRC14(1, c0, c11, 2)
+#define RCP14_ETMACTR12() MRC14(1, c0, c12, 2)
+#define RCP14_ETMACTR13() MRC14(1, c0, c13, 2)
+#define RCP14_ETMACTR14() MRC14(1, c0, c14, 2)
+#define RCP14_ETMACTR15() MRC14(1, c0, c15, 2)
+#define RCP14_ETMDCVR0() MRC14(1, c0, c0, 3)
+#define RCP14_ETMDCVR2() MRC14(1, c0, c2, 3)
+#define RCP14_ETMDCVR4() MRC14(1, c0, c4, 3)
+#define RCP14_ETMDCVR6() MRC14(1, c0, c6, 3)
+#define RCP14_ETMDCVR8() MRC14(1, c0, c8, 3)
+#define RCP14_ETMDCVR10() MRC14(1, c0, c10, 3)
+#define RCP14_ETMDCVR12() MRC14(1, c0, c12, 3)
+#define RCP14_ETMDCVR14() MRC14(1, c0, c14, 3)
+#define RCP14_ETMDCMR0() MRC14(1, c0, c0, 4)
+#define RCP14_ETMDCMR2() MRC14(1, c0, c2, 4)
+#define RCP14_ETMDCMR4() MRC14(1, c0, c4, 4)
+#define RCP14_ETMDCMR6() MRC14(1, c0, c6, 4)
+#define RCP14_ETMDCMR8() MRC14(1, c0, c8, 4)
+#define RCP14_ETMDCMR10() MRC14(1, c0, c10, 4)
+#define RCP14_ETMDCMR12() MRC14(1, c0, c12, 4)
+#define RCP14_ETMDCMR14() MRC14(1, c0, c14, 4)
+#define RCP14_ETMCNTRLDVR0() MRC14(1, c0, c0, 5)
+#define RCP14_ETMCNTRLDVR1() MRC14(1, c0, c1, 5)
+#define RCP14_ETMCNTRLDVR2() MRC14(1, c0, c2, 5)
+#define RCP14_ETMCNTRLDVR3() MRC14(1, c0, c3, 5)
+#define RCP14_ETMCNTENR0() MRC14(1, c0, c4, 5)
+#define RCP14_ETMCNTENR1() MRC14(1, c0, c5, 5)
+#define RCP14_ETMCNTENR2() MRC14(1, c0, c6, 5)
+#define RCP14_ETMCNTENR3() MRC14(1, c0, c7, 5)
+#define RCP14_ETMCNTRLDEVR0() MRC14(1, c0, c8, 5)
+#define RCP14_ETMCNTRLDEVR1() MRC14(1, c0, c9, 5)
+#define RCP14_ETMCNTRLDEVR2() MRC14(1, c0, c10, 5)
+#define RCP14_ETMCNTRLDEVR3() MRC14(1, c0, c11, 5)
+#define RCP14_ETMCNTVR0() MRC14(1, c0, c12, 5)
+#define RCP14_ETMCNTVR1() MRC14(1, c0, c13, 5)
+#define RCP14_ETMCNTVR2() MRC14(1, c0, c14, 5)
+#define RCP14_ETMCNTVR3() MRC14(1, c0, c15, 5)
+#define RCP14_ETMSQ12EVR() MRC14(1, c0, c0, 6)
+#define RCP14_ETMSQ21EVR() MRC14(1, c0, c1, 6)
+#define RCP14_ETMSQ23EVR() MRC14(1, c0, c2, 6)
+#define RCP14_ETMSQ31EVR() MRC14(1, c0, c3, 6)
+#define RCP14_ETMSQ32EVR() MRC14(1, c0, c4, 6)
+#define RCP14_ETMSQ13EVR() MRC14(1, c0, c5, 6)
+#define RCP14_ETMSQR() MRC14(1, c0, c7, 6)
+#define RCP14_ETMEXTOUTEVR0() MRC14(1, c0, c8, 6)
+#define RCP14_ETMEXTOUTEVR1() MRC14(1, c0, c9, 6)
+#define RCP14_ETMEXTOUTEVR2() MRC14(1, c0, c10, 6)
+#define RCP14_ETMEXTOUTEVR3() MRC14(1, c0, c11, 6)
+#define RCP14_ETMCIDCVR0() MRC14(1, c0, c12, 6)
+#define RCP14_ETMCIDCVR1() MRC14(1, c0, c13, 6)
+#define RCP14_ETMCIDCVR2() MRC14(1, c0, c14, 6)
+#define RCP14_ETMCIDCMR() MRC14(1, c0, c15, 6)
+#define RCP14_ETMIMPSPEC0() MRC14(1, c0, c0, 7)
+#define RCP14_ETMIMPSPEC1() MRC14(1, c0, c1, 7)
+#define RCP14_ETMIMPSPEC2() MRC14(1, c0, c2, 7)
+#define RCP14_ETMIMPSPEC3() MRC14(1, c0, c3, 7)
+#define RCP14_ETMIMPSPEC4() MRC14(1, c0, c4, 7)
+#define RCP14_ETMIMPSPEC5() MRC14(1, c0, c5, 7)
+#define RCP14_ETMIMPSPEC6() MRC14(1, c0, c6, 7)
+#define RCP14_ETMIMPSPEC7() MRC14(1, c0, c7, 7)
+#define RCP14_ETMSYNCFR() MRC14(1, c0, c8, 7)
+#define RCP14_ETMIDR() MRC14(1, c0, c9, 7)
+#define RCP14_ETMCCER() MRC14(1, c0, c10, 7)
+#define RCP14_ETMEXTINSELR() MRC14(1, c0, c11, 7)
+#define RCP14_ETMTESSEICR() MRC14(1, c0, c12, 7)
+#define RCP14_ETMEIBCR() MRC14(1, c0, c13, 7)
+#define RCP14_ETMTSEVR() MRC14(1, c0, c14, 7)
+#define RCP14_ETMAUXCR() MRC14(1, c0, c15, 7)
+#define RCP14_ETMTRACEIDR() MRC14(1, c1, c0, 0)
+#define RCP14_ETMIDR2() MRC14(1, c1, c2, 0)
+#define RCP14_ETMVMIDCVR() MRC14(1, c1, c0, 1)
+#define RCP14_ETMOSLSR() MRC14(1, c1, c1, 4)
+/* Not available in PFTv1.1 */
+#define RCP14_ETMOSSRR() MRC14(1, c1, c2, 4)
+#define RCP14_ETMPDCR() MRC14(1, c1, c4, 4)
+#define RCP14_ETMPDSR() MRC14(1, c1, c5, 4)
+#define RCP14_ETMITCTRL() MRC14(1, c7, c0, 4)
+#define RCP14_ETMCLAIMSET() MRC14(1, c7, c8, 6)
+#define RCP14_ETMCLAIMCLR() MRC14(1, c7, c9, 6)
+#define RCP14_ETMLSR() MRC14(1, c7, c13, 6)
+#define RCP14_ETMAUTHSTATUS() MRC14(1, c7, c14, 6)
+#define RCP14_ETMDEVID() MRC14(1, c7, c2, 7)
+#define RCP14_ETMDEVTYPE() MRC14(1, c7, c3, 7)
+#define RCP14_ETMPIDR4() MRC14(1, c7, c4, 7)
+#define RCP14_ETMPIDR5() MRC14(1, c7, c5, 7)
+#define RCP14_ETMPIDR6() MRC14(1, c7, c6, 7)
+#define RCP14_ETMPIDR7() MRC14(1, c7, c7, 7)
+#define RCP14_ETMPIDR0() MRC14(1, c7, c8, 7)
+#define RCP14_ETMPIDR1() MRC14(1, c7, c9, 7)
+#define RCP14_ETMPIDR2() MRC14(1, c7, c10, 7)
+#define RCP14_ETMPIDR3() MRC14(1, c7, c11, 7)
+#define RCP14_ETMCIDR0() MRC14(1, c7, c12, 7)
+#define RCP14_ETMCIDR1() MRC14(1, c7, c13, 7)
+#define RCP14_ETMCIDR2() MRC14(1, c7, c14, 7)
+#define RCP14_ETMCIDR3() MRC14(1, c7, c15, 7)
+
+#define WCP14_ETMCR(val) MCR14(val, 1, c0, c0, 0)
+#define WCP14_ETMTRIGGER(val) MCR14(val, 1, c0, c2, 0)
+#define WCP14_ETMASICCR(val) MCR14(val, 1, c0, c3, 0)
+#define WCP14_ETMSR(val) MCR14(val, 1, c0, c4, 0)
+#define WCP14_ETMTSSCR(val) MCR14(val, 1, c0, c6, 0)
+#define WCP14_ETMTECR2(val) MCR14(val, 1, c0, c7, 0)
+#define WCP14_ETMTEEVR(val) MCR14(val, 1, c0, c8, 0)
+#define WCP14_ETMTECR1(val) MCR14(val, 1, c0, c9, 0)
+#define WCP14_ETMFFRR(val) MCR14(val, 1, c0, c10, 0)
+#define WCP14_ETMFFLR(val) MCR14(val, 1, c0, c11, 0)
+#define WCP14_ETMVDEVR(val) MCR14(val, 1, c0, c12, 0)
+#define WCP14_ETMVDCR1(val) MCR14(val, 1, c0, c13, 0)
+#define WCP14_ETMVDCR2(val) MCR14(val, 1, c0, c14, 0)
+#define WCP14_ETMVDCR3(val) MCR14(val, 1, c0, c15, 0)
+#define WCP14_ETMACVR0(val) MCR14(val, 1, c0, c0, 1)
+#define WCP14_ETMACVR1(val) MCR14(val, 1, c0, c1, 1)
+#define WCP14_ETMACVR2(val) MCR14(val, 1, c0, c2, 1)
+#define WCP14_ETMACVR3(val) MCR14(val, 1, c0, c3, 1)
+#define WCP14_ETMACVR4(val) MCR14(val, 1, c0, c4, 1)
+#define WCP14_ETMACVR5(val) MCR14(val, 1, c0, c5, 1)
+#define WCP14_ETMACVR6(val) MCR14(val, 1, c0, c6, 1)
+#define WCP14_ETMACVR7(val) MCR14(val, 1, c0, c7, 1)
+#define WCP14_ETMACVR8(val) MCR14(val, 1, c0, c8, 1)
+#define WCP14_ETMACVR9(val) MCR14(val, 1, c0, c9, 1)
+#define WCP14_ETMACVR10(val) MCR14(val, 1, c0, c10, 1)
+#define WCP14_ETMACVR11(val) MCR14(val, 1, c0, c11, 1)
+#define WCP14_ETMACVR12(val) MCR14(val, 1, c0, c12, 1)
+#define WCP14_ETMACVR13(val) MCR14(val, 1, c0, c13, 1)
+#define WCP14_ETMACVR14(val) MCR14(val, 1, c0, c14, 1)
+#define WCP14_ETMACVR15(val) MCR14(val, 1, c0, c15, 1)
+#define WCP14_ETMACTR0(val) MCR14(val, 1, c0, c0, 2)
+#define WCP14_ETMACTR1(val) MCR14(val, 1, c0, c1, 2)
+#define WCP14_ETMACTR2(val) MCR14(val, 1, c0, c2, 2)
+#define WCP14_ETMACTR3(val) MCR14(val, 1, c0, c3, 2)
+#define WCP14_ETMACTR4(val) MCR14(val, 1, c0, c4, 2)
+#define WCP14_ETMACTR5(val) MCR14(val, 1, c0, c5, 2)
+#define WCP14_ETMACTR6(val) MCR14(val, 1, c0, c6, 2)
+#define WCP14_ETMACTR7(val) MCR14(val, 1, c0, c7, 2)
+#define WCP14_ETMACTR8(val) MCR14(val, 1, c0, c8, 2)
+#define WCP14_ETMACTR9(val) MCR14(val, 1, c0, c9, 2)
+#define WCP14_ETMACTR10(val) MCR14(val, 1, c0, c10, 2)
+#define WCP14_ETMACTR11(val) MCR14(val, 1, c0, c11, 2)
+#define WCP14_ETMACTR12(val) MCR14(val, 1, c0, c12, 2)
+#define WCP14_ETMACTR13(val) MCR14(val, 1, c0, c13, 2)
+#define WCP14_ETMACTR14(val) MCR14(val, 1, c0, c14, 2)
+#define WCP14_ETMACTR15(val) MCR14(val, 1, c0, c15, 2)
+#define WCP14_ETMDCVR0(val) MCR14(val, 1, c0, c0, 3)
+#define WCP14_ETMDCVR2(val) MCR14(val, 1, c0, c2, 3)
+#define WCP14_ETMDCVR4(val) MCR14(val, 1, c0, c4, 3)
+#define WCP14_ETMDCVR6(val) MCR14(val, 1, c0, c6, 3)
+#define WCP14_ETMDCVR8(val) MCR14(val, 1, c0, c8, 3)
+#define WCP14_ETMDCVR10(val) MCR14(val, 1, c0, c10, 3)
+#define WCP14_ETMDCVR12(val) MCR14(val, 1, c0, c12, 3)
+#define WCP14_ETMDCVR14(val) MCR14(val, 1, c0, c14, 3)
+#define WCP14_ETMDCMR0(val) MCR14(val, 1, c0, c0, 4)
+#define WCP14_ETMDCMR2(val) MCR14(val, 1, c0, c2, 4)
+#define WCP14_ETMDCMR4(val) MCR14(val, 1, c0, c4, 4)
+#define WCP14_ETMDCMR6(val) MCR14(val, 1, c0, c6, 4)
+#define WCP14_ETMDCMR8(val) MCR14(val, 1, c0, c8, 4)
+#define WCP14_ETMDCMR10(val) MCR14(val, 1, c0, c10, 4)
+#define WCP14_ETMDCMR12(val) MCR14(val, 1, c0, c12, 4)
+#define WCP14_ETMDCMR14(val) MCR14(val, 1, c0, c14, 4)
+#define WCP14_ETMCNTRLDVR0(val) MCR14(val, 1, c0, c0, 5)
+#define WCP14_ETMCNTRLDVR1(val) MCR14(val, 1, c0, c1, 5)
+#define WCP14_ETMCNTRLDVR2(val) MCR14(val, 1, c0, c2, 5)
+#define WCP14_ETMCNTRLDVR3(val) MCR14(val, 1, c0, c3, 5)
+#define WCP14_ETMCNTENR0(val) MCR14(val, 1, c0, c4, 5)
+#define WCP14_ETMCNTENR1(val) MCR14(val, 1, c0, c5, 5)
+#define WCP14_ETMCNTENR2(val) MCR14(val, 1, c0, c6, 5)
+#define WCP14_ETMCNTENR3(val) MCR14(val, 1, c0, c7, 5)
+#define WCP14_ETMCNTRLDEVR0(val) MCR14(val, 1, c0, c8, 5)
+#define WCP14_ETMCNTRLDEVR1(val) MCR14(val, 1, c0, c9, 5)
+#define WCP14_ETMCNTRLDEVR2(val) MCR14(val, 1, c0, c10, 5)
+#define WCP14_ETMCNTRLDEVR3(val) MCR14(val, 1, c0, c11, 5)
+#define WCP14_ETMCNTVR0(val) MCR14(val, 1, c0, c12, 5)
+#define WCP14_ETMCNTVR1(val) MCR14(val, 1, c0, c13, 5)
+#define WCP14_ETMCNTVR2(val) MCR14(val, 1, c0, c14, 5)
+#define WCP14_ETMCNTVR3(val) MCR14(val, 1, c0, c15, 5)
+#define WCP14_ETMSQ12EVR(val) MCR14(val, 1, c0, c0, 6)
+#define WCP14_ETMSQ21EVR(val) MCR14(val, 1, c0, c1, 6)
+#define WCP14_ETMSQ23EVR(val) MCR14(val, 1, c0, c2, 6)
+#define WCP14_ETMSQ31EVR(val) MCR14(val, 1, c0, c3, 6)
+#define WCP14_ETMSQ32EVR(val) MCR14(val, 1, c0, c4, 6)
+#define WCP14_ETMSQ13EVR(val) MCR14(val, 1, c0, c5, 6)
+#define WCP14_ETMSQR(val) MCR14(val, 1, c0, c7, 6)
+#define WCP14_ETMEXTOUTEVR0(val) MCR14(val, 1, c0, c8, 6)
+#define WCP14_ETMEXTOUTEVR1(val) MCR14(val, 1, c0, c9, 6)
+#define WCP14_ETMEXTOUTEVR2(val) MCR14(val, 1, c0, c10, 6)
+#define WCP14_ETMEXTOUTEVR3(val) MCR14(val, 1, c0, c11, 6)
+#define WCP14_ETMCIDCVR0(val) MCR14(val, 1, c0, c12, 6)
+#define WCP14_ETMCIDCVR1(val) MCR14(val, 1, c0, c13, 6)
+#define WCP14_ETMCIDCVR2(val) MCR14(val, 1, c0, c14, 6)
+#define WCP14_ETMCIDCMR(val) MCR14(val, 1, c0, c15, 6)
+#define WCP14_ETMIMPSPEC0(val) MCR14(val, 1, c0, c0, 7)
+#define WCP14_ETMIMPSPEC1(val) MCR14(val, 1, c0, c1, 7)
+#define WCP14_ETMIMPSPEC2(val) MCR14(val, 1, c0, c2, 7)
+#define WCP14_ETMIMPSPEC3(val) MCR14(val, 1, c0, c3, 7)
+#define WCP14_ETMIMPSPEC4(val) MCR14(val, 1, c0, c4, 7)
+#define WCP14_ETMIMPSPEC5(val) MCR14(val, 1, c0, c5, 7)
+#define WCP14_ETMIMPSPEC6(val) MCR14(val, 1, c0, c6, 7)
+#define WCP14_ETMIMPSPEC7(val) MCR14(val, 1, c0, c7, 7)
+/* May be read-only in ETMv3.4, ETMv3.5 */
+#define WCP14_ETMSYNCFR(val) MCR14(val, 1, c0, c8, 7)
+#define WCP14_ETMEXTINSELR(val) MCR14(val, 1, c0, c11, 7)
+#define WCP14_ETMTESSEICR(val) MCR14(val, 1, c0, c12, 7)
+#define WCP14_ETMEIBCR(val) MCR14(val, 1, c0, c13, 7)
+#define WCP14_ETMTSEVR(val) MCR14(val, 1, c0, c14, 7)
+#define WCP14_ETMAUXCR(val) MCR14(val, 1, c0, c15, 7)
+#define WCP14_ETMTRACEIDR(val) MCR14(val, 1, c1, c0, 0)
+#define WCP14_ETMIDR2(val) MCR14(val, 1, c1, c2, 0)
+#define WCP14_ETMVMIDCVR(val) MCR14(val, 1, c1, c0, 1)
+#define WCP14_ETMOSLAR(val) MCR14(val, 1, c1, c0, 4)
+/* Not available in PFTv1.1 */
+#define WCP14_ETMOSSRR(val) MCR14(val, 1, c1, c2, 4)
+#define WCP14_ETMPDCR(val) MCR14(val, 1, c1, c4, 4)
+#define WCP14_ETMPDSR(val) MCR14(val, 1, c1, c5, 4)
+#define WCP14_ETMITCTRL(val) MCR14(val, 1, c7, c0, 4)
+#define WCP14_ETMCLAIMSET(val) MCR14(val, 1, c7, c8, 6)
+#define WCP14_ETMCLAIMCLR(val) MCR14(val, 1, c7, c9, 6)
+/* Writes to this register from the CP14 interface are ignored */
+#define WCP14_ETMLAR(val) MCR14(val, 1, c7, c12, 6)
+
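+/*
+ * Illustrative sketch (an assumption, not part of this interface): a
+ * driver could use the accessors above to read the ETM ID register and
+ * program a trace ID, roughly as follows. Locking, barriers and error
+ * handling are omitted, and the 0x10 trace ID is a made-up example:
+ *
+ *	unsigned int etmidr = RCP14_ETMIDR();
+ *	if (etmidr)
+ *		WCP14_ETMTRACEIDR(0x10);
+ */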
+#endif
diff --git a/arch/arm/include/asm/hardware/debug-8250.S b/arch/arm/include/asm/hardware/debug-8250.S
deleted file mode 100644
index 22c689255e6e..000000000000
--- a/arch/arm/include/asm/hardware/debug-8250.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/debug-8250.S
- *
- * Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/serial_reg.h>
-
- .macro senduart,rd,rx
- strb \rd, [\rx, #UART_TX << UART_SHIFT]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldrb \rd, [\rx, #UART_LSR << UART_SHIFT]
- and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
- teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
- bne 1002b
- .endm
-
- .macro waituart,rd,rx
-#ifdef FLOW_CONTROL
-1001: ldrb \rd, [\rx, #UART_MSR << UART_SHIFT]
- tst \rd, #UART_MSR_CTS
- beq 1001b
-#endif
- .endm
diff --git a/arch/arm/include/asm/hardware/debug-pl01x.S b/arch/arm/include/asm/hardware/debug-pl01x.S
deleted file mode 100644
index f9fd083eff63..000000000000
--- a/arch/arm/include/asm/hardware/debug-pl01x.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/* arch/arm/include/asm/hardware/debug-pl01x.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-#include <linux/amba/serial.h>
-
- .macro senduart,rd,rx
- strb \rd, [\rx, #UART01x_DR]
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #UART01x_FR]
- tst \rd, #UART01x_FR_TXFF
- bne 1001b
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, #UART01x_FR]
- tst \rd, #UART01x_FR_BUSY
- bne 1001b
- .endm
diff --git a/arch/arm/include/asm/hardware/dec21285.h b/arch/arm/include/asm/hardware/dec21285.h
index 0d7552751aaf..894f2a635cbb 100644
--- a/arch/arm/include/asm/hardware/dec21285.h
+++ b/arch/arm/include/asm/hardware/dec21285.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/dec21285.h
*
* Copyright (C) 1998 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* DC21285 registers
*/
#define DC21285_PCI_IACK 0x79000000
@@ -25,6 +22,13 @@
#define DC21285_IO(x) (x)
#endif
+/*
+ * The footbridge is programmed to expose the system RAM at 0xe0000000.
+ * The requirement is that the RAM isn't placed at bus address 0, which
+ * would clash with VGA cards.
+ */
+#define BUS_OFFSET 0xe0000000
+
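+/*
+ * Illustrative sketch (an assumption, not taken from this header): with
+ * RAM exposed at BUS_OFFSET, a driver would derive a PCI bus address
+ * from a physical RAM address along the lines of
+ *
+ *	bus = phys - PHYS_OFFSET + BUS_OFFSET
+ *
+ * which keeps bus address 0 free for VGA cards as noted above.
+ */
+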
#define CSR_PCICMD DC21285_IO(0x0004)
#define CSR_CLASSREV DC21285_IO(0x0008)
#define CSR_PCICACHELINESIZE DC21285_IO(0x000c)
@@ -84,19 +88,6 @@
#define SA110_CNTL_XCSDIR(x) ((x)<<28)
#define SA110_CNTL_PCICFN (1 << 31)
-/*
- * footbridge_cfn_mode() is used when we want
- * to check whether we are the central function
- */
-#define __footbridge_cfn_mode() (*CSR_SA110_CNTL & SA110_CNTL_PCICFN)
-#if defined(CONFIG_FOOTBRIDGE_HOST) && defined(CONFIG_FOOTBRIDGE_ADDIN)
-#define footbridge_cfn_mode() __footbridge_cfn_mode()
-#elif defined(CONFIG_FOOTBRIDGE_HOST)
-#define footbridge_cfn_mode() (1)
-#else
-#define footbridge_cfn_mode() (0)
-#endif
-
#define CSR_PCIADDR_EXTN DC21285_IO(0x0140)
#define CSR_PREFETCHMEMRANGE DC21285_IO(0x0144)
#define CSR_XBUS_CYCLE DC21285_IO(0x0148)
diff --git a/arch/arm/include/asm/hardware/entry-macro-iomd.S b/arch/arm/include/asm/hardware/entry-macro-iomd.S
deleted file mode 100644
index 8c215acd9b57..000000000000
--- a/arch/arm/include/asm/hardware/entry-macro-iomd.S
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/entry-macro-iomd.S
- *
- * Low-level IRQ helper macros for IOC/IOMD based platforms
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-/* IOC / IOMD based hardware */
-#include <asm/hardware/iomd.h>
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldrb \irqstat, [\base, #IOMD_IRQREQB] @ get high priority first
- ldr \tmp, =irq_prio_h
- teq \irqstat, #0
-#ifdef IOMD_BASE
- ldreqb \irqstat, [\base, #IOMD_DMAREQ] @ get dma
- addeq \tmp, \tmp, #256 @ irq_prio_h table size
- teqeq \irqstat, #0
- bne 2406f
-#endif
- ldreqb \irqstat, [\base, #IOMD_IRQREQA] @ get low priority
- addeq \tmp, \tmp, #256 @ irq_prio_d table size
- teqeq \irqstat, #0
-#ifdef IOMD_IRQREQC
- ldreqb \irqstat, [\base, #IOMD_IRQREQC]
- addeq \tmp, \tmp, #256 @ irq_prio_l table size
- teqeq \irqstat, #0
-#endif
-#ifdef IOMD_IRQREQD
- ldreqb \irqstat, [\base, #IOMD_IRQREQD]
- addeq \tmp, \tmp, #256 @ irq_prio_lc table size
- teqeq \irqstat, #0
-#endif
-2406: ldrneb \irqnr, [\tmp, \irqstat] @ get IRQ number
- .endm
-
-/*
- * Interrupt table (incorporates priority). Please note that we
- * rely on the order of these tables (see above code).
- */
- .align 5
-irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
-#ifdef IOMD_BASE
-irq_prio_d: .byte 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
-#endif
-irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
-#ifdef IOMD_IRQREQC
-irq_prio_lc: .byte 24,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
- .byte 28,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
- .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
-#endif
-#ifdef IOMD_IRQREQD
-irq_prio_ld: .byte 40,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
- .byte 44,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
- .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
-#endif
-
diff --git a/arch/arm/include/asm/hardware/icst.h b/arch/arm/include/asm/hardware/icst.h
deleted file mode 100644
index 794220b087d2..000000000000
--- a/arch/arm/include/asm/hardware/icst.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/icst.h
- *
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Support functions for calculating clocks/divisors for the ICST
- * clock generators. See http://www.idt.com/ for more information
- * on these devices.
- */
-#ifndef ASMARM_HARDWARE_ICST_H
-#define ASMARM_HARDWARE_ICST_H
-
-struct icst_params {
- unsigned long ref;
- unsigned long vco_max; /* inclusive */
- unsigned long vco_min; /* exclusive */
- unsigned short vd_min; /* inclusive */
- unsigned short vd_max; /* inclusive */
- unsigned char rd_min; /* inclusive */
- unsigned char rd_max; /* inclusive */
- const unsigned char *s2div; /* chip specific s2div array */
- const unsigned char *idx2s; /* chip specific idx2s array */
-};
-
-struct icst_vco {
- unsigned short v;
- unsigned char r;
- unsigned char s;
-};
-
-unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco);
-struct icst_vco icst_hz_to_vco(const struct icst_params *p, unsigned long freq);
-
-/*
- * ICST307 VCO frequency must be between 6MHz and 200MHz (3.3 or 5V).
- * This frequency is pre-output divider.
- */
-#define ICST307_VCO_MIN 6000000
-#define ICST307_VCO_MAX 200000000
-
-extern const unsigned char icst307_s2div[];
-extern const unsigned char icst307_idx2s[];
-
-/*
- * ICST525 VCO frequency must be between 10MHz and 200MHz (3V) or 320MHz (5V).
- * This frequency is pre-output divider.
- */
-#define ICST525_VCO_MIN 10000000
-#define ICST525_VCO_MAX_3V 200000000
-#define ICST525_VCO_MAX_5V 320000000
-
-extern const unsigned char icst525_s2div[];
-extern const unsigned char icst525_idx2s[];
-
-#endif
diff --git a/arch/arm/include/asm/hardware/ioc.h b/arch/arm/include/asm/hardware/ioc.h
index 1f6b8013becb..6edd27fcd048 100644
--- a/arch/arm/include/asm/hardware/ioc.h
+++ b/arch/arm/include/asm/hardware/ioc.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/ioc.h
*
* Copyright (C) Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
 * Use these macros to read/write the IOC. All they do is perform the actual
 * read/write.
*/
diff --git a/arch/arm/include/asm/hardware/iomd.h b/arch/arm/include/asm/hardware/iomd.h
index f9ee69e4f53e..53006ba5350f 100644
--- a/arch/arm/include/asm/hardware/iomd.h
+++ b/arch/arm/include/asm/hardware/iomd.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/iomd.h
*
* Copyright (C) 1999 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
 * This file contains information about the IOMD ASIC used in the
* Acorn RiscPC and subsequently integrated into the CLPS7500 chips.
*/
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
deleted file mode 100644
index 9b28f1243bdc..000000000000
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ /dev/null
@@ -1,962 +0,0 @@
-/*
- * Copyright © 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#ifndef _ADMA_H
-#define _ADMA_H
-#include <linux/types.h>
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <asm/hardware/iop_adma.h>
-
-/* Memory copy units */
-#define DMA_CCR(chan) (chan->mmr_base + 0x0)
-#define DMA_CSR(chan) (chan->mmr_base + 0x4)
-#define DMA_DAR(chan) (chan->mmr_base + 0xc)
-#define DMA_NDAR(chan) (chan->mmr_base + 0x10)
-#define DMA_PADR(chan) (chan->mmr_base + 0x14)
-#define DMA_PUADR(chan) (chan->mmr_base + 0x18)
-#define DMA_LADR(chan) (chan->mmr_base + 0x1c)
-#define DMA_BCR(chan) (chan->mmr_base + 0x20)
-#define DMA_DCR(chan) (chan->mmr_base + 0x24)
-
-/* Application accelerator unit */
-#define AAU_ACR(chan) (chan->mmr_base + 0x0)
-#define AAU_ASR(chan) (chan->mmr_base + 0x4)
-#define AAU_ADAR(chan) (chan->mmr_base + 0x8)
-#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
-#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
-#define AAU_DAR(chan) (chan->mmr_base + 0x20)
-#define AAU_ABCR(chan) (chan->mmr_base + 0x24)
-#define AAU_ADCR(chan) (chan->mmr_base + 0x28)
-#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
-#define AAU_EDCR0_IDX 8
-#define AAU_EDCR1_IDX 17
-#define AAU_EDCR2_IDX 26
-
-#define DMA0_ID 0
-#define DMA1_ID 1
-#define AAU_ID 2
-
-struct iop3xx_aau_desc_ctrl {
- unsigned int int_en:1;
- unsigned int blk1_cmd_ctrl:3;
- unsigned int blk2_cmd_ctrl:3;
- unsigned int blk3_cmd_ctrl:3;
- unsigned int blk4_cmd_ctrl:3;
- unsigned int blk5_cmd_ctrl:3;
- unsigned int blk6_cmd_ctrl:3;
- unsigned int blk7_cmd_ctrl:3;
- unsigned int blk8_cmd_ctrl:3;
- unsigned int blk_ctrl:2;
- unsigned int dual_xor_en:1;
- unsigned int tx_complete:1;
- unsigned int zero_result_err:1;
- unsigned int zero_result_en:1;
- unsigned int dest_write_en:1;
-};
-
-struct iop3xx_aau_e_desc_ctrl {
- unsigned int reserved:1;
- unsigned int blk1_cmd_ctrl:3;
- unsigned int blk2_cmd_ctrl:3;
- unsigned int blk3_cmd_ctrl:3;
- unsigned int blk4_cmd_ctrl:3;
- unsigned int blk5_cmd_ctrl:3;
- unsigned int blk6_cmd_ctrl:3;
- unsigned int blk7_cmd_ctrl:3;
- unsigned int blk8_cmd_ctrl:3;
- unsigned int reserved2:7;
-};
-
-struct iop3xx_dma_desc_ctrl {
- unsigned int pci_transaction:4;
- unsigned int int_en:1;
- unsigned int dac_cycle_en:1;
- unsigned int mem_to_mem_en:1;
- unsigned int crc_data_tx_en:1;
- unsigned int crc_gen_en:1;
- unsigned int crc_seed_dis:1;
- unsigned int reserved:21;
- unsigned int crc_tx_complete:1;
-};
-
-struct iop3xx_desc_dma {
- u32 next_desc;
- union {
- u32 pci_src_addr;
- u32 pci_dest_addr;
- u32 src_addr;
- };
- union {
- u32 upper_pci_src_addr;
- u32 upper_pci_dest_addr;
- };
- union {
- u32 local_pci_src_addr;
- u32 local_pci_dest_addr;
- u32 dest_addr;
- };
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_dma_desc_ctrl desc_ctrl_field;
- };
- u32 crc_addr;
-};
-
-struct iop3xx_desc_aau {
- u32 next_desc;
- u32 src[4];
- u32 dest_addr;
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_aau_desc_ctrl desc_ctrl_field;
- };
- union {
- u32 src_addr;
- u32 e_desc_ctrl;
- struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
- } src_edc[31];
-};
-
-struct iop3xx_aau_gfmr {
- unsigned int gfmr1:8;
- unsigned int gfmr2:8;
- unsigned int gfmr3:8;
- unsigned int gfmr4:8;
-};
-
-struct iop3xx_desc_pq_xor {
- u32 next_desc;
- u32 src[3];
- union {
- u32 data_mult1;
- struct iop3xx_aau_gfmr data_mult1_field;
- };
- u32 dest_addr;
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_aau_desc_ctrl desc_ctrl_field;
- };
- union {
- u32 src_addr;
- u32 e_desc_ctrl;
- struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
- u32 data_multiplier;
- struct iop3xx_aau_gfmr data_mult_field;
- u32 reserved;
- } src_edc_gfmr[19];
-};
-
-struct iop3xx_desc_dual_xor {
- u32 next_desc;
- u32 src0_addr;
- u32 src1_addr;
- u32 h_src_addr;
- u32 d_src_addr;
- u32 h_dest_addr;
- u32 byte_count;
- union {
- u32 desc_ctrl;
- struct iop3xx_aau_desc_ctrl desc_ctrl_field;
- };
- u32 d_dest_addr;
-};
-
-union iop3xx_desc {
- struct iop3xx_desc_aau *aau;
- struct iop3xx_desc_dma *dma;
- struct iop3xx_desc_pq_xor *pq_xor;
- struct iop3xx_desc_dual_xor *dual_xor;
- void *ptr;
-};
-
-/* No support for p+q operations */
-static inline int
-iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
-{
- BUG();
- return 0;
-}
-
-static inline void
-iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
-{
- BUG();
-}
-
-static inline void
-iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
- dma_addr_t addr, unsigned char coef)
-{
- BUG();
-}
-
-static inline int
-iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
-{
- BUG();
- return 0;
-}
-
-static inline void
-iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
-{
- BUG();
-}
-
-#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
-
-static inline void
-iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
- dma_addr_t *src)
-{
- BUG();
-}
-
-static inline int iop_adma_get_max_xor(void)
-{
- return 32;
-}
-
-static inline int iop_adma_get_max_pq(void)
-{
- BUG();
- return 0;
-}
-
-static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
-{
- int id = chan->device->id;
-
- switch (id) {
- case DMA0_ID:
- case DMA1_ID:
- return __raw_readl(DMA_DAR(chan));
- case AAU_ID:
- return __raw_readl(AAU_ADAR(chan));
- default:
- BUG();
- }
- return 0;
-}
-
-static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
- u32 next_desc_addr)
-{
- int id = chan->device->id;
-
- switch (id) {
- case DMA0_ID:
- case DMA1_ID:
- __raw_writel(next_desc_addr, DMA_NDAR(chan));
- break;
- case AAU_ID:
- __raw_writel(next_desc_addr, AAU_ANDAR(chan));
- break;
- }
-
-}
-
-#define IOP_ADMA_STATUS_BUSY (1 << 10)
-#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
-#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
-#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
-
-static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
- return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
-}
-
-static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
- int num_slots)
-{
- /* num_slots will only ever be 1, 2, 4, or 8 */
- return (desc->idx & (num_slots - 1)) ? 0 : 1;
-}
-
-/* to do: support large (i.e. > hw max) buffer sizes */
-static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
-{
- *slots_per_op = 1;
- return 1;
-}
-
-/* to do: support large (i.e. > hw max) buffer sizes */
-static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
-{
- *slots_per_op = 1;
- return 1;
-}
-
-static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
- int *slots_per_op)
-{
- static const char slot_count_table[] = {
- 1, 1, 1, 1, /* 01 - 04 */
- 2, 2, 2, 2, /* 05 - 08 */
- 4, 4, 4, 4, /* 09 - 12 */
- 4, 4, 4, 4, /* 13 - 16 */
- 8, 8, 8, 8, /* 17 - 20 */
- 8, 8, 8, 8, /* 21 - 24 */
- 8, 8, 8, 8, /* 25 - 28 */
- 8, 8, 8, 8, /* 29 - 32 */
- };
- *slots_per_op = slot_count_table[src_cnt - 1];
- return *slots_per_op;
-}
-
-static inline int
-iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return iop_chan_memcpy_slot_count(0, slots_per_op);
- case AAU_ID:
- return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
- default:
- BUG();
- }
- return 0;
-}
-
-static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
- int *slots_per_op)
-{
- int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
-
- if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
- return slot_cnt;
-
- len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
- while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
- len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
- slot_cnt += *slots_per_op;
- }
-
- slot_cnt += *slots_per_op;
-
- return slot_cnt;
-}
-
-/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
- * descriptors
- */
-static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
- int *slots_per_op)
-{
- int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
-
- if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
- return slot_cnt;
-
- len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
- len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- slot_cnt += *slots_per_op;
- }
-
- slot_cnt += *slots_per_op;
-
- return slot_cnt;
-}
-
-static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
-{
- return 0;
-}
-
-static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return hw_desc.dma->dest_addr;
- case AAU_ID:
- return hw_desc.aau->dest_addr;
- default:
- BUG();
- }
- return 0;
-}
-
-
-static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- BUG();
- return 0;
-}
-
-static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return hw_desc.dma->byte_count;
- case AAU_ID:
- return hw_desc.aau->byte_count;
- default:
- BUG();
- }
- return 0;
-}
-
-/* translate the src_idx to a descriptor word index */
-static inline int __desc_idx(int src_idx)
-{
- static const int desc_idx_table[] = { 0, 0, 0, 0,
- 0, 1, 2, 3,
- 5, 6, 7, 8,
- 9, 10, 11, 12,
- 14, 15, 16, 17,
- 18, 19, 20, 21,
- 23, 24, 25, 26,
- 27, 28, 29, 30,
- };
-
- return desc_idx_table[src_idx];
-}
-
-static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan,
- int src_idx)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return hw_desc.dma->src_addr;
- case AAU_ID:
- break;
- default:
- BUG();
- }
-
- if (src_idx < 4)
- return hw_desc.aau->src[src_idx];
- else
- return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
-}
-
-static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
- int src_idx, dma_addr_t addr)
-{
- if (src_idx < 4)
- hw_desc->src[src_idx] = addr;
- else
- hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
-}
-
-static inline void
-iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
-{
- struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
- union {
- u32 value;
- struct iop3xx_dma_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- u_desc_ctrl.field.mem_to_mem_en = 1;
- u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
- hw_desc->upper_pci_src_addr = 0;
- hw_desc->crc_addr = 0;
-}
-
-static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
- u_desc_ctrl.field.dest_write_en = 1;
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
-}
-
-static inline u32
-iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
- unsigned long flags)
-{
- int i, shift;
- u32 edcr;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- switch (src_cnt) {
- case 25 ... 32:
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- edcr = 0;
- shift = 1;
- for (i = 24; i < src_cnt; i++) {
- edcr |= (1 << shift);
- shift += 3;
- }
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
- src_cnt = 24;
- /* fall through */
- case 17 ... 24:
- if (!u_desc_ctrl.field.blk_ctrl) {
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- }
- edcr = 0;
- shift = 1;
- for (i = 16; i < src_cnt; i++) {
- edcr |= (1 << shift);
- shift += 3;
- }
- hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
- src_cnt = 16;
- /* fall through */
- case 9 ... 16:
- if (!u_desc_ctrl.field.blk_ctrl)
- u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
- edcr = 0;
- shift = 1;
- for (i = 8; i < src_cnt; i++) {
- edcr |= (1 << shift);
- shift += 3;
- }
- hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
- src_cnt = 8;
- /* fall through */
- case 2 ... 8:
- shift = 1;
- for (i = 0; i < src_cnt; i++) {
- u_desc_ctrl.value |= (1 << shift);
- shift += 3;
- }
-
- if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
- u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
- }
-
- u_desc_ctrl.field.dest_write_en = 1;
- u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
-
- return u_desc_ctrl.value;
-}
-
-static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
-}
-
-/* return the number of operations */
-static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
- struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
- int i, j;
-
- hw_desc = desc->hw_desc;
-
- for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
- i += slots_per_op, j++) {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
- u_desc_ctrl.field.dest_write_en = 0;
- u_desc_ctrl.field.zero_result_en = 1;
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- iter->desc_ctrl = u_desc_ctrl.value;
-
- /* for the subsequent descriptors preserve the store queue
- * and chain them together
- */
- if (i) {
- prev_hw_desc =
- iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
- prev_hw_desc->next_desc =
- (u32) (desc->async_tx.phys + (i << 5));
- }
- }
-
- return j;
-}
-
-static inline void
-iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
- unsigned long flags)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- union {
- u32 value;
- struct iop3xx_aau_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = 0;
- switch (src_cnt) {
- case 25 ... 32:
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
- /* fall through */
- case 17 ... 24:
- if (!u_desc_ctrl.field.blk_ctrl) {
- hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
- u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
- }
- hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
- /* fall through */
- case 9 ... 16:
- if (!u_desc_ctrl.field.blk_ctrl)
- u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
- hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
- /* fall through */
- case 1 ... 8:
- if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
- u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
- }
-
- u_desc_ctrl.field.dest_write_en = 0;
- u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
- hw_desc->desc_ctrl = u_desc_ctrl.value;
-}
-
-static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan,
- u32 byte_count)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- hw_desc.dma->byte_count = byte_count;
- break;
- case AAU_ID:
- hw_desc.aau->byte_count = byte_count;
- break;
- default:
- BUG();
- }
-}
-
-static inline void
-iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- iop_desc_init_memcpy(desc, 1);
- hw_desc.dma->byte_count = 0;
- hw_desc.dma->dest_addr = 0;
- hw_desc.dma->src_addr = 0;
- break;
- case AAU_ID:
- iop_desc_init_null_xor(desc, 2, 1);
- hw_desc.aau->byte_count = 0;
- hw_desc.aau->dest_addr = 0;
- hw_desc.aau->src[0] = 0;
- hw_desc.aau->src[1] = 0;
- break;
- default:
- BUG();
- }
-}
-
-static inline void
-iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
-{
- int slots_per_op = desc->slots_per_op;
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
- int i = 0;
-
- if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
- hw_desc->byte_count = len;
- } else {
- do {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
- i += slots_per_op;
- } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
-
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iter->byte_count = len;
- }
-}
-
-static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan,
- dma_addr_t addr)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- hw_desc.dma->dest_addr = addr;
- break;
- case AAU_ID:
- hw_desc.aau->dest_addr = addr;
- break;
- default:
- BUG();
- }
-}
-
-static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
- dma_addr_t addr)
-{
- struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
- hw_desc->src_addr = addr;
-}
-
-static inline void
-iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
- dma_addr_t addr)
-{
-
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
- int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
- int i;
-
- for (i = 0; (slot_cnt -= slots_per_op) >= 0;
- i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
- }
-}
-
-static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
- int src_idx, dma_addr_t addr)
-{
-
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
- int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
- int i;
-
- for (i = 0; (slot_cnt -= slots_per_op) >= 0;
- i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
- iter = iop_hw_desc_slot_idx(hw_desc, i);
- iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
- }
-}
-
-static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
- u32 next_desc_addr)
-{
- /* hw_desc->next_desc is the same location for all channels */
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- iop_paranoia(hw_desc.dma->next_desc);
- hw_desc.dma->next_desc = next_desc_addr;
-}
-
-static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
-{
- /* hw_desc->next_desc is the same location for all channels */
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
- return hw_desc.dma->next_desc;
-}
-
-static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
-{
- /* hw_desc->next_desc is the same location for all channels */
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
- hw_desc.dma->next_desc = 0;
-}
-
-static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
- u32 val)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- hw_desc->src[0] = val;
-}
-
-static inline enum sum_check_flags
-iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
-{
- struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
- struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
-
- iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
- return desc_ctrl.zero_result_err << SUM_CHECK_P;
-}
-
-static inline void iop_chan_append(struct iop_adma_chan *chan)
-{
- u32 dma_chan_ctrl;
-
- dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
- dma_chan_ctrl |= 0x2;
- __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
-{
- return __raw_readl(DMA_CSR(chan));
-}
-
-static inline void iop_chan_disable(struct iop_adma_chan *chan)
-{
- u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
- dma_chan_ctrl &= ~1;
- __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline void iop_chan_enable(struct iop_adma_chan *chan)
-{
- u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
-
- dma_chan_ctrl |= 1;
- __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
-}
-
-static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
- status &= (1 << 9);
- __raw_writel(status, DMA_CSR(chan));
-}
-
-static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
- status &= (1 << 8);
- __raw_writel(status, DMA_CSR(chan));
-}
-
-static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
-{
- u32 status = __raw_readl(DMA_CSR(chan));
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
- break;
- case AAU_ID:
- status &= (1 << 5);
- break;
- default:
- BUG();
- }
-
- __raw_writel(status, DMA_CSR(chan));
-}
-
-static inline int
-iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
-{
- return 0;
-}
-
-static inline int
-iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
-{
- return 0;
-}
-
-static inline int
-iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
-{
- return 0;
-}
-
-static inline int
-iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
-{
- return test_bit(5, &status);
-}
-
-static inline int
-iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return test_bit(2, &status);
- default:
- return 0;
- }
-}
-
-static inline int
-iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return test_bit(3, &status);
- default:
- return 0;
- }
-}
-
-static inline int
-iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
-{
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return test_bit(1, &status);
- default:
- return 0;
- }
-}
-#endif /* _ADMA_H */
diff --git a/arch/arm/include/asm/hardware/iop3xx-gpio.h b/arch/arm/include/asm/hardware/iop3xx-gpio.h
deleted file mode 100644
index 9eda7dc92ad8..000000000000
--- a/arch/arm/include/asm/hardware/iop3xx-gpio.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/iop3xx-gpio.h
- *
- * IOP3xx GPIO wrappers
- *
- * Copyright (c) 2008 Arnaud Patard <arnaud.patard@rtp-net.org>
- * Based on IXP4XX gpio.h file
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARM_HARDWARE_IOP3XX_GPIO_H
-#define __ASM_ARM_HARDWARE_IOP3XX_GPIO_H
-
-#include <mach/hardware.h>
-#include <asm-generic/gpio.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-#define IOP3XX_N_GPIOS 8
-
-static inline int gpio_get_value(unsigned gpio)
-{
- if (gpio > IOP3XX_N_GPIOS)
- return __gpio_get_value(gpio);
-
- return gpio_line_get(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
- if (gpio > IOP3XX_N_GPIOS) {
- __gpio_set_value(gpio, value);
- return;
- }
- gpio_line_set(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned gpio)
-{
- if (gpio < IOP3XX_N_GPIOS)
- return 0;
- else
- return __gpio_cansleep(gpio);
-}
-
-/*
- * The GPIOs do not generate any interrupts.
- * Note: the manuals are not clear about this.
- */
-static inline int gpio_to_irq(int gpio)
-{
- return -EINVAL;
-}
-
-static inline int irq_to_gpio(int gpio)
-{
- return -EINVAL;
-}
-
-#endif
-
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
deleted file mode 100644
index 423744bf18eb..000000000000
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/iop3xx.h
- *
- * Intel IOP32X and IOP33X register definitions
- *
- * Author: Rory Bolt <rorybolt@pacbell.net>
- * Copyright (C) 2002 Rory Bolt
- * Copyright (C) 2004 Intel Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __IOP3XX_H
-#define __IOP3XX_H
-
-/*
- * IOP3XX GPIO handling
- */
-#define GPIO_IN 0
-#define GPIO_OUT 1
-#define GPIO_LOW 0
-#define GPIO_HIGH 1
-#define IOP3XX_GPIO_LINE(x) (x)
-
-#ifndef __ASSEMBLY__
-extern void gpio_line_config(int line, int direction);
-extern int gpio_line_get(int line);
-extern void gpio_line_set(int line, int value);
-extern int init_atu;
-extern int iop3xx_get_init_atu(void);
-#endif
-
-
-/*
- * IOP3XX processor registers
- */
-#define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000
-#define IOP3XX_PERIPHERAL_SIZE 0x00002000
-#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
- IOP3XX_PERIPHERAL_SIZE - 1)
-#define IOP3XX_PERIPHERAL_UPPER_VA (IOP3XX_PERIPHERAL_VIRT_BASE +\
- IOP3XX_PERIPHERAL_SIZE - 1)
-#define IOP3XX_PMMR_PHYS_TO_VIRT(addr) (u32) ((u32) (addr) -\
- (IOP3XX_PERIPHERAL_PHYS_BASE\
- - IOP3XX_PERIPHERAL_VIRT_BASE))
-#define IOP3XX_REG_ADDR(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + (reg))
-
-/* Address Translation Unit */
-#define IOP3XX_ATUVID (volatile u16 *)IOP3XX_REG_ADDR(0x0100)
-#define IOP3XX_ATUDID (volatile u16 *)IOP3XX_REG_ADDR(0x0102)
-#define IOP3XX_ATUCMD (volatile u16 *)IOP3XX_REG_ADDR(0x0104)
-#define IOP3XX_ATUSR (volatile u16 *)IOP3XX_REG_ADDR(0x0106)
-#define IOP3XX_ATURID (volatile u8 *)IOP3XX_REG_ADDR(0x0108)
-#define IOP3XX_ATUCCR (volatile u32 *)IOP3XX_REG_ADDR(0x0109)
-#define IOP3XX_ATUCLSR (volatile u8 *)IOP3XX_REG_ADDR(0x010c)
-#define IOP3XX_ATULT (volatile u8 *)IOP3XX_REG_ADDR(0x010d)
-#define IOP3XX_ATUHTR (volatile u8 *)IOP3XX_REG_ADDR(0x010e)
-#define IOP3XX_ATUBIST (volatile u8 *)IOP3XX_REG_ADDR(0x010f)
-#define IOP3XX_IABAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0110)
-#define IOP3XX_IAUBAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0114)
-#define IOP3XX_IABAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0118)
-#define IOP3XX_IAUBAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x011c)
-#define IOP3XX_IABAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0120)
-#define IOP3XX_IAUBAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0124)
-#define IOP3XX_ASVIR (volatile u16 *)IOP3XX_REG_ADDR(0x012c)
-#define IOP3XX_ASIR (volatile u16 *)IOP3XX_REG_ADDR(0x012e)
-#define IOP3XX_ERBAR (volatile u32 *)IOP3XX_REG_ADDR(0x0130)
-#define IOP3XX_ATUILR (volatile u8 *)IOP3XX_REG_ADDR(0x013c)
-#define IOP3XX_ATUIPR (volatile u8 *)IOP3XX_REG_ADDR(0x013d)
-#define IOP3XX_ATUMGNT (volatile u8 *)IOP3XX_REG_ADDR(0x013e)
-#define IOP3XX_ATUMLAT (volatile u8 *)IOP3XX_REG_ADDR(0x013f)
-#define IOP3XX_IALR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0140)
-#define IOP3XX_IATVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0144)
-#define IOP3XX_ERLR (volatile u32 *)IOP3XX_REG_ADDR(0x0148)
-#define IOP3XX_ERTVR (volatile u32 *)IOP3XX_REG_ADDR(0x014c)
-#define IOP3XX_IALR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0150)
-#define IOP3XX_IALR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0154)
-#define IOP3XX_IATVR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0158)
-#define IOP3XX_OIOWTVR (volatile u32 *)IOP3XX_REG_ADDR(0x015c)
-#define IOP3XX_OMWTVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0160)
-#define IOP3XX_OUMWTVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0164)
-#define IOP3XX_OMWTVR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0168)
-#define IOP3XX_OUMWTVR1 (volatile u32 *)IOP3XX_REG_ADDR(0x016c)
-#define IOP3XX_OUDWTVR (volatile u32 *)IOP3XX_REG_ADDR(0x0178)
-#define IOP3XX_ATUCR (volatile u32 *)IOP3XX_REG_ADDR(0x0180)
-#define IOP3XX_PCSR (volatile u32 *)IOP3XX_REG_ADDR(0x0184)
-#define IOP3XX_ATUISR (volatile u32 *)IOP3XX_REG_ADDR(0x0188)
-#define IOP3XX_ATUIMR (volatile u32 *)IOP3XX_REG_ADDR(0x018c)
-#define IOP3XX_IABAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0190)
-#define IOP3XX_IAUBAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0194)
-#define IOP3XX_IALR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0198)
-#define IOP3XX_IATVR3 (volatile u32 *)IOP3XX_REG_ADDR(0x019c)
-#define IOP3XX_OCCAR (volatile u32 *)IOP3XX_REG_ADDR(0x01a4)
-#define IOP3XX_OCCDR (volatile u32 *)IOP3XX_REG_ADDR(0x01ac)
-#define IOP3XX_PDSCR (volatile u32 *)IOP3XX_REG_ADDR(0x01bc)
-#define IOP3XX_PMCAPID (volatile u8 *)IOP3XX_REG_ADDR(0x01c0)
-#define IOP3XX_PMNEXT (volatile u8 *)IOP3XX_REG_ADDR(0x01c1)
-#define IOP3XX_APMCR (volatile u16 *)IOP3XX_REG_ADDR(0x01c2)
-#define IOP3XX_APMCSR (volatile u16 *)IOP3XX_REG_ADDR(0x01c4)
-#define IOP3XX_PCIXCAPID (volatile u8 *)IOP3XX_REG_ADDR(0x01e0)
-#define IOP3XX_PCIXNEXT (volatile u8 *)IOP3XX_REG_ADDR(0x01e1)
-#define IOP3XX_PCIXCMD (volatile u16 *)IOP3XX_REG_ADDR(0x01e2)
-#define IOP3XX_PCIXSR (volatile u32 *)IOP3XX_REG_ADDR(0x01e4)
-#define IOP3XX_PCIIRSR (volatile u32 *)IOP3XX_REG_ADDR(0x01ec)
-#define IOP3XX_PCSR_OUT_Q_BUSY (1 << 15)
-#define IOP3XX_PCSR_IN_Q_BUSY (1 << 14)
-#define IOP3XX_ATUCR_OUT_EN (1 << 1)
-
-#define IOP3XX_INIT_ATU_DEFAULT 0
-#define IOP3XX_INIT_ATU_DISABLE -1
-#define IOP3XX_INIT_ATU_ENABLE 1
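
/*
 * A minimal usage sketch (helper name hypothetical): the register
 * macros above expand to casted pointers into the statically mapped
 * peripheral window, so a read is just a volatile dereference. This
 * assumes iop3xx_map_io() has already set up the mapping.
 */
static inline u16 iop3xx_read_atu_vendor_id(void)
{
	return *IOP3XX_ATUVID;		/* u16 load from base + 0x0100 */
}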
-
-/* Messaging Unit */
-#define IOP3XX_IMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0310)
-#define IOP3XX_IMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0314)
-#define IOP3XX_OMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0318)
-#define IOP3XX_OMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x031c)
-#define IOP3XX_IDR (volatile u32 *)IOP3XX_REG_ADDR(0x0320)
-#define IOP3XX_IISR (volatile u32 *)IOP3XX_REG_ADDR(0x0324)
-#define IOP3XX_IIMR (volatile u32 *)IOP3XX_REG_ADDR(0x0328)
-#define IOP3XX_ODR (volatile u32 *)IOP3XX_REG_ADDR(0x032c)
-#define IOP3XX_OISR (volatile u32 *)IOP3XX_REG_ADDR(0x0330)
-#define IOP3XX_OIMR (volatile u32 *)IOP3XX_REG_ADDR(0x0334)
-#define IOP3XX_MUCR (volatile u32 *)IOP3XX_REG_ADDR(0x0350)
-#define IOP3XX_QBAR (volatile u32 *)IOP3XX_REG_ADDR(0x0354)
-#define IOP3XX_IFHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0360)
-#define IOP3XX_IFTPR (volatile u32 *)IOP3XX_REG_ADDR(0x0364)
-#define IOP3XX_IPHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0368)
-#define IOP3XX_IPTPR (volatile u32 *)IOP3XX_REG_ADDR(0x036c)
-#define IOP3XX_OFHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0370)
-#define IOP3XX_OFTPR (volatile u32 *)IOP3XX_REG_ADDR(0x0374)
-#define IOP3XX_OPHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0378)
-#define IOP3XX_OPTPR (volatile u32 *)IOP3XX_REG_ADDR(0x037c)
-#define IOP3XX_IAR (volatile u32 *)IOP3XX_REG_ADDR(0x0380)
-
-/* DMA Controller */
-#define IOP3XX_DMA_PHYS_BASE(chan) (IOP3XX_PERIPHERAL_PHYS_BASE + \
- (0x400 + (chan << 6)))
-#define IOP3XX_DMA_UPPER_PA(chan) (IOP3XX_DMA_PHYS_BASE(chan) + 0x27)
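
/*
 * Worked example (not from the original header): each channel's
 * register block is 0x40 bytes, hence the (chan << 6) scaling:
 *
 *	IOP3XX_DMA_PHYS_BASE(0) == IOP3XX_PERIPHERAL_PHYS_BASE + 0x400
 *	IOP3XX_DMA_PHYS_BASE(1) == IOP3XX_PERIPHERAL_PHYS_BASE + 0x440
 */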
-
-/* Peripheral bus interface */
-#define IOP3XX_PBCR (volatile u32 *)IOP3XX_REG_ADDR(0x0680)
-#define IOP3XX_PBISR (volatile u32 *)IOP3XX_REG_ADDR(0x0684)
-#define IOP3XX_PBBAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0688)
-#define IOP3XX_PBLR0 (volatile u32 *)IOP3XX_REG_ADDR(0x068c)
-#define IOP3XX_PBBAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0690)
-#define IOP3XX_PBLR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0694)
-#define IOP3XX_PBBAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0698)
-#define IOP3XX_PBLR2 (volatile u32 *)IOP3XX_REG_ADDR(0x069c)
-#define IOP3XX_PBBAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x06a0)
-#define IOP3XX_PBLR3 (volatile u32 *)IOP3XX_REG_ADDR(0x06a4)
-#define IOP3XX_PBBAR4 (volatile u32 *)IOP3XX_REG_ADDR(0x06a8)
-#define IOP3XX_PBLR4 (volatile u32 *)IOP3XX_REG_ADDR(0x06ac)
-#define IOP3XX_PBBAR5 (volatile u32 *)IOP3XX_REG_ADDR(0x06b0)
-#define IOP3XX_PBLR5 (volatile u32 *)IOP3XX_REG_ADDR(0x06b4)
-#define IOP3XX_PMBR0 (volatile u32 *)IOP3XX_REG_ADDR(0x06c0)
-#define IOP3XX_PMBR1 (volatile u32 *)IOP3XX_REG_ADDR(0x06e0)
-#define IOP3XX_PMBR2 (volatile u32 *)IOP3XX_REG_ADDR(0x06e4)
-
-/* Peripheral performance monitoring unit */
-#define IOP3XX_GTMR (volatile u32 *)IOP3XX_REG_ADDR(0x0700)
-#define IOP3XX_ESR (volatile u32 *)IOP3XX_REG_ADDR(0x0704)
-#define IOP3XX_EMISR (volatile u32 *)IOP3XX_REG_ADDR(0x0708)
-#define IOP3XX_GTSR (volatile u32 *)IOP3XX_REG_ADDR(0x0710)
-/* PERCR0 DOESN'T EXIST - index from 1! */
-#define IOP3XX_PERCR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0710)
-
-/* General Purpose I/O */
-#define IOP3XX_GPOE (volatile u32 *)IOP3XX_GPIO_REG(0x0000)
-#define IOP3XX_GPID (volatile u32 *)IOP3XX_GPIO_REG(0x0004)
-#define IOP3XX_GPOD (volatile u32 *)IOP3XX_GPIO_REG(0x0008)
-
-/* Timers */
-#define IOP3XX_TU_TMR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0000)
-#define IOP3XX_TU_TMR1 (volatile u32 *)IOP3XX_TIMER_REG(0x0004)
-#define IOP3XX_TU_TCR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0008)
-#define IOP3XX_TU_TCR1 (volatile u32 *)IOP3XX_TIMER_REG(0x000c)
-#define IOP3XX_TU_TRR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0010)
-#define IOP3XX_TU_TRR1 (volatile u32 *)IOP3XX_TIMER_REG(0x0014)
-#define IOP3XX_TU_TISR (volatile u32 *)IOP3XX_TIMER_REG(0x0018)
-#define IOP3XX_TU_WDTCR (volatile u32 *)IOP3XX_TIMER_REG(0x001c)
-#define IOP_TMR_EN 0x02
-#define IOP_TMR_RELOAD 0x04
-#define IOP_TMR_PRIVILEGED 0x08
-#define IOP_TMR_RATIO_1_1 0x00
-
-/* Watchdog timer definitions */
-#define IOP_WDTCR_EN_ARM 0x1e1e1e1e
-#define IOP_WDTCR_EN 0xe1e1e1e1
-/* iop3xx does not support stopping the watchdog, so we just re-arm */
-#define IOP_WDTCR_DIS_ARM (IOP_WDTCR_EN_ARM)
-#define IOP_WDTCR_DIS (IOP_WDTCR_EN)
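
/*
 * A hedged sketch of the re-arm sequence these keys imply, using the
 * write_wdtcr() accessor defined later in this header (the helper
 * name is hypothetical):
 */
static inline void iop3xx_wdt_rearm(void)
{
	write_wdtcr(IOP_WDTCR_EN_ARM);	/* unlock ("arm") the control reg */
	write_wdtcr(IOP_WDTCR_EN);	/* then (re)start the watchdog */
}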
-
-/* Application accelerator unit */
-#define IOP3XX_AAU_PHYS_BASE (IOP3XX_PERIPHERAL_PHYS_BASE + 0x800)
-#define IOP3XX_AAU_UPPER_PA (IOP3XX_AAU_PHYS_BASE + 0xa7)
-
-/* I2C bus interface unit */
-#define IOP3XX_ICR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1680)
-#define IOP3XX_ISR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1684)
-#define IOP3XX_ISAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1688)
-#define IOP3XX_IDBR0 (volatile u32 *)IOP3XX_REG_ADDR(0x168c)
-#define IOP3XX_IBMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1694)
-#define IOP3XX_ICR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a0)
-#define IOP3XX_ISR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a4)
-#define IOP3XX_ISAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a8)
-#define IOP3XX_IDBR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16ac)
-#define IOP3XX_IBMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16b4)
-
-
-/*
- * IOP3XX I/O and Mem space regions for PCI autoconfiguration
- */
-#define IOP3XX_PCI_LOWER_MEM_PA 0x80000000
-#define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000
-
-#define IOP3XX_PCI_LOWER_IO_PA 0x90000000
-#define IOP3XX_PCI_LOWER_IO_BA 0x00000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <linux/reboot.h>
-
-void iop3xx_map_io(void);
-void iop_init_cp6_handler(void);
-void iop_init_time(unsigned long tickrate);
-void iop3xx_restart(enum reboot_mode, const char *);
-
-static inline u32 read_tmr0(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c0, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void write_tmr0(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c0, c1, 0" : : "r" (val));
-}
-
-static inline void write_tmr1(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c1, c1, 0" : : "r" (val));
-}
-
-static inline u32 read_tcr0(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c2, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void write_tcr0(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c2, c1, 0" : : "r" (val));
-}
-
-static inline u32 read_tcr1(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c3, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void write_tcr1(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c3, c1, 0" : : "r" (val));
-}
-
-static inline void write_trr0(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c4, c1, 0" : : "r" (val));
-}
-
-static inline void write_trr1(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c5, c1, 0" : : "r" (val));
-}
-
-static inline void write_tisr(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c6, c1, 0" : : "r" (val));
-}
-
-static inline u32 read_wdtcr(void)
-{
- u32 val;
- asm volatile("mrc p6, 0, %0, c7, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void write_wdtcr(u32 val)
-{
- asm volatile("mcr p6, 0, %0, c7, c1, 0" : : "r" (val));
-}
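
/*
 * Illustrative only: a periodic timer-0 setup composed from the cp6
 * accessors above, roughly what an iop3xx clockevent handler would do.
 * The exact order of operations is an assumption for this sketch.
 */
static inline void iop3xx_timer0_start_periodic(u32 reload)
{
	write_tmr0(0);			/* stop the timer while reprogramming */
	write_trr0(reload);		/* set the auto-reload value */
	write_tisr(1);			/* ack any pending timer 0 interrupt */
	write_tmr0(IOP_TMR_EN | IOP_TMR_RELOAD |
		   IOP_TMR_PRIVILEGED | IOP_TMR_RATIO_1_1);
}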
-
-extern unsigned long get_iop_tick_rate(void);
-
-/* Only iop13xx has these registers; we define them here to present a
- * common register interface for the iop_wdt driver.
- */
-#define IOP_RCSR_WDT (0)
-static inline u32 read_rcsr(void)
-{
- return 0;
-}
-static inline void write_wdtsr(u32 val)
-{
-}
-
-extern struct platform_device iop3xx_dma_0_channel;
-extern struct platform_device iop3xx_dma_1_channel;
-extern struct platform_device iop3xx_aau_channel;
-extern struct platform_device iop3xx_i2c0_device;
-extern struct platform_device iop3xx_i2c1_device;
-
-#endif
-
-
-#endif
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
deleted file mode 100644
index 122f86d8c991..000000000000
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright © 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#ifndef IOP_ADMA_H
-#define IOP_ADMA_H
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-#include <linux/interrupt.h>
-
-#define IOP_ADMA_SLOT_SIZE 32
-#define IOP_ADMA_THRESHOLD 4
-#ifdef DEBUG
-#define IOP_PARANOIA 1
-#else
-#define IOP_PARANOIA 0
-#endif
-#define iop_paranoia(x) BUG_ON(IOP_PARANOIA && (x))
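
/*
 * Usage sketch: with DEBUG unset, IOP_PARANOIA is 0 and the condition
 * short-circuits away, so sanity checks stay in the fast path for free,
 * e.g. (flag name hypothetical) iop_paranoia(!(desc->ctrl & DESC_VALID));
 */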
-
-/**
- * struct iop_adma_device - internal representation of an ADMA device
- * @pdev: Platform device
- * @id: HW ADMA Device selector
- * @dma_desc_pool: base of DMA descriptor region (DMA address)
- * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
- * @common: embedded struct dma_device
- */
-struct iop_adma_device {
- struct platform_device *pdev;
- int id;
- dma_addr_t dma_desc_pool;
- void *dma_desc_pool_virt;
- struct dma_device common;
-};
-
-/**
- * struct iop_adma_chan - internal representation of an ADMA device
- * @pending: allows batching of hardware operations
- * @lock: serializes enqueue/dequeue operations to the slot pool
- * @mmr_base: memory mapped register base
- * @chain: device chain view of the descriptors
- * @device: parent device
- * @common: common dmaengine channel object members
- * @last_used: place holder for allocation to continue from where it left off
- * @all_slots: complete domain of slots usable by the channel
- * @slots_allocated: records the actual size of the descriptor slot pool
- * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
- */
-struct iop_adma_chan {
- int pending;
- spinlock_t lock; /* protects the descriptor slot pool */
- void __iomem *mmr_base;
- struct list_head chain;
- struct iop_adma_device *device;
- struct dma_chan common;
- struct iop_adma_desc_slot *last_used;
- struct list_head all_slots;
- int slots_allocated;
- struct tasklet_struct irq_tasklet;
-};
-
-/**
- * struct iop_adma_desc_slot - IOP-ADMA software descriptor
- * @slot_node: node on the iop_adma_chan.all_slots list
- * @chain_node: node on the iop_adma_chan.chain list
- * @hw_desc: virtual address of the hardware descriptor chain
- * @phys: hardware address of the hardware descriptor chain
- * @group_head: first operation in a transaction
- * @slot_cnt: total slots used in a transaction (group of operations)
- * @slots_per_op: number of slots per operation
- * @idx: pool index
- * @unmap_src_cnt: number of xor sources
- * @unmap_len: transaction bytecount
- * @tx_list: list of descriptors that are associated with one operation
- * @async_tx: support for the async_tx api
- * @group_list: list of slots that make up a multi-descriptor transaction
- * for example transfer lengths larger than the supported hw max
- * @xor_check_result: result of the zero-sum check
- * @crc32_result: result of the CRC calculation
- */
-struct iop_adma_desc_slot {
- struct list_head slot_node;
- struct list_head chain_node;
- void *hw_desc;
- struct iop_adma_desc_slot *group_head;
- u16 slot_cnt;
- u16 slots_per_op;
- u16 idx;
- u16 unmap_src_cnt;
- size_t unmap_len;
- struct list_head tx_list;
- struct dma_async_tx_descriptor async_tx;
- union {
- u32 *xor_check_result;
- u32 *crc32_result;
- u32 *pq_check_result;
- };
-};
-
-struct iop_adma_platform_data {
- int hw_id;
- dma_cap_mask_t cap_mask;
- size_t pool_size;
-};
-
-#define to_iop_sw_desc(addr_hw_desc) \
- container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc)
-#define iop_hw_desc_slot_idx(hw_desc, idx) \
- ((void *) (((unsigned long) (hw_desc)) + ((idx) << 5)))
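
/*
 * Worked example: (idx) << 5 is idx * 32, i.e. idx * IOP_ADMA_SLOT_SIZE,
 * so iop_hw_desc_slot_idx(hw, 3) points 96 bytes past hw, at the
 * fourth 32-byte descriptor slot of the group.
 */
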
-#endif
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
deleted file mode 100644
index d36a73d7c0e8..000000000000
--- a/arch/arm/include/asm/hardware/it8152.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * linux/include/arm/hardware/it8152.h
- *
- * Copyright Compulab Ltd., 2006,2007
- * Mike Rapoport <mike@compulab.co.il>
- *
- * ITE 8152 companion chip register definitions
- */
-
-#ifndef __ASM_HARDWARE_IT8152_H
-#define __ASM_HARDWARE_IT8152_H
-
-#include <mach/irqs.h>
-
-extern void __iomem *it8152_base_address;
-
-#define IT8152_IO_BASE (it8152_base_address + 0x03e00000)
-#define IT8152_CFGREG_BASE (it8152_base_address + 0x03f00000)
-
-#define __REG_IT8152(x) (it8152_base_address + (x))
-
-#define IT8152_PCI_CFG_ADDR __REG_IT8152(0x3f00800)
-#define IT8152_PCI_CFG_DATA __REG_IT8152(0x3f00804)
-
-#define IT8152_INTC_LDCNIRR __REG_IT8152(0x3f00300)
-#define IT8152_INTC_LDPNIRR __REG_IT8152(0x3f00304)
-#define IT8152_INTC_LDCNIMR __REG_IT8152(0x3f00308)
-#define IT8152_INTC_LDPNIMR __REG_IT8152(0x3f0030C)
-#define IT8152_INTC_LDNITR __REG_IT8152(0x3f00310)
-#define IT8152_INTC_LDNIAR __REG_IT8152(0x3f00314)
-#define IT8152_INTC_LPCNIRR __REG_IT8152(0x3f00320)
-#define IT8152_INTC_LPPNIRR __REG_IT8152(0x3f00324)
-#define IT8152_INTC_LPCNIMR __REG_IT8152(0x3f00328)
-#define IT8152_INTC_LPPNIMR __REG_IT8152(0x3f0032C)
-#define IT8152_INTC_LPNITR __REG_IT8152(0x3f00330)
-#define IT8152_INTC_LPNIAR __REG_IT8152(0x3f00334)
-#define IT8152_INTC_PDCNIRR __REG_IT8152(0x3f00340)
-#define IT8152_INTC_PDPNIRR __REG_IT8152(0x3f00344)
-#define IT8152_INTC_PDCNIMR __REG_IT8152(0x3f00348)
-#define IT8152_INTC_PDPNIMR __REG_IT8152(0x3f0034C)
-#define IT8152_INTC_PDNITR __REG_IT8152(0x3f00350)
-#define IT8152_INTC_PDNIAR __REG_IT8152(0x3f00354)
-#define IT8152_INTC_INTC_TYPER __REG_IT8152(0x3f003FC)
-
-#define IT8152_GPIO_GPDR __REG_IT8152(0x3f00500)
-
-/*
- Interrupt controller per-register summary:
- ------------------------------------------
- LDCNIRR:
- IT8152_LD_IRQ(8) PCICLK stop
- IT8152_LD_IRQ(7) MCLK ready
- IT8152_LD_IRQ(6) software interrupt
- IT8152_LD_IRQ(5) UART
- IT8152_LD_IRQ(4) GPIO
- IT8152_LD_IRQ(3) TIMER 4
- IT8152_LD_IRQ(2) TIMER 3
- IT8152_LD_IRQ(1) TIMER 2
- IT8152_LD_IRQ(0) TIMER 1
-
- LPCNIRR:
- IT8152_LP_IRQ(x) serial IRQ x
-
- PDCNIRR:
- IT8152_PD_IRQ(14) PCISERR
- IT8152_PD_IRQ(13) CPU/PCI bridge target abort (h2pTADR)
- IT8152_PD_IRQ(12) CPU/PCI bridge master abort (h2pMADR)
- IT8152_PD_IRQ(11) PCI INTD
- IT8152_PD_IRQ(10) PCI INTC
- IT8152_PD_IRQ(9) PCI INTB
- IT8152_PD_IRQ(8) PCI INTA
- IT8152_PD_IRQ(7) serial INTD
- IT8152_PD_IRQ(6) serial INTC
- IT8152_PD_IRQ(5) serial INTB
- IT8152_PD_IRQ(4) serial INTA
- IT8152_PD_IRQ(3) serial IRQ IOCHK (IOCHKR)
- IT8152_PD_IRQ(2) chaining DMA (CDMAR)
- IT8152_PD_IRQ(1) USB (USBR)
- IT8152_PD_IRQ(0) Audio controller (ACR)
- */
-#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
-#define IT8152_LAST_IRQ (IRQ_BOARD_START + 40)
-
-/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
-#define IT8152_LD_IRQ_COUNT 9
-#define IT8152_LP_IRQ_COUNT 16
-#define IT8152_PD_IRQ_COUNT 15
-
-/* Priorities: */
-#define IT8152_PD_IRQ(i) IT8152_IRQ(i)
-#define IT8152_LP_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT)
-#define IT8152_LD_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT + IT8152_LP_IRQ_COUNT)
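
/*
 * Worked example of the resulting IRQ layout (15 PD + 16 LP + 9 LD
 * sources, all relative to IRQ_BOARD_START):
 *
 *	IT8152_PD_IRQ(0) == IRQ_BOARD_START + 0
 *	IT8152_LP_IRQ(0) == IRQ_BOARD_START + 15
 *	IT8152_LD_IRQ(0) == IRQ_BOARD_START + 31
 *	IT8152_LD_IRQ(8) == IRQ_BOARD_START + 39, below IT8152_LAST_IRQ
 */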
-
-/* frequently used interrupts */
-#define IT8152_PCISERR IT8152_PD_IRQ(14)
-#define IT8152_H2PTADR IT8152_PD_IRQ(13)
-#define IT8152_H2PMAR IT8152_PD_IRQ(12)
-#define IT8152_PCI_INTD IT8152_PD_IRQ(11)
-#define IT8152_PCI_INTC IT8152_PD_IRQ(10)
-#define IT8152_PCI_INTB IT8152_PD_IRQ(9)
-#define IT8152_PCI_INTA IT8152_PD_IRQ(8)
-#define IT8152_CDMA_INT IT8152_PD_IRQ(2)
-#define IT8152_USB_INT IT8152_PD_IRQ(1)
-#define IT8152_AUDIO_INT IT8152_PD_IRQ(0)
-
-struct pci_dev;
-struct pci_sys_data;
-
-extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);
-extern void it8152_init_irq(void);
-extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
-extern struct pci_ops it8152_ops;
-
-#endif /* __ASM_HARDWARE_IT8152_H */
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
index 74e51d6bd93f..3190e1e5067a 100644
--- a/arch/arm/include/asm/hardware/locomo.h
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -158,8 +158,6 @@
#define LOCOMO_LPT_TOH(TOH) ((TOH & 0x7) << 4)
#define LOCOMO_LPT_TOL(TOL) ((TOL & 0x7))
-extern struct bus_type locomo_bus_type;
-
#define LOCOMO_DEVID_KEYBOARD 0
#define LOCOMO_DEVID_FRONTLIGHT 1
#define LOCOMO_DEVID_BACKLIGHT 2
@@ -188,16 +186,14 @@ struct locomo_driver {
struct device_driver drv;
unsigned int devid;
int (*probe)(struct locomo_dev *);
- int (*remove)(struct locomo_dev *);
- int (*suspend)(struct locomo_dev *, pm_message_t);
- int (*resume)(struct locomo_dev *);
+ void (*remove)(struct locomo_dev *);
};
-#define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv)
+#define LOCOMO_DRV(_d) container_of_const((_d), struct locomo_driver, drv)
#define LOCOMO_DRIVER_NAME(_ldev) ((_ldev)->dev.driver->name)
-void locomo_lcd_power(struct locomo_dev *, int, unsigned int);
+extern void locomolcd_power(int on);
int locomo_driver_register(struct locomo_driver *);
void locomo_driver_unregister(struct locomo_driver *);
diff --git a/arch/arm/include/asm/hardware/memc.h b/arch/arm/include/asm/hardware/memc.h
index 42ba7c167d1f..1d4ebe0a9678 100644
--- a/arch/arm/include/asm/hardware/memc.h
+++ b/arch/arm/include/asm/hardware/memc.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/memc.h
*
* Copyright (C) Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define VDMA_ALIGNMENT PAGE_SIZE
#define VDMA_XFERSIZE 16
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index 7c2bbc7f0be1..90b6a832108d 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/hardware/sa1111.h
*
@@ -12,35 +13,6 @@
#ifndef _ASM_ARCH_SA1111
#define _ASM_ARCH_SA1111
-#include <mach/bitfield.h>
-
-/*
- * The SA1111 is always located at virtual 0xf4000000, and is always
- * "native" endian.
- */
-
-#define SA1111_VBASE 0xf4000000
-
-/* Don't use these! */
-#define SA1111_p2v(x) ((x) - SA1111_BASE + SA1111_VBASE)
-#define SA1111_v2p(x) ((x) - SA1111_VBASE + SA1111_BASE)
-
-#ifndef __ASSEMBLY__
-#define _SA1111(x) ((x) + sa1111->resource.start)
-#endif
-
-#define sa1111_writel(val,addr) __raw_writel(val, addr)
-#define sa1111_readl(addr) __raw_readl(addr)
-
-/*
- * 26 bits of the SA-1110 address bus are available to the SA-1111.
- * Use these when feeding target addresses to the DMA engines.
- */
-
-#define SA1111_ADDR_WIDTH (26)
-#define SA1111_ADDR_MASK ((1<<SA1111_ADDR_WIDTH)-1)
-#define SA1111_DMA_ADDR(x) ((x)&SA1111_ADDR_MASK)
-
/*
* Don't ask the (SAC) DMA engines to move less than this amount.
*/
@@ -396,7 +368,7 @@
-extern struct bus_type sa1111_bus_type;
+extern const struct bus_type sa1111_bus_type;
#define SA1111_DEVID_SBI (1 << 0)
#define SA1111_DEVID_SK (1 << 1)
@@ -416,11 +388,11 @@ struct sa1111_dev {
struct resource res;
void __iomem *mapbase;
unsigned int skpcr_mask;
- unsigned int irq[6];
+ unsigned int hwirq[6];
u64 dma_mask;
};
-#define SA1111_DEV(_d) container_of((_d), struct sa1111_dev, dev)
+#define to_sa1111_device(x) container_of(x, struct sa1111_dev, dev)
#define sa1111_get_drvdata(d) dev_get_drvdata(&(d)->dev)
#define sa1111_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, p)
@@ -429,13 +401,10 @@ struct sa1111_driver {
struct device_driver drv;
unsigned int devid;
int (*probe)(struct sa1111_dev *);
- int (*remove)(struct sa1111_dev *);
- int (*suspend)(struct sa1111_dev *, pm_message_t);
- int (*resume)(struct sa1111_dev *);
- void (*shutdown)(struct sa1111_dev *);
+ void (*remove)(struct sa1111_dev *);
};
-#define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv)
+#define SA1111_DRV(_d) container_of_const((_d), struct sa1111_driver, drv)
#define SA1111_DRIVER_NAME(_sadev) ((_sadev)->dev.driver->name)
@@ -446,6 +415,8 @@ struct sa1111_driver {
int sa1111_enable_device(struct sa1111_dev *);
void sa1111_disable_device(struct sa1111_dev *);
+int sa1111_get_irq(struct sa1111_dev *, unsigned num);
+
unsigned int sa1111_pll_clock(struct sa1111_dev *);
#define SA1111_AUDIO_ACLINK 0
@@ -460,10 +431,6 @@ int sa1111_check_dma_bug(dma_addr_t addr);
int sa1111_driver_register(struct sa1111_driver *);
void sa1111_driver_unregister(struct sa1111_driver *);
-void sa1111_set_io_dir(struct sa1111_dev *sadev, unsigned int bits, unsigned int dir, unsigned int sleep_dir);
-void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
-void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
-
struct sa1111_platform_data {
int irq_base; /* base for cascaded on-chip IRQs */
unsigned disable_devs;
diff --git a/arch/arm/include/asm/hardware/scoop.h b/arch/arm/include/asm/hardware/scoop.h
index 58cdf5d84122..505453315287 100644
--- a/arch/arm/include/asm/hardware/scoop.h
+++ b/arch/arm/include/asm/hardware/scoop.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Definitions for the SCOOP interface found on various Sharp PDAs
*
* Copyright (c) 2004 Richard Purdie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#define SCOOP_MCR 0x00
diff --git a/arch/arm/include/asm/hardware/ssp.h b/arch/arm/include/asm/hardware/ssp.h
index 3b42e181997c..72d176790308 100644
--- a/arch/arm/include/asm/hardware/ssp.h
+++ b/arch/arm/include/asm/hardware/ssp.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ssp.h
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef SSP_H
#define SSP_H
diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h
deleted file mode 100644
index bb28af7c32de..000000000000
--- a/arch/arm/include/asm/hardware/timer-sp.h
+++ /dev/null
@@ -1,23 +0,0 @@
-struct clk;
-
-void __sp804_clocksource_and_sched_clock_init(void __iomem *,
- const char *, struct clk *, int);
-void __sp804_clockevents_init(void __iomem *, unsigned int,
- struct clk *, const char *);
-
-static inline void sp804_clocksource_init(void __iomem *base, const char *name)
-{
- __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0);
-}
-
-static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base,
- const char *name)
-{
- __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1);
-}
-
-static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name)
-{
- __sp804_clockevents_init(base, irq, NULL, name);
-}
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 91b99abe7a95..bdb209e002a4 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H
-#include <asm/kmap_types.h>
+#include <asm/cachetype.h>
+#include <asm/fixmap.h>
#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP PTRS_PER_PTE
@@ -9,8 +11,6 @@
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-#define kmap_prot PAGE_KERNEL
-
#define flush_cache_kmaps() \
do { \
if (cache_is_vivt()) \
@@ -19,9 +19,6 @@
extern pte_t *pkmap_page_table;
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-
/*
* The reason for kmap_high_get() is to ensure that the currently kmap'd
* page usage count does not decrease to zero while we're using its
@@ -49,25 +46,33 @@ extern void kunmap_high(struct page *page);
#endif
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
-extern void *kmap_high_get(struct page *page);
-#else
-static inline void *kmap_high_get(struct page *page)
+extern void *kmap_high_get(const struct page *page);
+
+static inline void *arch_kmap_local_high_get(const struct page *page)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
+ return NULL;
+ return kmap_high_get(page);
+}
+#define arch_kmap_local_high_get arch_kmap_local_high_get
+
+#else /* ARCH_NEEDS_KMAP_HIGH_GET */
+static inline void *kmap_high_get(const struct page *page)
{
return NULL;
}
-#endif
+#endif /* !ARCH_NEEDS_KMAP_HIGH_GET */
-/*
- * The following functions are already defined by <linux/highmem.h>
- * when CONFIG_HIGHMEM is not set.
- */
-#ifdef CONFIG_HIGHMEM
-extern void *kmap(struct page *page);
-extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
-extern void *kmap_atomic_pfn(unsigned long pfn);
-extern struct page *kmap_atomic_to_page(const void *ptr);
-#endif
+#define arch_kmap_local_post_map(vaddr, pteval) \
+ local_flush_tlb_kernel_page(vaddr)
+
+#define arch_kmap_local_pre_unmap(vaddr) \
+do { \
+ if (cache_is_vivt()) \
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); \
+} while (0)
+
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_kernel_page(vaddr)
#endif
diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h
index d4014fbe5ea3..87d48e2d90ad 100644
--- a/arch/arm/include/asm/hugetlb-3level.h
+++ b/arch/arm/include/asm/hugetlb-3level.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hugetlb-3level.h
*
* Copyright (C) 2012 ARM Ltd.
*
* Based on arch/x86/include/asm/hugetlb.h.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_ARM_HUGETLB_3LEVEL_H
@@ -25,11 +13,12 @@
/*
* If our huge pte is non-zero then mark the valid bit.
- * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
+ * This allows pte_present(huge_ptep_get(mm, addr, ptep)) to return true for non-zero
* ptes.
* (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
*/
-static inline pte_t huge_ptep_get(pte_t *ptep)
+#define __HAVE_ARCH_HUGE_PTEP_GET
+static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t retval = *ptep;
if (pte_val(retval))
@@ -37,35 +26,4 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return retval;
}
-static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- set_pte_at(mm, addr, ptep, pte);
-}
-
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- ptep_clear_flush(vma, addr, ptep);
-}
-
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- ptep_set_wrprotect(mm, addr, ptep);
-}
-
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- return ptep_get_and_clear(mm, addr, ptep);
-}
-
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
-{
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-}
-
#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
index 1f1b1cd112f3..700055b1ccb3 100644
--- a/arch/arm/include/asm/hugetlb.h
+++ b/arch/arm/include/asm/hugetlb.h
@@ -1,84 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hugetlb.h
*
* Copyright (C) 2012 ARM Ltd.
*
* Based on arch/x86/include/asm/hugetlb.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_ARM_HUGETLB_H
#define _ASM_ARM_HUGETLB_H
+#include <asm/cacheflush.h>
#include <asm/page.h>
-#include <asm-generic/hugetlb.h>
-
#include <asm/hugetlb-3level.h>
+#include <asm-generic/hugetlb.h>
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor,
- unsigned long ceiling)
-{
- free_pgd_range(tlb, addr, end, floor, ceiling);
-}
-
-
-static inline int is_hugepage_only_range(struct mm_struct *mm,
- unsigned long addr, unsigned long len)
-{
- return 0;
-}
-
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- struct hstate *h = hstate_file(file);
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (addr & ~huge_page_mask(h))
- return -EINVAL;
- return 0;
-}
-
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
-static inline int huge_pte_none(pte_t pte)
-{
- return pte_none(pte);
-}
-
-static inline pte_t huge_pte_wrprotect(pte_t pte)
-{
- return pte_wrprotect(pte);
-}
-
-static inline int arch_prepare_hugepage(struct page *page)
-{
- return 0;
-}
-
-static inline void arch_release_hugepage(struct page *page)
-{
-}
-
-static inline void arch_clear_hugepage_flags(struct page *page)
+static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &page->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
+#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
#endif /* _ASM_ARM_HUGETLB_H */
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index eef55ea9ef00..e7f9961c53b2 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARM_HW_BREAKPOINT_H
#define _ARM_HW_BREAKPOINT_H
@@ -51,6 +52,10 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V7_ECP14 3
#define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
+#define ARM_DEBUG_ARCH_V8 6
+#define ARM_DEBUG_ARCH_V8_1 7
+#define ARM_DEBUG_ARCH_V8_2 8
+#define ARM_DEBUG_ARCH_V8_4 9
/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0
@@ -79,6 +84,7 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DSCR_MOE(x) ((x >> 2) & 0xf)
#define ARM_ENTRY_BREAKPOINT 0x1
#define ARM_ENTRY_ASYNC_WATCHPOINT 0x2
+#define ARM_ENTRY_CFI_BREAKPOINT 0x3
#define ARM_ENTRY_SYNC_WATCHPOINT 0xa
/* DSCR monitor/halting bits. */
@@ -109,15 +115,17 @@ static inline void decode_ctrl_reg(u32 reg,
asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
} while (0)
+struct perf_event_attr;
struct notifier_block;
struct perf_event;
struct pmu;
-extern struct pmu perf_ops_bp;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index a71b417b1856..cecc13214ef1 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Nothing to see here yet
*/
@@ -8,14 +9,9 @@ static inline void ack_bad_irq(int irq)
{
extern unsigned long irq_err_count;
irq_err_count++;
+ pr_crit("unexpected IRQ trap at vector %02x\n", irq);
}
-void set_irq_flags(unsigned int irq, unsigned int flags);
-
-#define IRQF_VALID (1 << 0)
-#define IRQF_PROBE (1 << 1)
-#define IRQF_NOAUTOEN (1 << 2)
-
#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE)
#endif
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index 6ff56eca3f1f..e31d9f1b8549 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_HWCAP_H
#define __ASMARM_HWCAP_H
@@ -9,6 +10,7 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
-extern unsigned int elf_hwcap;
+#define ELF_HWCAP2 (elf_hwcap2)
+extern unsigned int elf_hwcap, elf_hwcap2;
#endif
#endif
diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h
index b90d9e523d6f..8a648e506540 100644
--- a/arch/arm/include/asm/hypervisor.h
+++ b/arch/arm/include/asm/hypervisor.h
@@ -1,6 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_HYPERVISOR_H
#define _ASM_ARM_HYPERVISOR_H
#include <asm/xen/hypervisor.h>
+void kvm_init_hyp_services(void);
+bool kvm_arm_hyp_service_available(u32 func_id);
+
+static inline void kvm_arch_init_hyp_services(void) { }
+
#endif
diff --git a/arch/arm/include/asm/ide.h b/arch/arm/include/asm/ide.h
deleted file mode 100644
index b507ce8e5019..000000000000
--- a/arch/arm/include/asm/ide.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * arch/arm/include/asm/ide.h
- *
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-/*
- * This file contains the ARM architecture specific IDE code.
- */
-
-#ifndef __ASMARM_IDE_H
-#define __ASMARM_IDE_H
-
-#ifdef __KERNEL__
-
-#define __ide_mm_insw(port,addr,len) readsw(port,addr,len)
-#define __ide_mm_insl(port,addr,len) readsl(port,addr,len)
-#define __ide_mm_outsw(port,addr,len) writesw(port,addr,len)
-#define __ide_mm_outsl(port,addr,len) writesl(port,addr,len)
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASMARM_IDE_H */
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index bf863edb517d..baebb67b3512 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -1,11 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_IDMAP_H
#define __ASM_IDMAP_H
#include <linux/compiler.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
/* Tag a function as requiring to be executed via an identity mapping. */
-#define __idmap __section(.idmap.text) noinline notrace
+#define __idmap __section(".idmap.text") noinline notrace
extern pgd_t *idmap_pgd;
diff --git a/arch/arm/include/asm/insn.h b/arch/arm/include/asm/insn.h
new file mode 100644
index 000000000000..faf3d1c28368
--- /dev/null
+++ b/arch/arm/include/asm/insn.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_INSN_H
+#define __ASM_ARM_INSN_H
+
+#include <linux/types.h>
+
+/*
+ * Avoid a literal load by emitting a sequence of ADD/LDR instructions with the
+ * appropriate relocations. The combined sequence has a range of -/+ 256 MiB,
+ * which should be sufficient for the core kernel as well as modules loaded
+ * into the module region. (Not supported by LLD before release 14)
+ */
+#define LOAD_SYM_ARMV6(reg, sym) \
+ " .globl " #sym " \n\t" \
+ " .reloc 10f, R_ARM_ALU_PC_G0_NC, " #sym " \n\t" \
+ " .reloc 11f, R_ARM_ALU_PC_G1_NC, " #sym " \n\t" \
+ " .reloc 12f, R_ARM_LDR_PC_G2, " #sym " \n\t" \
+ "10: sub " #reg ", pc, #8 \n\t" \
+ "11: sub " #reg ", " #reg ", #4 \n\t" \
+ "12: ldr " #reg ", [" #reg ", #0] \n\t"
+
+static inline unsigned long
+arm_gen_nop(void)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ return 0xf3af8000; /* nop.w */
+#else
+ return 0xe1a00000; /* mov r0, r0 */
+#endif
+}
+
+unsigned long
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn);
+
+static inline unsigned long
+arm_gen_branch(unsigned long pc, unsigned long addr)
+{
+ return __arm_gen_branch(pc, addr, false, true);
+}
+
+static inline unsigned long
+arm_gen_branch_link(unsigned long pc, unsigned long addr, bool warn)
+{
+ return __arm_gen_branch(pc, addr, true, warn);
+}
+
+#endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d070741b2b37..bae5edf348ef 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/io.h
*
* Copyright (C) 1996-2000 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Modifications:
* 16-Sep-1996 RMK Inlined the inx/outx functions & optimised for both
* constant addresses and variable addresses.
@@ -23,29 +20,35 @@
#ifdef __KERNEL__
+#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm-generic/pci_iomap.h>
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
-#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*
+ * Atomic MMIO-wide IO modify
+ */
+extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set);
+extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
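
/*
 * A minimal usage sketch (field layout hypothetical): atomically
 * replace a 4-bit field at bits [7:4] of an MMIO register without
 * racing other updaters of the same word.
 */
static inline void example_set_mux(void __iomem *reg, u32 sel)
{
	atomic_io_modify(reg, 0xf0, (sel & 0xf) << 4);
}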
+
+/*
* Generic IO read/write. These perform native-endian accesses. Note
* that some architectures will want to re-define __raw_{read,write}w.
*/
-extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
-extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
-extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);
+void __raw_writesb(volatile void __iomem *addr, const void *data, int bytelen);
+void __raw_writesw(volatile void __iomem *addr, const void *data, int wordlen);
+void __raw_writesl(volatile void __iomem *addr, const void *data, int longlen);
-extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
-extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
-extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
+void __raw_readsb(const volatile void __iomem *addr, void *data, int bytelen);
+void __raw_readsw(const volatile void __iomem *addr, void *data, int wordlen);
+void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
#if __LINUX_ARM_ARCH__ < 6
/*
@@ -61,52 +64,55 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
* writeback addressing modes as these incur a significant performance
* overhead (the address generation must be emulated in software).
*/
+#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %1, %0"
- : "+Q" (*(volatile u16 __force *)addr)
- : "r" (val));
+ : : "Q" (*(volatile u16 __force *)addr), "r" (val));
}
+#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
- asm volatile("ldrh %1, %0"
- : "+Q" (*(volatile u16 __force *)addr),
- "=r" (val));
+ asm volatile("ldrh %0, %1"
+ : "=r" (val)
+ : "Q" (*(volatile u16 __force *)addr));
return val;
}
#endif
+#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
asm volatile("strb %1, %0"
- : "+Qo" (*(volatile u8 __force *)addr)
- : "r" (val));
+ : : "Qo" (*(volatile u8 __force *)addr), "r" (val));
}
+#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
asm volatile("str %1, %0"
- : "+Qo" (*(volatile u32 __force *)addr)
- : "r" (val));
+ : : "Qo" (*(volatile u32 __force *)addr), "r" (val));
}
+#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 val;
- asm volatile("ldrb %1, %0"
- : "+Qo" (*(volatile u8 __force *)addr),
- "=r" (val));
+ asm volatile("ldrb %0, %1"
+ : "=r" (val)
+ : "Qo" (*(volatile u8 __force *)addr));
return val;
}
+#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
- asm volatile("ldr %1, %0"
- : "+Qo" (*(volatile u32 __force *)addr),
- "=r" (val));
+ asm volatile("ldr %0, %1"
+ : "=r" (val)
+ : "Qo" (*(volatile u32 __force *)addr));
return val;
}
@@ -128,20 +134,14 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
* The _caller variety takes a __builtin_return_address(0) value for
* /proc/vmalloc to use - and should only be used in non-inline functions.
*/
-extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
- size_t, unsigned int, void *);
extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
void *);
-
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
-extern void __iounmap(volatile void __iomem *addr);
-extern void __arm_iounmap(volatile void __iomem *addr);
+void __arm_iomem_set_ro(void __iomem *ptr, size_t size);
extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *);
-extern void (*arch_iounmap)(volatile void __iomem *);
/*
* Bad read/write accesses...
@@ -170,40 +170,41 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
/* PCI fixed i/o mapping */
#define PCI_IO_VIRT_BASE 0xfee00000
+#define PCI_IOBASE ((void __iomem *)PCI_IO_VIRT_BASE)
-extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
+#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
+void pci_ioremap_set_mem_type(int mem_type);
+#else
+static inline void pci_ioremap_set_mem_type(int mem_type) {}
+#endif
+struct resource;
+
+#define pci_remap_iospace pci_remap_iospace
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+
+/*
+ * PCI configuration space mapping function.
+ *
+ * The PCI specification does not allow configuration write
+ * transactions to be posted. Add an arch specific
+ * pci_remap_cfgspace() definition that is implemented
+ * through strongly ordered memory mappings.
+ */
+#define pci_remap_cfgspace pci_remap_cfgspace
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size);
/*
* Now, pick up the machine-defined IO definitions
*/
#ifdef CONFIG_NEED_MACH_IO_H
#include <mach/io.h>
-#elif defined(CONFIG_PCI)
-#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
-#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#else
-#define __io(a) __typesafe_io((a) & IO_SPACE_LIMIT)
-#endif
-
-/*
- * This is the limit of PC card/PCI/ISA IO space, which is by default
- * 64K if we have PC card, PCI or ISA support. Otherwise, default to
- * zero to prevent ISA/PCI drivers claiming IO space (and potentially
- * oopsing.)
- *
- * Only set this larger if you really need inb() et.al. to operate over
- * a larger address space. Note that SOC_COMMON ioremaps each sockets
- * IO space area, and so inb() et.al. must be defined to operate as per
- * readb() et.al. on such platforms.
- */
-#ifndef IO_SPACE_LIMIT
-#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
-#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
-#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
-#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
+#if IS_ENABLED(CONFIG_PCMCIA) || defined(CONFIG_PCI)
+#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
#else
#define IO_SPACE_LIMIT ((resource_size_t)0)
#endif
+#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#endif
/*
@@ -252,20 +253,6 @@ extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
#define insl(p,d,l) __raw_readsl(__io(p),d,l)
#endif
-#define outb_p(val,port) outb((val),(port))
-#define outw_p(val,port) outw((val),(port))
-#define outl_p(val,port) outl((val),(port))
-#define inb_p(port) inb((port))
-#define inw_p(port) inw((port))
-#define inl_p(port) inl((port))
-
-#define outsb_p(port,from,len) outsb(port,from,len)
-#define outsw_p(port,from,len) outsw(port,from,len)
-#define outsl_p(port,from,len) outsl(port,from,len)
-#define insb_p(port,to,len) insb(port,to,len)
-#define insw_p(port,to,len) insw(port,to,len)
-#define insl_p(port,to,len) insl(port,to,len)
-
/*
* String version of IO memory access ops:
*/
@@ -273,8 +260,6 @@ extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void _memset_io(volatile void __iomem *, int, size_t);
-#define mmiowb()
-
/*
* Memory access primitives
* ------------------------
@@ -282,7 +267,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
* These perform PCI memory accesses via an ioremap region. They don't
* take an address as such, but a cookie.
*
- * Again, this are defined to perform little endian accesses. See the
+ * Again, these are defined to perform little endian accesses. See the
* IO port primitives for more information.
*/
#ifndef readl
@@ -312,85 +297,128 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define writesw(p,d,l) __raw_writesw(p,d,l)
#define writesl(p,d,l) __raw_writesl(p,d,l)
+#ifndef __ARMBE__
+static inline void memset_io(volatile void __iomem *dst, unsigned c,
+ size_t count)
+{
+ extern void mmioset(void *, unsigned int, size_t);
+ mmioset((void __force *)dst, c, count);
+}
+#define memset_io(dst,c,count) memset_io(dst,c,count)
+
+static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
+ size_t count)
+{
+ extern void mmiocpy(void *, const void *, size_t);
+ mmiocpy(to, (const void __force *)from, count);
+}
+#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
+
+static inline void memcpy_toio(volatile void __iomem *to, const void *from,
+ size_t count)
+{
+ extern void mmiocpy(void *, const void *, size_t);
+ mmiocpy((void __force *)to, from, count);
+}
+#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
+
+#else
#define memset_io(c,v,l) _memset_io(c,(v),(l))
#define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l))
#define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l))
+#endif
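
/*
 * A hedged usage sketch (everything but the helpers is made up):
 * copy a firmware image into a device window and clear the mailbox
 * that follows it, using the string MMIO ops defined above.
 */
static inline void example_load_fw(void __iomem *win, const void *fw,
				   size_t len)
{
	memcpy_toio(win, fw, len);	/* safe bytewise copy into MMIO */
	memset_io(win + len, 0, 64);	/* zero the trailing mailbox */
}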
#endif /* readl */
/*
- * ioremap and friends.
+ * ioremap() and friends.
+ *
+ * ioremap() takes a resource address, and size. Due to the ARM memory
+ * types, it is important to use the correct ioremap() function as each
+ * mapping has specific properties.
+ *
+ * Function Memory type Cacheability Cache hint
+ * ioremap() Device n/a n/a
+ * ioremap_cache() Normal Writeback Read allocate
+ * ioremap_wc() Normal Non-cacheable n/a
+ * ioremap_wt() Normal Non-cacheable n/a
*
- * ioremap takes a PCI memory address, as specified in
- * Documentation/io-mapping.txt.
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
+ * - writes may be delayed before they hit the endpoint device
*
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ * - ordering is not guaranteed without explicit dependencies or barrier
+ * instructions
+ * - writes may be delayed before they hit the endpoint memory
+ *
+ * The cache hint is only a performance hint: CPUs may alias these hints.
+ * Eg, a CPU not implementing read allocate but implementing write allocate
+ * will provide a write allocate mapping instead.
*/
-#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
-#define iounmap __arm_iounmap
+void __iomem *ioremap(resource_size_t res_cookie, size_t size);
+#define ioremap ioremap
/*
- * io{read,write}{8,16,32} macros
+ * Do not use ioremap_cache for mapping memory. Use memremap instead.
*/
-#ifndef ioread8
-#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
-#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
-#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
+#define ioremap_cache ioremap_cache
-#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
-#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
+#define ioremap_wc ioremap_wc
+#define ioremap_wt ioremap_wc
-#define iowrite8(v,p) ({ __iowmb(); __raw_writeb(v, p); })
-#define iowrite16(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
-#define iowrite32(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
+void iounmap(volatile void __iomem *io_addr);
+#define iounmap iounmap
-#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
-#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags);
+#define arch_memremap_wb arch_memremap_wb
-#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
-#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
-#define ioread32_rep(p,d,c) __raw_readsl(p,d,c)
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-#define iowrite8_rep(p,s,c) __raw_writesb(p,s,c)
-#define iowrite16_rep(p,s,c) __raw_writesw(p,s,c)
-#define iowrite32_rep(p,s,c) __raw_writesl(p,s,c)
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
+#ifndef ioport_map
+#define ioport_map ioport_map
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+#endif
+#ifndef ioport_unmap
+#define ioport_unmap ioport_unmap
extern void ioport_unmap(void __iomem *addr);
#endif
struct pci_dev;
+#define pci_iounmap pci_iounmap
extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
-/*
- * can the hardware map this into one segment or not, given no other
- * constraints.
- */
-#define BIOVEC_MERGEABLE(vec1, vec2) \
- ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+#include <asm-generic/io.h>
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
-extern int devmem_is_allowed(unsigned long pfn);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
#endif
/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
-/*
* Register ISA memory and port locations for glibc iopl/inb/outb
* emulation.
*/
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 53c15dec7af6..26c1d2ced4ce 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_IRQ_H
#define __ASM_ARM_IRQ_H
@@ -24,17 +25,22 @@
#ifndef __ASSEMBLY__
struct irqaction;
struct pt_regs;
-extern void migrate_irqs(void);
-extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
-void init_IRQ(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
+#ifdef CONFIG_SMP
+#include <linux/cpumask.h>
+
+extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+ int exclude_cpu);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif
+static inline int nr_legacy_irqs(void)
+{
+ return NR_IRQS_LEGACY;
+}
+
#endif
#endif
diff --git a/arch/arm/include/asm/irq_work.h b/arch/arm/include/asm/irq_work.h
new file mode 100644
index 000000000000..8895999834cc
--- /dev/null
+++ b/arch/arm/include/asm/irq_work.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARM_IRQ_WORK_H
+#define __ASM_ARM_IRQ_WORK_H
+
+#include <asm/smp_plat.h>
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return is_smp();
+}
+
+#endif /* __ASM_ARM_IRQ_WORK_H */
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 3b763d6652a0..aeec7f24eb75 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_IRQFLAGS_H
#define __ASM_ARM_IRQFLAGS_H
@@ -20,6 +21,7 @@
#if __LINUX_ARM_ARCH__ >= 6
+#define arch_local_irq_save arch_local_irq_save
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
@@ -31,6 +33,7 @@ static inline unsigned long arch_local_irq_save(void)
return flags;
}
+#define arch_local_irq_enable arch_local_irq_enable
static inline void arch_local_irq_enable(void)
{
asm volatile(
@@ -40,6 +43,7 @@ static inline void arch_local_irq_enable(void)
: "memory", "cc");
}
+#define arch_local_irq_disable arch_local_irq_disable
static inline void arch_local_irq_disable(void)
{
asm volatile(
@@ -51,11 +55,20 @@ static inline void arch_local_irq_disable(void)
#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
+
+#ifndef CONFIG_CPU_V7M
+#define local_abt_enable() __asm__("cpsie a @ __sta" : : : "memory", "cc")
+#define local_abt_disable() __asm__("cpsid a @ __cla" : : : "memory", "cc")
+#else
+#define local_abt_enable() do { } while (0)
+#define local_abt_disable() do { } while (0)
+#endif
#else
/*
* Save the current interrupt enable state & disable IRQs
*/
+#define arch_local_irq_save arch_local_irq_save
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags, temp;
@@ -73,6 +86,7 @@ static inline unsigned long arch_local_irq_save(void)
/*
* Enable IRQs
*/
+#define arch_local_irq_enable arch_local_irq_enable
static inline void arch_local_irq_enable(void)
{
unsigned long temp;
@@ -88,6 +102,7 @@ static inline void arch_local_irq_enable(void)
/*
* Disable IRQs
*/
+#define arch_local_irq_disable arch_local_irq_disable
static inline void arch_local_irq_disable(void)
{
unsigned long temp;
@@ -130,11 +145,14 @@ static inline void arch_local_irq_disable(void)
: "memory", "cc"); \
})
+#define local_abt_enable() do { } while (0)
+#define local_abt_disable() do { } while (0)
#endif
/*
* Save the current interrupt enable state.
*/
+#define arch_local_save_flags arch_local_save_flags
static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
@@ -147,6 +165,7 @@ static inline unsigned long arch_local_save_flags(void)
/*
* restore saved IRQ & FIQ state
*/
+#define arch_local_irq_restore arch_local_irq_restore
static inline void arch_local_irq_restore(unsigned long flags)
{
asm volatile(
@@ -156,10 +175,13 @@ static inline void arch_local_irq_restore(unsigned long flags)
: "memory", "cc");
}
+#define arch_irqs_disabled_flags arch_irqs_disabled_flags
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return flags & IRQMASK_I_BIT;
}
+#include <asm-generic/irqflags.h>
+
#endif /* ifdef __KERNEL__ */
#endif /* ifndef __ASM_ARM_IRQFLAGS_H */
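These arch_local_* primitives are what the generic <linux/irqflags.h> layer wraps, and the asm-generic include pulled in at the end derives the remaining helpers from the ones defined here. A minimal usage sketch, assuming hypothetical foo_counter state:

#include <linux/irqflags.h>

static unsigned int foo_counter;

static void foo_update(void)
{
	unsigned long flags;

	local_irq_save(flags);	  /* ends up in arch_local_irq_save() */
	foo_counter++;		  /* short, non-reentrant critical section */
	local_irq_restore(flags); /* arch_local_irq_restore(flags) */
}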
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index bfc198c75913..a35aba7f548c 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -1,35 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_JUMP_LABEL_H
#define _ASM_ARM_JUMP_LABEL_H
-#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
#include <linux/types.h>
-#include <asm/system.h>
+#include <asm/unified.h>
#define JUMP_LABEL_NOP_SIZE 4
-#ifdef CONFIG_THUMB2_KERNEL
-#define JUMP_LABEL_NOP "nop.w"
-#else
-#define JUMP_LABEL_NOP "nop"
-#endif
+/* This macro is also expanded on the Rust side. */
+#define ARCH_STATIC_BRANCH_ASM(key, label) \
+ "1:\n\t" \
+ WASM(nop) "\n\t" \
+ ".pushsection __jump_table, \"aw\"\n\t" \
+ ".word 1b, " label ", " key "\n\t" \
+ ".popsection\n\t" \
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+ asm goto(ARCH_STATIC_BRANCH_ASM("%c0", "%l[l_yes]")
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm goto("1:\n\t"
- JUMP_LABEL_NOP "\n\t"
+ WASM(b) " %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
- : : "i" (key) : : l_yes);
+ : : "i" (&((char *)key)[branch]) : : l_yes);
return false;
l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
typedef u32 jump_label_t;
struct jump_entry {
@@ -38,4 +49,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
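The inline asm above plants a single NOP (or an unconditional branch in the _jump variant) plus a __jump_table record that the static-keys core patches at runtime. A hedged sketch of typical use follows; the key and function names are hypothetical, the API is the standard <linux/jump_label.h> one:

#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(foo_tracing_key);

static void foo_fast_path(void)
{
	/* compiles to a NOP until the key is enabled */
	if (static_branch_unlikely(&foo_tracing_key))
		pr_info("foo: tracing enabled\n");
}

static void foo_enable_tracing(void)
{
	static_branch_enable(&foo_tracing_key);	/* patches all NOP sites */
}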
diff --git a/arch/arm/include/asm/kasan.h b/arch/arm/include/asm/kasan.h
new file mode 100644
index 000000000000..303c35df3135
--- /dev/null
+++ b/arch/arm/include/asm/kasan.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/kasan.h
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifdef CONFIG_KASAN
+
+#include <asm/kasan_def.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+/*
+ * The compiler uses a shadow offset assuming that addresses start
+ * from 0. Kernel addresses don't start from 0, so the shadow
+ * for the kernel really starts from 'compiler's shadow offset' +
+ * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT)
+ */
+
+asmlinkage void kasan_early_init(void);
+extern void kasan_init(void);
+
+#else
+static inline void kasan_init(void) { }
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/kasan_def.h b/arch/arm/include/asm/kasan_def.h
new file mode 100644
index 000000000000..5739605aa7cf
--- /dev/null
+++ b/arch/arm/include/asm/kasan_def.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/kasan_def.h
+ *
+ * Copyright (c) 2018 Huawei Technologies Co., Ltd.
+ *
+ * Author: Abbott Liu <liuwenliang@huawei.com>
+ */
+
+#ifndef __ASM_KASAN_DEF_H
+#define __ASM_KASAN_DEF_H
+
+#ifdef CONFIG_KASAN
+
+/*
+ * Define KASAN_SHADOW_OFFSET, KASAN_SHADOW_START and KASAN_SHADOW_END for
+ * the Arm kernel address sanitizer. We are "stealing" lowmem (the 4GB
+ * addressable by a 32-bit architecture) out of the virtual address
+ * space to use as shadow memory for KASan as follows:
+ *
+ * +----+ 0xffffffff
+ * | | \
+ * | | |-> Static kernel image (vmlinux) BSS and page table
+ * | |/
+ * +----+ PAGE_OFFSET
+ * | | \
+ * | | |-> Loadable kernel modules virtual address space area
+ * | |/
+ * +----+ MODULES_VADDR = KASAN_SHADOW_END
+ * | | \
+ * | | |-> The shadow area of kernel virtual address.
+ * | |/
+ * +----+-> TASK_SIZE (start of kernel space) = KASAN_SHADOW_START the
+ * | |\ shadow address of MODULES_VADDR
+ * | | |
+ * | | |
+ * | | |-> The user space area in lowmem. The kernel address
+ * | | | sanitizer does not use this space, nor does it map it.
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * | |/
+ * ------ 0
+ *
+ * 1) KASAN_SHADOW_START
+ * This value is the shadow address of MODULES_VADDR. It is the
+ * start of kernel virtual space. Since we have modules to load, we need
+ * to cover also that area with shadow memory so we can find memory
+ * bugs in modules.
+ *
+ * 2) KASAN_SHADOW_END
+ * This value is the shadow address of 0x100000000: the mapping that would
+ * follow the end of kernel memory at 0xffffffff. It is the end of the
+ * kernel address sanitizer shadow area. It is also the start of the
+ * module area.
+ *
+ * 3) KASAN_SHADOW_OFFSET:
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ *
+ * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ *
+ * As you would expect, >> 3 is equivalent to dividing by 8: each byte
+ * of shadow memory covers 8 bytes of kernel memory, i.e. one bit of
+ * shadow memory is used per byte of kernel memory.
+ *
+ * The KASAN_SHADOW_OFFSET is provided in a Kconfig option depending
+ * on the VMSPLIT layout of the system: the kernel and userspace can
+ * split up lowmem in different ways according to needs, so we calculate
+ * the shadow offset depending on this.
+ */
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_END ((UL(1) << (32 - KASAN_SHADOW_SCALE_SHIFT)) \
+ + KASAN_SHADOW_OFFSET)
+#define KASAN_SHADOW_START ((KASAN_SHADOW_END >> 3) + KASAN_SHADOW_OFFSET)
+
+#endif
+#endif
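To make the formulas above concrete, here is a worked example with a deliberately hypothetical offset (real values come from the VMSPLIT-dependent Kconfig option):

/* hypothetical: assume CONFIG_KASAN_SHADOW_OFFSET = 0xb6e00000 */
#define EXAMPLE_SHADOW_OFFSET	0xb6e00000UL

static inline unsigned long example_mem_to_shadow(unsigned long addr)
{
	/* same formula as in the comment: one shadow byte per 8 bytes */
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + EXAMPLE_SHADOW_OFFSET;
}

/*
 * With this offset:
 *   KASAN_SHADOW_END   = (1UL << 29) + 0xb6e00000 = 0xd6e00000
 *   KASAN_SHADOW_START = (0xd6e00000 >> 3) + 0xb6e00000 = 0xd1bc0000
 * i.e. the shadow of MODULES_VADDR (== KASAN_SHADOW_END) lands exactly
 * at KASAN_SHADOW_START, matching the layout diagram.
 */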
diff --git a/arch/arm/include/asm/kexec-internal.h b/arch/arm/include/asm/kexec-internal.h
new file mode 100644
index 000000000000..ecc2322db7aa
--- /dev/null
+++ b/arch/arm/include/asm/kexec-internal.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+ unsigned long kexec_start_address;
+ unsigned long kexec_indirection_page;
+ unsigned long kexec_mach_type;
+ unsigned long kexec_r2;
+};
+
+#endif
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index c2b9b4bdec00..a8287e7ab9d4 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARM_KEXEC_H
#define _ARM_KEXEC_H
-#ifdef CONFIG_KEXEC
-
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
@@ -19,6 +18,11 @@
#ifndef __ASSEMBLY__
+#define ARCH_HAS_KIMAGE_ARCH
+struct kimage_arch {
+ u32 kernel_r2;
+};
+
/**
* crash_setup_regs() - save registers for the panic kernel
* @newregs: registers are saved here
@@ -50,11 +54,30 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
}
}
-/* Function pointer to optional machine-specific reinitialization */
-extern void (*kexec_reinit)(void);
+static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
+{
+ return phys_to_idmap(phys);
+}
+#define phys_to_boot_phys phys_to_boot_phys
-#endif /* __ASSEMBLY__ */
+static inline phys_addr_t boot_phys_to_phys(unsigned long entry)
+{
+ return idmap_to_phys(entry);
+}
+#define boot_phys_to_phys boot_phys_to_phys
-#endif /* CONFIG_KEXEC */
+static inline unsigned long page_to_boot_pfn(struct page *page)
+{
+ return page_to_pfn(page) + (arch_phys_to_idmap_offset >> PAGE_SHIFT);
+}
+#define page_to_boot_pfn page_to_boot_pfn
+
+static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
+{
+ return pfn_to_page(boot_pfn - (arch_phys_to_idmap_offset >> PAGE_SHIFT));
+}
+#define boot_pfn_to_page boot_pfn_to_page
+
+#endif /* __ASSEMBLY__ */
#endif /* _ARM_KEXEC_H */
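The four helpers above translate between the kernel's view of physical memory and the identity-mapped view the boot code sees; by construction they are inverses of each other. A hypothetical sanity helper (not part of the header above) makes the invariant explicit:

static inline bool foo_boot_pfn_roundtrip_ok(struct page *page)
{
	unsigned long boot_pfn = page_to_boot_pfn(page);

	/*
	 * page_to_boot_pfn() adds arch_phys_to_idmap_offset and
	 * boot_pfn_to_page() subtracts it, so this must hold.
	 */
	return boot_pfn_to_page(boot_pfn) == page;
}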
diff --git a/arch/arm/include/asm/kfence.h b/arch/arm/include/asm/kfence.h
new file mode 100644
index 000000000000..7980d0f2271f
--- /dev/null
+++ b/arch/arm/include/asm/kfence.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_ARM_KFENCE_H
+#define __ASM_ARM_KFENCE_H
+
+#include <linux/kfence.h>
+
+#include <asm/pgalloc.h>
+#include <asm/set_memory.h>
+
+static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
+{
+ int i;
+ unsigned long pfn = PFN_DOWN(__pa(addr));
+ pte_t *pte = pte_alloc_one_kernel(&init_mm);
+
+ if (!pte)
+ return -ENOMEM;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
+ pmd_populate_kernel(&init_mm, pmd, pte);
+
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+ return 0;
+}
+
+static inline bool arch_kfence_init_pool(void)
+{
+ unsigned long addr;
+ pmd_t *pmd;
+
+ for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+ addr += PAGE_SIZE) {
+ pmd = pmd_off_k(addr);
+
+ if (pmd_leaf(*pmd)) {
+ if (split_pmd_page(pmd, addr & PMD_MASK))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ set_memory_valid(addr, 1, !protect);
+
+ return true;
+}
+
+#endif /* __ASM_ARM_KFENCE_H */
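The pool is pre-split to PTE granularity precisely so kfence_protect_page() can flip a single page's validity. On the consuming side, a fault handler is expected to hand candidate addresses to the generic KFENCE entry point; a schematic sketch (not the real abort handler, all foo_* names hypothetical) might look like:

#include <linux/kfence.h>

static bool foo_try_kfence(unsigned long addr, bool is_write,
			   struct pt_regs *regs)
{
	/*
	 * Faults on a protected KFENCE page are reported by the core,
	 * which then re-validates the page via kfence_protect_page().
	 */
	return is_kfence_address((void *)addr) &&
	       kfence_handle_page_fault(addr, is_write, regs);
}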
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 48066ce9ea34..8de1100d1067 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ARM KGDB support
*
@@ -11,6 +12,7 @@
#define __ARM_KGDB_H__
#include <linux/ptrace.h>
+#include <asm/opcodes.h>
/*
* GDB assumes that we're a user process being debugged, so
@@ -41,7 +43,7 @@
static inline void arch_kgdb_breakpoint(void)
{
- asm(".word 0xe7ffdeff");
+ asm(__inst_arm(0xe7ffdeff));
}
extern void kgdb_handle_bus_error(void);
@@ -75,7 +77,7 @@ extern int kgdb_fault_expected;
#define KGDB_MAX_NO_CPUS 1
#define BUFMAX 400
-#define NUMREGBYTES (DBG_MAX_REG_NUM << 2)
+#define NUMREGBYTES (GDB_MAX_REGS << 2)
#define NUMCRITREGBYTES (32 << 2)
#define _R0 0
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
deleted file mode 100644
index 83eb2f772911..000000000000
--- a/arch/arm/include/asm/kmap_types.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ARM_KMAP_TYPES_H
-#define __ARM_KMAP_TYPES_H
-
-/*
- * This is the "bare minimum". AIO seems to require this.
- */
-#define KM_TYPE_NR 16
-
-#endif
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index f82ec22eeb11..5b8dbf1b0be4 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -1,48 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/kprobes.h
*
* Copyright (C) 2006, 2007 Motorola Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _ARM_KPROBES_H
#define _ARM_KPROBES_H
+#include <asm-generic/kprobes.h>
+
+#ifdef CONFIG_KPROBES
#include <linux/types.h>
#include <linux/ptrace.h>
-#include <linux/percpu.h>
+#include <linux/notifier.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 2
-#define MAX_STACK_SIZE 64 /* 32 would probably be OK */
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
typedef u32 kprobe_opcode_t;
-
struct kprobe;
-typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
-typedef unsigned long (kprobe_check_cc)(unsigned long);
-typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *);
-typedef void (kprobe_insn_fn_t)(void);
+#include <asm/probes.h>
-/* Architecture specific copy of original instruction. */
-struct arch_specific_insn {
- kprobe_opcode_t *insn;
- kprobe_insn_handler_t *insn_handler;
- kprobe_check_cc *insn_check_cc;
- kprobe_insn_singlestep_t *insn_singlestep;
- kprobe_insn_fn_t *insn_fn;
-};
+#define arch_specific_insn arch_probes_insn
struct prev_kprobe {
struct kprobe *kp;
@@ -53,14 +36,43 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned int kprobe_status;
struct prev_kprobe prev_kprobe;
- struct pt_regs jprobe_saved_regs;
- char jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
-int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
+/* optinsn template addresses */
+extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_val[];
+extern __visible kprobe_opcode_t optprobe_template_call[];
+extern __visible kprobe_opcode_t optprobe_template_end[];
+extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
+extern __visible kprobe_opcode_t optprobe_template_add_sp[];
+extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
+extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
+extern __visible kprobe_opcode_t optprobe_template_restore_end[];
+
+#define MAX_OPTIMIZED_LENGTH 4
+#define MAX_OPTINSN_SIZE \
+ ((unsigned long)optprobe_template_end - \
+ (unsigned long)optprobe_template_entry)
+#define RELATIVEJUMP_SIZE 4
+
+struct arch_optimized_insn {
+ /*
+ * Copy of the original instructions.
+ * Unlike x86, the ARM kprobe_opcode_t is u32.
+ */
+#define MAX_COPIED_INSN DIV_ROUND_UP(RELATIVEJUMP_SIZE, sizeof(kprobe_opcode_t))
+ kprobe_opcode_t copied_insn[MAX_COPIED_INSN];
+ /* detour code buffer */
+ kprobe_opcode_t *insn;
+ /*
+ * We always copy one instruction on ARM,
+ * so size will always be 4, and unlike x86, there is no
+ * need for a size field.
+ */
+};
+#endif /* CONFIG_KPROBES */
#endif /* _ARM_KPROBES_H */
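The optprobe template symbols bound MAX_OPTINSN_SIZE, the size of the detour buffer the optimizer copies the template into; users of the API are unaffected by the optimization. A hedged registration sketch, where the probed symbol and all foo_* names are illustrative only:

#include <linux/kprobes.h>
#include <linux/printk.h>

static int foo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s at pc=%08lx\n", p->symbol_name, regs->ARM_pc);
	return 0;
}

static struct kprobe foo_kp = {
	.symbol_name	= "do_fork",	/* illustrative symbol */
	.pre_handler	= foo_pre,
};

static int __init foo_probe_init(void)
{
	/* the core transparently turns this into an optprobe if it can */
	return register_kprobe(&foo_kp);
}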
diff --git a/arch/arm/include/asm/krait-l2-accessors.h b/arch/arm/include/asm/krait-l2-accessors.h
new file mode 100644
index 000000000000..a5f2cdd6445f
--- /dev/null
+++ b/arch/arm/include/asm/krait-l2-accessors.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASMARM_KRAIT_L2_ACCESSORS_H
+#define __ASMARM_KRAIT_L2_ACCESSORS_H
+
+extern void krait_set_l2_indirect_reg(u32 addr, u32 val);
+extern u32 krait_get_l2_indirect_reg(u32 addr);
+
+#endif
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
deleted file mode 100644
index 64e96960de29..000000000000
--- a/arch/arm/include/asm/kvm_arm.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_ARM_H__
-#define __ARM_KVM_ARM_H__
-
-#include <linux/types.h>
-
-/* Hyp Configuration Register (HCR) bits */
-#define HCR_TGE (1 << 27)
-#define HCR_TVM (1 << 26)
-#define HCR_TTLB (1 << 25)
-#define HCR_TPU (1 << 24)
-#define HCR_TPC (1 << 23)
-#define HCR_TSW (1 << 22)
-#define HCR_TAC (1 << 21)
-#define HCR_TIDCP (1 << 20)
-#define HCR_TSC (1 << 19)
-#define HCR_TID3 (1 << 18)
-#define HCR_TID2 (1 << 17)
-#define HCR_TID1 (1 << 16)
-#define HCR_TID0 (1 << 15)
-#define HCR_TWE (1 << 14)
-#define HCR_TWI (1 << 13)
-#define HCR_DC (1 << 12)
-#define HCR_BSU (3 << 10)
-#define HCR_BSU_IS (1 << 10)
-#define HCR_FB (1 << 9)
-#define HCR_VA (1 << 8)
-#define HCR_VI (1 << 7)
-#define HCR_VF (1 << 6)
-#define HCR_AMO (1 << 5)
-#define HCR_IMO (1 << 4)
-#define HCR_FMO (1 << 3)
-#define HCR_PTW (1 << 2)
-#define HCR_SWIO (1 << 1)
-#define HCR_VM 1
-
-/*
- * The bits we set in HCR:
- * TAC: Trap ACTLR
- * TSC: Trap SMC
- * TSW: Trap cache operations by set/way
- * TWI: Trap WFI
- * TIDCP: Trap L2CTLR/L2ECTLR
- * BSU_IS: Upgrade barriers to the inner shareable domain
- * FB: Force broadcast of all maintenance operations
- * AMO: Override CPSR.A and enable signaling with VA
- * IMO: Override CPSR.I and enable signaling with VI
- * FMO: Override CPSR.F and enable signaling with VF
- * SWIO: Turn set/way invalidates into set/way clean+invalidate
- */
-#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
- HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
- HCR_SWIO | HCR_TIDCP)
-#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
-
-/* System Control Register (SCTLR) bits */
-#define SCTLR_TE (1 << 30)
-#define SCTLR_EE (1 << 25)
-#define SCTLR_V (1 << 13)
-
-/* Hyp System Control Register (HSCTLR) bits */
-#define HSCTLR_TE (1 << 30)
-#define HSCTLR_EE (1 << 25)
-#define HSCTLR_FI (1 << 21)
-#define HSCTLR_WXN (1 << 19)
-#define HSCTLR_I (1 << 12)
-#define HSCTLR_C (1 << 2)
-#define HSCTLR_A (1 << 1)
-#define HSCTLR_M 1
-#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
- HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
-
-/* TTBCR and HTCR Registers bits */
-#define TTBCR_EAE (1 << 31)
-#define TTBCR_IMP (1 << 30)
-#define TTBCR_SH1 (3 << 28)
-#define TTBCR_ORGN1 (3 << 26)
-#define TTBCR_IRGN1 (3 << 24)
-#define TTBCR_EPD1 (1 << 23)
-#define TTBCR_A1 (1 << 22)
-#define TTBCR_T1SZ (3 << 16)
-#define TTBCR_SH0 (3 << 12)
-#define TTBCR_ORGN0 (3 << 10)
-#define TTBCR_IRGN0 (3 << 8)
-#define TTBCR_EPD0 (1 << 7)
-#define TTBCR_T0SZ 3
-#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
-
-/* Hyp System Trap Register */
-#define HSTR_T(x) (1 << x)
-#define HSTR_TTEE (1 << 16)
-#define HSTR_TJDBX (1 << 17)
-
-/* Hyp Coprocessor Trap Register */
-#define HCPTR_TCP(x) (1 << x)
-#define HCPTR_TCP_MASK (0x3fff)
-#define HCPTR_TASE (1 << 15)
-#define HCPTR_TTA (1 << 20)
-#define HCPTR_TCPAC (1 << 31)
-
-/* Hyp Debug Configuration Register bits */
-#define HDCR_TDRA (1 << 11)
-#define HDCR_TDOSA (1 << 10)
-#define HDCR_TDA (1 << 9)
-#define HDCR_TDE (1 << 8)
-#define HDCR_HPME (1 << 7)
-#define HDCR_TPM (1 << 6)
-#define HDCR_TPMCR (1 << 5)
-#define HDCR_HPMN_MASK (0x1F)
-
-/*
- * The architecture supports 40-bit IPA as input to the 2nd stage translations
- * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
- * space.
- */
-#define KVM_PHYS_SHIFT (40)
-#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL)
-#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
-#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
-
-/* Virtualization Translation Control Register (VTCR) bits */
-#define VTCR_SH0 (3 << 12)
-#define VTCR_ORGN0 (3 << 10)
-#define VTCR_IRGN0 (3 << 8)
-#define VTCR_SL0 (3 << 6)
-#define VTCR_S (1 << 4)
-#define VTCR_T0SZ (0xf)
-#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
- VTCR_S | VTCR_T0SZ)
-#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
-#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
-#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
-#define KVM_VTCR_SL0 VTCR_SL_L1
-/* stage-2 input address range defined as 2^(32-T0SZ) */
-#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
-#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
-#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
-
-/* Virtualization Translation Table Base Register (VTTBR) bits */
-#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
-#define VTTBR_X (14 - KVM_T0SZ)
-#else
-#define VTTBR_X (5 - KVM_T0SZ)
-#endif
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
-#define VTTBR_VMID_SHIFT (48LLU)
-#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
-
-/* Hyp Syndrome Register (HSR) bits */
-#define HSR_EC_SHIFT (26)
-#define HSR_EC (0x3fU << HSR_EC_SHIFT)
-#define HSR_IL (1U << 25)
-#define HSR_ISS (HSR_IL - 1)
-#define HSR_ISV_SHIFT (24)
-#define HSR_ISV (1U << HSR_ISV_SHIFT)
-#define HSR_SRT_SHIFT (16)
-#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
-#define HSR_FSC (0x3f)
-#define HSR_FSC_TYPE (0x3c)
-#define HSR_SSE (1 << 21)
-#define HSR_WNR (1 << 6)
-#define HSR_CV_SHIFT (24)
-#define HSR_CV (1U << HSR_CV_SHIFT)
-#define HSR_COND_SHIFT (20)
-#define HSR_COND (0xfU << HSR_COND_SHIFT)
-
-#define FSC_FAULT (0x04)
-#define FSC_PERM (0x0c)
-
-/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
-#define HPFAR_MASK (~0xf)
-
-#define HSR_EC_UNKNOWN (0x00)
-#define HSR_EC_WFI (0x01)
-#define HSR_EC_CP15_32 (0x03)
-#define HSR_EC_CP15_64 (0x04)
-#define HSR_EC_CP14_MR (0x05)
-#define HSR_EC_CP14_LS (0x06)
-#define HSR_EC_CP_0_13 (0x07)
-#define HSR_EC_CP10_ID (0x08)
-#define HSR_EC_JAZELLE (0x09)
-#define HSR_EC_BXJ (0x0A)
-#define HSR_EC_CP14_64 (0x0C)
-#define HSR_EC_SVC_HYP (0x11)
-#define HSR_EC_HVC (0x12)
-#define HSR_EC_SMC (0x13)
-#define HSR_EC_IABT (0x20)
-#define HSR_EC_IABT_HYP (0x21)
-#define HSR_EC_DABT (0x24)
-#define HSR_EC_DABT_HYP (0x25)
-
-#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
-
-#define HSR_DABT_S1PTW (1U << 7)
-#define HSR_DABT_CM (1U << 8)
-#define HSR_DABT_EA (1U << 9)
-
-#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
deleted file mode 100644
index a2f43ddcc300..000000000000
--- a/arch/arm/include/asm/kvm_asm.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_ASM_H__
-#define __ARM_KVM_ASM_H__
-
-/* 0 is reserved as an invalid value. */
-#define c0_MPIDR 1 /* MultiProcessor ID Register */
-#define c0_CSSELR 2 /* Cache Size Selection Register */
-#define c1_SCTLR 3 /* System Control Register */
-#define c1_ACTLR 4 /* Auxiliary Control Register */
-#define c1_CPACR 5 /* Coprocessor Access Control */
-#define c2_TTBR0 6 /* Translation Table Base Register 0 */
-#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */
-#define c2_TTBR1 8 /* Translation Table Base Register 1 */
-#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */
-#define c2_TTBCR 10 /* Translation Table Base Control R. */
-#define c3_DACR 11 /* Domain Access Control Register */
-#define c5_DFSR 12 /* Data Fault Status Register */
-#define c5_IFSR 13 /* Instruction Fault Status Register */
-#define c5_ADFSR 14 /* Auxiliary Data Fault Status R */
-#define c5_AIFSR 15 /* Auxiliary Instruction Fault Status R */
-#define c6_DFAR 16 /* Data Fault Address Register */
-#define c6_IFAR 17 /* Instruction Fault Address Register */
-#define c7_PAR 18 /* Physical Address Register */
-#define c7_PAR_high 19 /* PAR top 32 bits */
-#define c9_L2CTLR 20 /* Cortex A15 L2 Control Register */
-#define c10_PRRR 21 /* Primary Region Remap Register */
-#define c10_NMRR 22 /* Normal Memory Remap Register */
-#define c12_VBAR 23 /* Vector Base Address Register */
-#define c13_CID 24 /* Context ID Register */
-#define c13_TID_URW 25 /* Thread ID, User R/W */
-#define c13_TID_URO 26 /* Thread ID, User R/O */
-#define c13_TID_PRIV 27 /* Thread ID, Privileged */
-#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */
-#define NR_CP15_REGS 29 /* Number of regs (incl. invalid) */
-
-#define ARM_EXCEPTION_RESET 0
-#define ARM_EXCEPTION_UNDEFINED 1
-#define ARM_EXCEPTION_SOFTWARE 2
-#define ARM_EXCEPTION_PREF_ABORT 3
-#define ARM_EXCEPTION_DATA_ABORT 4
-#define ARM_EXCEPTION_IRQ 5
-#define ARM_EXCEPTION_FIQ 6
-#define ARM_EXCEPTION_HVC 7
-
-#ifndef __ASSEMBLY__
-struct kvm;
-struct kvm_vcpu;
-
-extern char __kvm_hyp_init[];
-extern char __kvm_hyp_init_end[];
-
-extern char __kvm_hyp_exit[];
-extern char __kvm_hyp_exit_end[];
-
-extern char __kvm_hyp_vector[];
-
-extern char __kvm_hyp_code_start[];
-extern char __kvm_hyp_code_end[];
-
-extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-
-extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
-#endif
-
-#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
deleted file mode 100644
index 4917c2f7e459..000000000000
--- a/arch/arm/include/asm/kvm_coproc.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2012 Rusty Russell IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_COPROC_H__
-#define __ARM_KVM_COPROC_H__
-#include <linux/kvm_host.h>
-
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_coproc_target_table {
- unsigned target;
- const struct coproc_reg *table;
- size_t num;
-};
-void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
-
-int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-void kvm_coproc_table_init(void);
-
-struct kvm_one_reg;
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-#endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
deleted file mode 100644
index a464e8d7b6c5..000000000000
--- a/arch/arm/include/asm/kvm_emulate.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_EMULATE_H__
-#define __ARM_KVM_EMULATE_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
-#include <asm/kvm_arm.h>
-
-unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
-
-bool kvm_condition_valid(struct kvm_vcpu *vcpu);
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
-void kvm_inject_undefined(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
-
-static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
-{
- return 1;
-}
-
-static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
-{
- return &vcpu->arch.regs.usr_regs.ARM_pc;
-}
-
-static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
-{
- return &vcpu->arch.regs.usr_regs.ARM_cpsr;
-}
-
-static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
-{
- *vcpu_cpsr(vcpu) |= PSR_T_BIT;
-}
-
-static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
- return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
-}
-
-static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
- return cpsr_mode > USR_MODE;
-}
-
-static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hsr;
-}
-
-static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hxfar;
-}
-
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
-{
- return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
-}
-
-static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hyp_pc;
-}
-
-static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
-}
-
-static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
-}
-
-static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
-}
-
-static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
-{
- return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
-}
-
-static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
-}
-
-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
-}
-
-/* Get Access Size from a data abort */
-static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
-{
- switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
- case 0:
- return 1;
- case 1:
- return 2;
- case 2:
- return 4;
- default:
- kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
- return -EFAULT;
- }
-}
-
-/* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
-}
-
-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
-}
-
-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
-}
-
-static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
-}
-
-static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
-}
-
-#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
deleted file mode 100644
index 7d22517d8071..000000000000
--- a/arch/arm/include/asm/kvm_host.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_HOST_H__
-#define __ARM_KVM_HOST_H__
-
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
-#include <asm/fpstate.h>
-#include <kvm/arm_arch_timer.h>
-
-#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
-#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
-#else
-#define KVM_MAX_VCPUS 0
-#endif
-
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
-#define KVM_HAVE_ONE_REG
-
-#define KVM_VCPU_MAX_FEATURES 1
-
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x) 0
-#define KVM_NR_PAGE_SIZES 1
-#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
-
-#include <kvm/arm_vgic.h>
-
-struct kvm_vcpu;
-u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
-int kvm_target_cpu(void);
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_arch {
- /* VTTBR value associated with below pgd and vmid */
- u64 vttbr;
-
- /* Timer */
- struct arch_timer_kvm timer;
-
- /*
- * Anything that is not used directly from assembly code goes
- * here.
- */
-
- /* The VMID generation used for the virt. memory system */
- u64 vmid_gen;
- u32 vmid;
-
- /* Stage-2 page table */
- pgd_t *pgd;
-
- /* Interrupt controller */
- struct vgic_dist vgic;
-};
-
-#define KVM_NR_MEM_OBJS 40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
- int nobjs;
- void *objects[KVM_NR_MEM_OBJS];
-};
-
-struct kvm_vcpu_fault_info {
- u32 hsr; /* Hyp Syndrome Register */
- u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
- u32 hpfar; /* Hyp IPA Fault Address Register */
- u32 hyp_pc; /* PC when exception was taken from Hyp mode */
-};
-
-typedef struct vfp_hard_struct kvm_cpu_context_t;
-
-struct kvm_vcpu_arch {
- struct kvm_regs regs;
-
- int target; /* Processor target */
- DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
-
- /* System control coprocessor (cp15) */
- u32 cp15[NR_CP15_REGS];
-
- /* The CPU type we expose to the VM */
- u32 midr;
-
- /* Exception Information */
- struct kvm_vcpu_fault_info fault;
-
- /* Floating point registers (VFP and Advanced SIMD/NEON) */
- struct vfp_hard_struct vfp_guest;
-
- /* Host FP context */
- kvm_cpu_context_t *host_cpu_context;
-
- /* VGIC state */
- struct vgic_cpu vgic_cpu;
- struct arch_timer_cpu timer_cpu;
-
- /*
- * Anything that is not used directly from assembly code goes
- * here.
- */
- /* dcache set/way operation pending */
- int last_pcpu;
- cpumask_t require_dcache_flush;
-
- /* Don't run the guest on this vcpu */
- bool pause;
-
- /* IO related fields */
- struct kvm_decode mmio_decode;
-
- /* Interrupt related fields */
- u32 irq_lines; /* IRQ and FIQ levels */
-
- /* Cache some mmu pages needed inside spinlock regions */
- struct kvm_mmu_memory_cache mmu_page_cache;
-
- /* Detect first run of a vcpu */
- bool has_run_once;
-};
-
-struct kvm_vm_stat {
- u32 remote_tlb_flush;
-};
-
-struct kvm_vcpu_stat {
- u32 halt_wakeup;
-};
-
-struct kvm_vcpu_init;
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init);
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-struct kvm_one_reg;
-int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-u64 kvm_call_hyp(void *hypfn, ...);
-void force_vm_exit(const cpumask_t *mask);
-
-#define KVM_ARCH_WANT_MMU_NOTIFIER
-struct kvm;
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
-int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-
-/* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return 0;
-}
-
-static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return 0;
-}
-
-struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
-
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-struct kvm_one_reg;
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
- int exception_index);
-
-static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
- phys_addr_t pgd_ptr,
- unsigned long hyp_stack_ptr,
- unsigned long vector_ptr)
-{
- /*
- * Call initialization code, and switch to the full blown HYP
- * code. The init code doesn't need to preserve these
- * registers as r0-r3 are already callee saved according to
- * the AAPCS.
- * Note that we slightly misuse the prototype by casting the
- * stack pointer to a void *.
- *
- * We don't have enough registers to perform the full init in
- * one go. Install the boot PGD first, and then install the
- * runtime PGD, stack pointer and vectors. The PGDs are always
- * passed as the third argument, in order to be passed into
- * r2-r3 to the init code (yes, this is compliant with the
- * PCS!).
- */
-
- kvm_call_hyp(NULL, 0, boot_pgd_ptr);
-
- kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
-}
-
-static inline int kvm_arch_dev_ioctl_check_extension(long ext)
-{
- return 0;
-}
-
-int kvm_perf_init(void);
-int kvm_perf_teardown(void);
-
-#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
deleted file mode 100644
index adcc0d7d3175..000000000000
--- a/arch/arm/include/asm/kvm_mmio.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_MMIO_H__
-#define __ARM_KVM_MMIO_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-
-struct kvm_decode {
- unsigned long rt;
- bool sign_extend;
-};
-
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
- phys_addr_t phys_addr;
- u8 data[8];
- u32 len;
- bool is_write;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- run->mmio.phys_addr = mmio->phys_addr;
- run->mmio.len = mmio->len;
- run->mmio.is_write = mmio->is_write;
- memcpy(run->mmio.data, mmio->data, mmio->len);
- run->exit_reason = KVM_EXIT_MMIO;
-}
-
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
- phys_addr_t fault_ipa);
-
-#endif /* __ARM_KVM_MMIO_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
deleted file mode 100644
index 472ac7091003..000000000000
--- a/arch/arm/include/asm/kvm_mmu.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_MMU_H__
-#define __ARM_KVM_MMU_H__
-
-#include <asm/memory.h>
-#include <asm/page.h>
-
-/*
- * We directly use the kernel VA for the HYP, as we can directly share
- * the mapping (HTTBR "covers" TTBR1).
- */
-#define HYP_PAGE_OFFSET_MASK UL(~0)
-#define HYP_PAGE_OFFSET PAGE_OFFSET
-#define KERN_TO_HYP(kva) (kva)
-
-/*
- * Our virtual mapping for the boot-time MMU-enable code. Must be
- * shared across all the page-tables. Conveniently, we use the vectors
- * page, where no kernel data will ever be shared with HYP.
- */
-#define TRAMPOLINE_VA UL(CONFIG_VECTORS_BASE)
-
-#ifndef __ASSEMBLY__
-
-#include <asm/cacheflush.h>
-#include <asm/pgalloc.h>
-
-int create_hyp_mappings(void *from, void *to);
-int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
-void free_boot_hyp_pgd(void);
-void free_hyp_pgds(void);
-
-int kvm_alloc_stage2_pgd(struct kvm *kvm);
-void kvm_free_stage2_pgd(struct kvm *kvm);
-int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
- phys_addr_t pa, unsigned long size);
-
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
-
-phys_addr_t kvm_mmu_get_httbr(void);
-phys_addr_t kvm_mmu_get_boot_httbr(void);
-phys_addr_t kvm_get_idmap_vector(void);
-int kvm_mmu_init(void);
-void kvm_clear_hyp_idmap(void);
-
-static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
-{
- pte_val(*pte) = new_pte;
- /*
- * flush_pmd_entry just takes a void pointer and cleans the necessary
- * cache entries, so we can reuse the function for ptes.
- */
- flush_pmd_entry(pte);
-}
-
-static inline bool kvm_is_write_fault(unsigned long hsr)
-{
- unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
- if (hsr_ec == HSR_EC_IABT)
- return false;
- else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
- return false;
- else
- return true;
-}
-
-static inline void kvm_clean_pgd(pgd_t *pgd)
-{
- clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
-}
-
-static inline void kvm_clean_pmd_entry(pmd_t *pmd)
-{
- clean_pmd_entry(pmd);
-}
-
-static inline void kvm_clean_pte(pte_t *pte)
-{
- clean_pte_table(pte);
-}
-
-static inline void kvm_set_s2pte_writable(pte_t *pte)
-{
- pte_val(*pte) |= L_PTE_S2_RDWR;
-}
-
-struct kvm;
-
-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
-{
- /*
- * If we are going to insert an instruction page and the icache is
- * either VIPT or PIPT, there is a potential problem where the host
- * (or another VM) may have used the same page as this guest, and we
- * read incorrect data from the icache. If we're using a PIPT cache,
- * we can invalidate just that page, but if we are using a VIPT cache
- * we need to invalidate the entire icache - damn shame - as written
- * in the ARM ARM (DDI 0406C.b - Page B3-1393).
- *
- * VIVT caches are tagged using both the ASID and the VMID and doesn't
- * need any kind of flushing (DDI 0406C.b - Page B3-1392).
- */
- if (icache_is_pipt()) {
- unsigned long hva = gfn_to_hva(kvm, gfn);
- __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
- } else if (!icache_is_vivt_asid_tagged()) {
- /* any kind of VIPT cache */
- __flush_icache_all();
- }
-}
-
-#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
deleted file mode 100644
index 9a83d98bf170..000000000000
--- a/arch/arm/include/asm/kvm_psci.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2012 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM_KVM_PSCI_H__
-#define __ARM_KVM_PSCI_H__
-
-bool kvm_psci_call(struct kvm_vcpu *vcpu);
-
-#endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/limits.h b/arch/arm/include/asm/limits.h
deleted file mode 100644
index 08d8c6600804..000000000000
--- a/arch/arm/include/asm/limits.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __ASM_PIPE_H
-#define __ASM_PIPE_H
-
-#ifndef PAGE_SIZE
-#include <asm/page.h>
-#endif
-
-#define PIPE_BUF PAGE_SIZE
-
-#endif
-
diff --git a/arch/arm/include/asm/linkage.h b/arch/arm/include/asm/linkage.h
index 5a25632b1bc0..c4670694ada7 100644
--- a/arch/arm/include/asm/linkage.h
+++ b/arch/arm/include/asm/linkage.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
deleted file mode 100644
index f77ffc1eb0c2..000000000000
--- a/arch/arm/include/asm/localtimer.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * arch/arm/include/asm/localtimer.h
- *
- * Copyright (C) 2004-2005 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_ARM_LOCALTIMER_H
-#define __ASM_ARM_LOCALTIMER_H
-
-#include <linux/errno.h>
-
-struct clock_event_device;
-
-struct local_timer_ops {
- int (*setup)(struct clock_event_device *);
- void (*stop)(struct clock_event_device *);
-};
-
-#ifdef CONFIG_LOCAL_TIMERS
-/*
- * Register a local timer driver
- */
-int local_timer_register(struct local_timer_ops *);
-#else
-static inline int local_timer_register(struct local_timer_ops *ops)
-{
- return -ENXIO;
-}
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/mach-types.h b/arch/arm/include/asm/mach-types.h
deleted file mode 100644
index 948178cc6ba8..000000000000
--- a/arch/arm/include/asm/mach-types.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/mach-types.h>
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 441efc491b50..2b18a258204d 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/arch.h
*
* Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/types.h>
@@ -14,7 +11,6 @@
#include <linux/reboot.h>
struct tag;
-struct meminfo;
struct pt_regs;
struct smp_operations;
#ifdef CONFIG_SMP
@@ -35,7 +31,7 @@ struct machine_desc {
unsigned int nr_irqs; /* number of IRQs */
#ifdef CONFIG_ZONE_DMA
- unsigned long dma_zone_size; /* size of DMA-able area */
+ phys_addr_t dma_zone_size; /* size of DMA-able area */
#endif
unsigned int video_start; /* start of video RAM */
@@ -45,10 +41,14 @@ struct machine_desc {
unsigned char reserve_lp1 :1; /* never has lp1 */
unsigned char reserve_lp2 :1; /* never has lp2 */
enum reboot_mode reboot_mode; /* default restart mode */
- struct smp_operations *smp; /* SMP operations */
+ unsigned l2c_aux_val; /* L2 cache aux value */
+ unsigned l2c_aux_mask; /* L2 cache aux mask */
+ void (*l2c_write_sec)(unsigned long, unsigned);
+ const struct smp_operations *smp; /* SMP operations */
bool (*smp_init)(void);
- void (*fixup)(struct tag *, char **,
- struct meminfo *);
+ void (*fixup)(struct tag *, char **);
+ void (*dt_fixup)(void);
+ long long (*pv_fixup)(void);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
void (*init_early)(void);
@@ -56,21 +56,18 @@ struct machine_desc {
void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
- void (*handle_irq)(struct pt_regs *);
-#endif
void (*restart)(enum reboot_mode, const char *);
};
/*
* Current machine - only accessible during boot.
*/
-extern struct machine_desc *machine_desc;
+extern const struct machine_desc *machine_desc;
/*
* Machine type table - also only accessible during boot
*/
-extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
#define for_each_machine_desc(p) \
for (p = __arch_info_begin; p < __arch_info_end; p++)
@@ -81,7 +78,7 @@ extern struct machine_desc __arch_info_begin[], __arch_info_end[];
#define MACHINE_START(_type,_name) \
static const struct machine_desc __mach_desc_##_type \
__used \
- __attribute__((__section__(".arch.info.init"))) = { \
+ __section(".arch.info.init") = { \
.nr = MACH_TYPE_##_type, \
.name = _name,
@@ -91,7 +88,7 @@ static const struct machine_desc __mach_desc_##_type \
#define DT_MACHINE_START(_name, _namestr) \
static const struct machine_desc __mach_desc_##_name \
__used \
- __attribute__((__section__(".arch.info.init"))) = { \
+ __section(".arch.info.init") = { \
.nr = ~0, \
.name = _namestr,
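A board file instantiates one of these descriptors with the macros above. A hedged sketch for a DT-only platform follows; all foo_* names are hypothetical, dt_compat is a machine_desc field not visible in this hunk, and MACHINE_END closes the initializer:

static const char *const foo_dt_compat[] __initconst = {
	"vendor,foo-board",
	NULL,
};

DT_MACHINE_START(FOO, "Hypothetical Foo board")
	.dt_compat	= foo_dt_compat,
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
MACHINE_END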
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index 9e614a18e680..5ec11d7f0d04 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/dma.h
*
* Copyright (C) 1998-2000 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This header file describes the interface between the generic DMA handler
* (dma.c) and the architecture-specific DMA backends (dma-*.c)
*/
@@ -47,8 +44,3 @@ struct dma_struct {
* isa_dma_add - add an ISA-style DMA channel
*/
extern int isa_dma_add(unsigned int, dma_t *dma);
-
-/*
- * Add the ISA DMA controller. Always takes channels 0-7.
- */
-extern void isa_init_dma(void);
diff --git a/arch/arm/include/asm/mach/flash.h b/arch/arm/include/asm/mach/flash.h
index 4ca69fe2c850..c9cbfdefc938 100644
--- a/arch/arm/include/asm/mach/flash.h
+++ b/arch/arm/include/asm/mach/flash.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/flash.h
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASMARM_MACH_FLASH_H
#define ASMARM_MACH_FLASH_H
@@ -22,7 +19,7 @@ struct mtd_info;
* set_vpp: method called to enable or disable VPP
* mmcontrol: method called to enable or disable Sync. Burst Read in OneNAND
* parts: optional array of mtd_partitions for static partitioning
- * nr_parts: number of mtd_partitions for static partitoning
+ * nr_parts: number of mtd_partitions for static partitioning
*/
struct flash_platform_data {
const char *map_name;
diff --git a/arch/arm/include/asm/mach/irda.h b/arch/arm/include/asm/mach/irda.h
deleted file mode 100644
index 38f77b5e56cf..000000000000
--- a/arch/arm/include/asm/mach/irda.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/include/asm/mach/irda.h
- *
- * Copyright (C) 2004 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_ARM_MACH_IRDA_H
-#define __ASM_ARM_MACH_IRDA_H
-
-struct irda_platform_data {
- int (*startup)(struct device *);
- void (*shutdown)(struct device *);
- int (*set_power)(struct device *, unsigned int state);
- void (*set_speed)(struct device *, unsigned int speed);
-};
-
-#endif
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 2092ee1e1300..dfe832a3bfc7 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/irq.h
*
* Copyright (C) 1995-2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_MACH_IRQ_H
#define __ASM_ARM_MACH_IRQ_H
@@ -23,10 +20,10 @@ extern int show_fiq_list(struct seq_file *, int);
/*
* This is for easy migration, but should be changed in the source
*/
-#define do_bad_IRQ(irq,desc) \
+#define do_bad_IRQ(desc) \
do { \
raw_spin_lock(&desc->lock); \
- handle_bad_irq(irq, desc); \
+ handle_bad_irq(desc); \
raw_spin_unlock(&desc->lock); \
} while(0)
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141fcc8d6..2b8970d8e5a2 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/map.h
*
* Copyright (C) 1999-2000 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Page table mapping constructs and function prototypes
*/
#ifndef __ASM_MACH_MAP_H
@@ -22,23 +19,29 @@ struct map_desc {
};
/* types 0-3 are defined in asm/io.h */
-#define MT_UNCACHED 4
-#define MT_CACHECLEAN 5
-#define MT_MINICLEAN 6
-#define MT_LOW_VECTORS 7
-#define MT_HIGH_VECTORS 8
-#define MT_MEMORY 9
-#define MT_ROM 10
-#define MT_MEMORY_NONCACHED 11
-#define MT_MEMORY_DTCM 12
-#define MT_MEMORY_ITCM 13
-#define MT_MEMORY_SO 14
-#define MT_MEMORY_DMA_READY 15
+enum {
+ MT_UNCACHED = 4,
+ MT_CACHECLEAN,
+ MT_MINICLEAN,
+ MT_LOW_VECTORS,
+ MT_HIGH_VECTORS,
+ MT_MEMORY_RWX,
+ MT_MEMORY_RW,
+ MT_MEMORY_RO,
+ MT_ROM,
+ MT_MEMORY_RWX_NONCACHED,
+ MT_MEMORY_RW_DTCM,
+ MT_MEMORY_RWX_ITCM,
+ MT_MEMORY_RW_SO,
+ MT_MEMORY_DMA_READY,
+};
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
void *caller);
+extern void create_mapping_late(struct mm_struct *mm, struct map_desc *md,
+ bool ng);
#ifdef CONFIG_DEBUG_LL
extern void debug_ll_addr(unsigned long *paddr, unsigned long *vaddr);
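A platform's map_io() callback typically feeds an array of these map_desc entries to iotable_init(). A hedged sketch with hypothetical addresses; MT_DEVICE is one of the asm/io.h types 0-3 referenced above:

static struct map_desc foo_io_desc[] __initdata = {
	{
		.virtual	= 0xf8000000,	/* hypothetical VA */
		.pfn		= __phys_to_pfn(0x10000000),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};

static void __init foo_map_io(void)
{
	iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
}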
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index a1c90d7feb0e..ea9bd08895b7 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/pci.h
*
* Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_MACH_PCI_H
@@ -16,35 +13,25 @@
struct pci_sys_data;
struct pci_ops;
struct pci_bus;
+struct pci_host_bridge;
struct device;
struct hw_pci {
-#ifdef CONFIG_PCI_DOMAINS
- int domain;
-#endif
struct pci_ops *ops;
int nr_controllers;
void **private_data;
int (*setup)(int nr, struct pci_sys_data *);
- struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
+ int (*scan)(int nr, struct pci_host_bridge *);
void (*preinit)(void);
void (*postinit)(void);
u8 (*swizzle)(struct pci_dev *dev, u8 *pin);
int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
- resource_size_t (*align_resource)(struct pci_dev *dev,
- const struct resource *res,
- resource_size_t start,
- resource_size_t size,
- resource_size_t align);
};
/*
* Per-controller structure
*/
struct pci_sys_data {
-#ifdef CONFIG_PCI_DOMAINS
- int domain;
-#endif
struct list_head node;
int busnr; /* primary bus number */
u64 mem_offset; /* bus->cpu memory mapping offset */
@@ -57,12 +44,6 @@ struct pci_sys_data {
u8 (*swizzle)(struct pci_dev *, u8 *);
/* IRQ mapping */
int (*map_irq)(const struct pci_dev *, u8, u8);
- /* Resource alignement requirements */
- resource_size_t (*align_resource)(struct pci_dev *dev,
- const struct resource *res,
- resource_size_t start,
- resource_size_t size,
- resource_size_t align);
void *private_data; /* platform controller private data */
};
@@ -102,8 +83,4 @@ extern int dc21285_setup(int nr, struct pci_sys_data *);
extern void dc21285_preinit(void);
extern void dc21285_postinit(void);
-extern struct pci_ops via82c505_ops;
-extern int via82c505_setup(int nr, struct pci_sys_data *);
-extern void via82c505_init(void *sysdata);
-
#endif /* __ASM_MACH_PCI_H */
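
With the domain and align_resource members gone, a controller port's scan hook now receives the host bridge directly and returns a status. A minimal sketch, assuming board_pci_ops and board_map_irq exist elsewhere in the port:

static int board_pci_scan(int nr, struct pci_host_bridge *bridge)
{
	/* the PCI core fills in bridge->bus on success */
	return pci_scan_root_bus_bridge(bridge);
}

static struct hw_pci board_pci __initdata = {
	.nr_controllers	= 1,
	.ops		= &board_pci_ops,
	.scan		= board_pci_scan,
	.map_irq	= board_map_irq,
};
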
diff --git a/arch/arm/include/asm/mach/sharpsl_param.h b/arch/arm/include/asm/mach/sharpsl_param.h
index 7a24ecf04220..700a377c20bf 100644
--- a/arch/arm/include/asm/mach/sharpsl_param.h
+++ b/arch/arm/include/asm/mach/sharpsl_param.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Hardware parameter area specific to Sharp SL series devices
*
* Copyright (c) 2005 Richard Purdie
*
* Based on Sharp's 2.4 kernel patches
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
struct sharpsl_param_info {
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 90c12e1e695c..5f522916ec99 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -1,20 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mach/time.h
*
* Copyright (C) 2004 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_MACH_TIME_H
#define __ASM_ARM_MACH_TIME_H
-extern void timer_tick(void);
-
-struct timespec;
-typedef void (*clock_access_fn)(struct timespec *);
-extern int register_persistent_clock(clock_access_fn read_boot,
- clock_access_fn read_persistent);
+typedef void (*clock_access_fn)(struct timespec64 *);
+extern int register_persistent_clock(clock_access_fn read_persistent);
#endif
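
With timer_tick() removed and the boot-time reader dropped, a platform registers a single timespec64-based hook. A minimal sketch, assuming a board_rtc_read_seconds() helper:

static void board_read_persistent(struct timespec64 *ts)
{
	ts->tv_sec  = board_rtc_read_seconds();	/* assumed RTC accessor */
	ts->tv_nsec = 0;
}

static int __init board_persistent_clock_init(void)
{
	return register_persistent_clock(board_read_persistent);
}
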
diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h
index e8567bb99dfc..58922879a600 100644
--- a/arch/arm/include/asm/mc146818rtc.h
+++ b/arch/arm/include/asm/mc146818rtc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Machine dependent access functions for RTC registers.
*/
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 0f7b7620e9a5..755c97de348c 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mcpm.h
*
* Created by: Nicolas Pitre, April 2012
* Copyright: (C) 2012-2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef MCPM_H
@@ -20,7 +17,12 @@
* to consider dynamic allocation.
*/
#define MAX_CPUS_PER_CLUSTER 4
+
+#ifdef CONFIG_MCPM_QUAD_CLUSTER
+#define MAX_NR_CLUSTERS 4
+#else
#define MAX_NR_CLUSTERS 2
+#endif
#ifndef __ASSEMBLY__
@@ -42,10 +44,25 @@ extern void mcpm_entry_point(void);
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
/*
+ * This sets an early poke, i.e. a value to be poked into some address
+ * from very early assembly code before the CPU is ungated. The
+ * address must be physical, and if 0 then nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val);
+
+/*
* CPU/cluster power operations API for higher subsystems to use.
*/
/**
+ * mcpm_is_available - returns whether MCPM is initialized and available
+ *
+ * This returns true or false accordingly.
+ */
+bool mcpm_is_available(void);
+
+/**
+ * mcpm_cpu_power_up - make given CPU in given cluster runnable
*
* @cpu: CPU number within given cluster
@@ -76,32 +93,63 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
*
* This must be called with interrupts disabled.
*
- * This does not return. Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return. Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
+ *
+ * On success, the CPU is not guaranteed to be truly halted until
+ * mcpm_wait_for_cpu_powerdown() subsequently returns non-zero for the
+ * specified cpu. Until then, other CPUs should make sure they do not
+ * trash memory the target CPU might be executing/accessing.
*/
void mcpm_cpu_power_down(void);
/**
- * mcpm_cpu_suspend - bring the calling CPU in a suspended state
+ * mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and
+ * make sure it is powered off
*
- * @expected_residency: duration in microseconds the CPU is expected
- * to remain suspended, or 0 if unknown/infinity.
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * Call this function to ensure that a pending powerdown has taken
+ * effect and the CPU is safely parked before performing non-mcpm
+ * operations that may affect the CPU (such as kexec trashing the
+ * kernel text).
+ *
+ * It is *not* necessary to call this function if you only need to
+ * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
+ * event.
+ *
+ * Do not call this function unless the specified CPU has already
+ * called mcpm_cpu_power_down() or has committed to doing so.
+ *
+ * @return:
+ * - zero if the CPU is in a safely parked state
+ * - nonzero otherwise (e.g., timeout)
+ */
+int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
+
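Taken together with mcpm_cpu_power_down(), the intended pairing is that the target CPU powers itself down while a surviving CPU waits for that to take effect. A sketch, where board_cpu_to_cluster() is an assumed helper:

static int board_cpu_kill(unsigned int cpu)
{
	/* runs on a surviving CPU; returns 1 on success, SMP-kill style */
	return mcpm_wait_for_cpu_powerdown(cpu, board_cpu_to_cluster(cpu)) == 0;
}
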
+/**
+ * mcpm_cpu_suspend - bring the calling CPU in a suspended state
*
- * The calling CPU is suspended. The expected residency argument is used
- * as a hint by the platform specific backend to implement the appropriate
- * sleep state level according to the knowledge it has on wake-up latency
- * for the given hardware.
+ * The calling CPU is suspended. This is similar to mcpm_cpu_power_down()
+ * except for possible extra platform specific configuration steps to allow
+ * an asynchronous wake-up e.g. with a pending interrupt.
*
* If this CPU is found to be the "last man standing" in the cluster
- * then the cluster may be prepared for power-down too, if the expected
- * residency makes it worthwhile.
+ * then the cluster may be prepared for power-down too.
*
* This must be called with interrupts disabled.
*
- * This does not return. Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return. Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
*/
-void mcpm_cpu_suspend(u64 expected_residency);
+void mcpm_cpu_suspend(void);
/**
 * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
@@ -115,13 +163,69 @@ void mcpm_cpu_suspend(u64 expected_residency);
int mcpm_cpu_powered_up(void);
/*
- * Platform specific methods used in the implementation of the above API.
+ * Platform specific callbacks used in the implementation of the above API.
+ *
+ * cpu_powerup:
+ * Make given CPU runnable. Called with MCPM lock held and IRQs disabled.
+ * The given cluster is assumed to be set up (cluster_powerup would have
+ * been called beforehand). Must return 0 for success or negative error code.
+ *
+ * cluster_powerup:
+ * Set up power for given cluster. Called with MCPM lock held and IRQs
+ * disabled. Called before first cpu_powerup when cluster is down. Must
+ * return 0 for success or negative error code.
+ *
+ * cpu_suspend_prepare:
+ * Special suspend configuration. Called on target CPU with MCPM lock held
+ * and IRQs disabled. This callback is optional. If provided, it is called
+ * before cpu_powerdown_prepare.
+ *
+ * cpu_powerdown_prepare:
+ * Configure given CPU for power down. Called on target CPU with MCPM lock
+ * held and IRQs disabled. Power down must be effective only at the next
+ * WFI instruction.
+ *
+ * cluster_powerdown_prepare:
+ * Configure given cluster for power down. Called on one CPU from target
+ * cluster with MCPM lock held and IRQs disabled. cpu_powerdown_prepare has
+ * already been called for each CPU in the cluster by the time this occurs.
+ *
+ * cpu_cache_disable:
+ * Clean and disable CPU level cache for the calling CPU. Called with IRQs
+ * disabled only. The CPU is no longer cache coherent with the rest of the
+ * system when this returns.
+ *
+ * cluster_cache_disable:
+ * Clean and disable the cluster wide cache as well as the CPU level cache
+ * for the calling CPU. No call to cpu_cache_disable will happen for this
+ * CPU. Called with IRQs disabled and only when all the other CPUs are done
+ * with their own cpu_cache_disable. The cluster is no longer cache coherent
+ * with the rest of the system when this returns.
+ *
+ * cpu_is_up:
+ * Called on given CPU after it has been powered up or resumed. The MCPM lock
+ * is held and IRQs disabled. This callback is optional.
+ *
+ * cluster_is_up:
+ * Called by the first CPU to be powered up or resumed in given cluster.
+ * The MCPM lock is held and IRQs disabled. This callback is optional. If
+ * provided, it is called before cpu_is_up for that CPU.
+ *
+ * wait_for_powerdown:
+ * Wait until given CPU is powered down. This is called in sleeping context.
+ * A reasonable timeout must be applied. Must return 0 for success or
+ * negative error code.
*/
struct mcpm_platform_ops {
- int (*power_up)(unsigned int cpu, unsigned int cluster);
- void (*power_down)(void);
- void (*suspend)(u64);
- void (*powered_up)(void);
+ int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
+ int (*cluster_powerup)(unsigned int cluster);
+ void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
+ void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
+ void (*cluster_powerdown_prepare)(unsigned int cluster);
+ void (*cpu_cache_disable)(void);
+ void (*cluster_cache_disable)(void);
+ void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
+ void (*cluster_is_up)(unsigned int cluster);
+ int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
};
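
A backend therefore no longer implements monolithic power_up/power_down hooks but the staged callbacks above. A minimal sketch of an ops table, where every board_* symbol is an assumed platform function:

static const struct mcpm_platform_ops board_pm_ops = {
	.cpu_powerup			= board_cpu_powerup,
	.cluster_powerup		= board_cluster_powerup,
	.cpu_powerdown_prepare		= board_cpu_powerdown_prepare,
	.cluster_powerdown_prepare	= board_cluster_powerdown_prepare,
	.cpu_cache_disable		= board_cpu_cache_disable,
	.cluster_cache_disable		= board_cluster_cache_disable,
	.wait_for_powerdown		= board_wait_for_powerdown,
};
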
/**
@@ -133,9 +237,47 @@ struct mcpm_platform_ops {
*/
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
-/* Synchronisation structures for coordinating safe cluster setup/teardown: */
+/**
+ * mcpm_sync_init - Initialize the cluster synchronization support
+ *
+ * @power_up_setup: platform specific function invoked during very
+ * early CPU/cluster bringup stage.
+ *
+ * This prepares memory used by vlocks and the MCPM state machine used
+ * across CPUs that may have their caches active or inactive. Must be
+ * called only after a successful call to mcpm_platform_register().
+ *
+ * The power_up_setup argument is a pointer to assembly code called when
+ * the MMU and caches are still disabled during boot and no stack space is
+ * available. The affinity level passed to that code corresponds to the
+ * resource that needs to be initialized (e.g. 1 for cluster level, 0 for
+ * CPU level). Proper exclusion mechanisms are already activated at that
+ * point.
+ */
+int __init mcpm_sync_init(
+ void (*power_up_setup)(unsigned int affinity_level));
+
+/**
+ * mcpm_loopback - make a run through the MCPM low-level code
+ *
+ * @cache_disable: pointer to function performing cache disabling
+ *
+ * This exercises the MCPM machinery by soft resetting the CPU and branching
+ * to the MCPM low-level entry code before returning to the caller.
+ * The @cache_disable function must do the necessary cache disabling to
+ * let the regular kernel init code turn it back on as if the CPU was
+ * hotplugged in. The MCPM state machine is set as if the cluster was
+ * initialized meaning the power_up_setup callback passed to mcpm_sync_init()
+ * will be invoked for all affinity levels. This may be useful to initialize
+ * some resources such as enabling the CCI that requires the cache to be off, or simply for testing purposes.
+ */
+int __init mcpm_loopback(void (*cache_disable)(void));
+
+void __init mcpm_smp_set_ops(void);
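
The declarations above imply a boot-time ordering: register the platform ops, initialize the sync machinery, optionally exercise the path via the loopback, then install the SMP ops. A sketch, assuming board_pm_ops and a board_power_up_setup assembly hook:

static int __init board_mcpm_init(void)
{
	int ret;

	ret = mcpm_platform_register(&board_pm_ops);
	if (!ret)
		ret = mcpm_sync_init(board_power_up_setup);
	if (!ret)
		ret = mcpm_loopback(board_cluster_cache_disable);	/* optional */
	if (!ret)
		mcpm_smp_set_ops();
	return ret;
}
early_initcall(board_mcpm_init);
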
/*
+ * Synchronisation structures for coordinating safe cluster setup/teardown.
+ * This is private to the MCPM core code and shared between C and assembly.
* When modifying this structure, make sure you update the MCPM_SYNC_ defines
* to match.
*/
@@ -156,19 +298,6 @@ struct sync_struct {
struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
};
-extern unsigned long sync_phys; /* physical address of *mcpm_sync */
-
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
-bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
-int __mcpm_cluster_state(unsigned int cluster);
-
-int __init mcpm_sync_init(
- void (*power_up_setup)(unsigned int affinity_level));
-
-void __init mcpm_smp_set_ops(void);
-
#else
/*
diff --git a/arch/arm/include/asm/mcs_spinlock.h b/arch/arm/include/asm/mcs_spinlock.h
new file mode 100644
index 000000000000..529d2cf4d06f
--- /dev/null
+++ b/arch/arm/include/asm/mcs_spinlock.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MCS_LOCK_H
+#define __ASM_MCS_LOCK_H
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock.h>
+
+/* MCS spin-locking. */
+#define arch_mcs_spin_lock_contended(lock) \
+do { \
+ /* Ensure prior stores are observed before we enter wfe. */ \
+ smp_mb(); \
+ while (!(smp_load_acquire(lock))) \
+ wfe(); \
+} while (0) \
+
+#define arch_mcs_spin_unlock_contended(lock) \
+do { \
+ smp_store_release(lock, 1); \
+ dsb_sev(); \
+} while (0)
+
+#endif /* CONFIG_SMP */
+#endif /* __ASM_MCS_LOCK_H */
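
The contended hooks pair WFE-based waiting with the event raised by dsb_sev(): the waiter sleeps until the previous holder publishes the flag with a release store. Roughly how the generic MCS code in kernel/locking consumes them, sketched here with a simplified node type:

struct mcs_node {
	struct mcs_node *next;
	int locked;		/* set to 1 by the previous lock holder */
};

static void mcs_wait_for_handoff(struct mcs_node *node)
{
	/* sleeps in WFE until the owner's smp_store_release() + SEV below */
	arch_mcs_spin_lock_contended(&node->locked);
}

static void mcs_hand_off(struct mcs_node *next)
{
	arch_mcs_spin_unlock_contended(&next->locked);
}
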
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
index 00ca5f92648e..b10fd358ccc5 100644
--- a/arch/arm/include/asm/memblock.h
+++ b/arch/arm/include/asm/memblock.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_MEMBLOCK_H
#define _ASM_ARM_MEMBLOCK_H
-struct meminfo;
struct machine_desc;
-extern void arm_memblock_init(struct meminfo *, struct machine_desc *);
-
+void arm_memblock_init(const struct machine_desc *);
phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e750a938fd3c..7c2fa7dcec6d 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -1,44 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/memory.h
*
* Copyright (C) 2000-2002 Russell King
* modification for nommu, Hyok S. Choi, 2004
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Note: this file should not be included by non-asm/.h files
+ * Note: this file should not be included explicitly, include <asm/page.h>
+ * to get access to these definitions.
*/
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H
+#ifndef _ASMARM_PAGE_H
+#error "Do not include <asm/memory.h> directly"
+#endif
+
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>
-#include <asm/cache.h>
-
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
+#include <asm/kasan_def.h>
/*
- * Allow for constants defined here to be used from assembly code
- * by prepending the UL suffix only with actual C code compilation.
+ * PAGE_OFFSET: the virtual address of the start of lowmem, memory above
+ * the virtual address range for userspace.
+ * KERNEL_OFFSET: the virtual address of the start of the kernel image.
+ * We may further offset this with TEXT_OFFSET in practice.
*/
-#define UL(x) _AC(x, UL)
+#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+#define KERNEL_OFFSET (PAGE_OFFSET)
#ifdef CONFIG_MMU
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
-#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+#ifndef CONFIG_KASAN
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
+#else
+#define TASK_SIZE (KASAN_SHADOW_START)
+#endif
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
@@ -77,51 +83,61 @@
*/
#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
+#define FDT_FIXED_BASE UL(0xff800000)
+#define FDT_FIXED_SIZE (2 * SECTION_SIZE)
+#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
+
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* Allow 16MB-aligned ioremap pages
*/
#define IOREMAP_MAX_ORDER 24
+#endif
-#define CONSISTENT_END (0xffe00000UL)
+#define VECTORS_BASE UL(0xffff0000)
#else /* CONFIG_MMU */
+#ifndef __ASSEMBLY__
+extern unsigned long setup_vectors_base(void);
+extern unsigned long vectors_base;
+#define VECTORS_BASE vectors_base
+#endif
+
/*
 * The user task size is limited only by the end of the free RAM region.
 * It is difficult to define precisely, and this define may never match
 * its original meaning.
 * Fortunately, there is no reference to it in noMMU mode, for now.
*/
-#ifndef TASK_SIZE
-#define TASK_SIZE (CONFIG_DRAM_SIZE)
-#endif
+#define TASK_SIZE UL(0xffffffff)
#ifndef TASK_UNMAPPED_BASE
#define TASK_UNMAPPED_BASE UL(0x00000000)
#endif
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
-#endif
-
#ifndef END_MEM
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET (PHYS_OFFSET)
-#endif
-
/*
 * The module can be placed anywhere in RAM in nommu mode.
*/
#define MODULES_END (END_MEM)
-#define MODULES_VADDR (PHYS_OFFSET)
+#define MODULES_VADDR PAGE_OFFSET
#define XIP_VIRT_ADDR(physaddr) (physaddr)
+#define FDT_VIRT_BASE(physbase) ((void *)(physbase))
#endif /* !CONFIG_MMU */
+#ifdef CONFIG_XIP_KERNEL
+#define KERNEL_START _sdata
+#else
+#define KERNEL_START _stext
+#endif
+#define KERNEL_END _end
+
/*
 * We fix the TCM memories (max 32 KiB ITCM and DTCM respectively) at
 * these locations
@@ -132,98 +148,160 @@
#endif
/*
- * Convert a physical address to a Page Frame Number and back
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory. This is used for XIP and NoMMU kernels, and on platforms that don't
+ * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
*/
-#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
-#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
+#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-/*
- * Convert a page to/from a physical address
- */
-#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
-#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+#ifndef __ASSEMBLY__
/*
- * Minimum guaranted alignment in pgd_alloc(). The page table pointers passed
- * around in head.S and proc-*.S are shifted by this amount, in order to
- * leave spare high bits for systems with physical address extension. This
- * does not fully accomodate the 40-bit addressing capability of ARM LPAE, but
- * gives us about 38-bits or so.
+ * Physical start and end address of the kernel sections. These addresses are
+ * 2MB-aligned to match the section mappings placed over the kernel. We use
+ * u64 so that LPAE mappings beyond the 32bit limit will work out as well.
*/
-#ifdef CONFIG_ARM_LPAE
-#define ARCH_PGD_SHIFT L1_CACHE_SHIFT
-#else
-#define ARCH_PGD_SHIFT 0
-#endif
-#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
-
-#ifndef __ASSEMBLY__
+extern u64 kernel_sec_start;
+extern u64 kernel_sec_end;
/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
*/
-#ifndef __virt_to_phys
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+
+#if defined(CONFIG_ARM_PATCH_PHYS_VIRT)
/*
* Constants used to force the right instruction encodings and shifts
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
+#define __PV_BITS_23_16 0x810000
+#define __PV_BITS_7_0 0x81
-extern unsigned long __pv_phys_offset;
-#define PHYS_OFFSET __pv_phys_offset
+extern unsigned long __pv_phys_pfn_offset;
+extern u64 __pv_offset;
+extern void fixup_pv_table(const void *, unsigned long);
+extern const void *__pv_table_begin, *__pv_table_end;
-#define __pv_stub(from,to,instr,type) \
+#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
+
+#ifndef CONFIG_THUMB2_KERNEL
+#define __pv_stub(from,to,instr) \
__asm__("@ __pv_stub\n" \
"1: " instr " %0, %1, %2\n" \
+ "2: " instr " %0, %0, %3\n" \
" .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
+ " .long 1b - ., 2b - .\n" \
" .popsection\n" \
: "=r" (to) \
- : "r" (from), "I" (type))
+ : "r" (from), "I" (__PV_BITS_31_24), \
+ "I"(__PV_BITS_23_16))
+
+#define __pv_add_carry_stub(x, y) \
+ __asm__("@ __pv_add_carry_stub\n" \
+ "0: movw %R0, #0\n" \
+ " adds %Q0, %1, %R0, lsl #20\n" \
+ "1: mov %R0, %2\n" \
+ " adc %R0, %R0, #0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 0b - ., 1b - .\n" \
+ " .popsection\n" \
+ : "=&r" (y) \
+ : "r" (x), "I" (__PV_BITS_7_0) \
+ : "cc")
+
+#else
+#define __pv_stub(from,to,instr) \
+ __asm__("@ __pv_stub\n" \
+ "0: movw %0, #0\n" \
+ " lsl %0, #21\n" \
+ " " instr " %0, %1, %0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 0b - .\n" \
+ " .popsection\n" \
+ : "=&r" (to) \
+ : "r" (from))
+
+#define __pv_add_carry_stub(x, y) \
+ __asm__("@ __pv_add_carry_stub\n" \
+ "0: movw %R0, #0\n" \
+ " lsls %R0, #21\n" \
+ " adds %Q0, %1, %R0\n" \
+ "1: mvn %R0, #0\n" \
+ " adc %R0, %R0, #0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 0b - ., 1b - .\n" \
+ " .popsection\n" \
+ : "=&r" (y) \
+ : "r" (x) \
+ : "cc")
+#endif
-static inline unsigned long __virt_to_phys(unsigned long x)
+static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
- unsigned long t;
- __pv_stub(x, t, "add", __PV_BITS_31_24);
+ phys_addr_t t;
+
+ if (sizeof(phys_addr_t) == 4) {
+ __pv_stub(x, t, "add");
+ } else {
+ __pv_add_carry_stub(x, t);
+ }
return t;
}
-static inline unsigned long __phys_to_virt(unsigned long x)
+static inline unsigned long __phys_to_virt(phys_addr_t x)
{
unsigned long t;
- __pv_stub(x, t, "sub", __PV_BITS_31_24);
+
+ /*
+	 * The 'unsigned long' cast discards the upper word when
+	 * phys_addr_t is 64 bit, and makes sure that the inline
+	 * assembler expression receives a 32 bit argument where
+	 * an 'r' 32 bit operand is expected.
+ */
+ __pv_stub((unsigned long) x, t, "sub");
return t;
}
+
#else
-#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
-#endif
-#endif
-#endif /* __ASSEMBLY__ */
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
#define PHYS_OFFSET PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-#endif
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
+{
+ return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+ return x - PHYS_OFFSET + PAGE_OFFSET;
+}
+
#endif
-#ifndef __ASSEMBLY__
+static inline unsigned long virt_to_pfn(const void *p)
+{
+ unsigned long kaddr = (unsigned long)p;
+ return (((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) +
+ PHYS_PFN_OFFSET);
+}
+#define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x))
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view. We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __virt_to_phys(unsigned long x);
+extern phys_addr_t __phys_addr_symbol(unsigned long x);
+#else
+#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
+#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
+#endif
/*
* These are *only* valid on the kernel direct mapped RAM memory.
@@ -231,47 +309,66 @@ static inline unsigned long __phys_to_virt(unsigned long x)
* translation for translating DMA addresses. Use the driver
* DMA support - see dma-mapping.h.
*/
+#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
return __virt_to_phys((unsigned long)(x));
}
+#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
- return (void *)(__phys_to_virt((unsigned long)(x)));
+ return (void *)__phys_to_virt(x);
}
/*
* Drivers should NOT use these either.
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
-#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
+#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
+#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
+
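The conversions above compose into a simple round trip. An illustrative check, valid for any lowmem kernel address (PFN 0 corresponds to physical address 0, so the PFN falls directly out of the physical address):

static inline bool lowmem_round_trip_ok(void *p)
{
	phys_addr_t pa = virt_to_phys(p);

	return phys_to_virt(pa) == p &&
	       virt_to_pfn(p) == (unsigned long)(pa >> PAGE_SHIFT);
}
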
+extern long long arch_phys_to_idmap_offset;
/*
- * Virtual <-> DMA view memory address translations
- * Again, these are *only* valid on the kernel direct mapped RAM
- * memory. Use of these is *deprecated* (and that doesn't mean
- * use the __ prefixed forms instead.) See dma-mapping.h.
+ * These are for systems that have a hardware interconnect supported alias
+ * of physical memory for idmap purposes. Most cases should leave these
+ * untouched. Note: this can only return addresses less than 4GiB.
*/
-#ifndef __virt_to_bus
-#define __virt_to_bus __virt_to_phys
-#define __bus_to_virt __phys_to_virt
-#define __pfn_to_bus(x) __pfn_to_phys(x)
-#define __bus_to_pfn(x) __phys_to_pfn(x)
-#endif
+static inline bool arm_has_idmap_alias(void)
+{
+ return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
+}
+
+#define IDMAP_INVALID_ADDR ((u32)~0)
-#ifdef CONFIG_VIRT_TO_BUS
-static inline __deprecated unsigned long virt_to_bus(void *x)
+static inline unsigned long phys_to_idmap(phys_addr_t addr)
{
- return __virt_to_bus((unsigned long)x);
+ if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
+ addr += arch_phys_to_idmap_offset;
+ if (addr > (u32)~0)
+ addr = IDMAP_INVALID_ADDR;
+ }
+ return addr;
}
-static inline __deprecated void *bus_to_virt(unsigned long x)
+static inline phys_addr_t idmap_to_phys(unsigned long idmap)
{
- return (void *)__bus_to_virt(x);
+ phys_addr_t addr = idmap;
+
+ if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
+ addr -= arch_phys_to_idmap_offset;
+
+ return addr;
}
-#endif
+
+static inline unsigned long __virt_to_idmap(unsigned long x)
+{
+ return phys_to_idmap(__virt_to_phys(x));
+}
+
+#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
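
A typical consumer is a reboot or secondary-boot path that must branch to a physical alias of a kernel function. A sketch, where board_boot_entry is an assumed entry point:

static unsigned long board_boot_entry_phys(void)
{
	unsigned long addr = virt_to_idmap(&board_boot_entry);

	if (addr == IDMAP_INVALID_ADDR)
		return 0;	/* alias window exceeded; caller must handle */
	return addr;
}
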
/*
* Conversion between a struct page and a physical address.
@@ -284,11 +381,10 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
*/
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+ && pfn_valid(virt_to_pfn(kaddr)))
#endif
-#include <asm-generic/memory_model.h>
-
#endif
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
new file mode 100644
index 000000000000..2189e507c8e0
--- /dev/null
+++ b/arch/arm/include/asm/mman.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <asm/system_info.h>
+#include <uapi/asm/mman.h>
+
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return cpu_architecture() >= CPU_ARCH_ARMv6;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+
+#endif /* __ASM_MMAN_H__ */
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index e3d55547e755..e049723840d3 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM_MMU_H
#define __ARM_MMU_H
@@ -6,14 +7,24 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
atomic64_t id;
+#else
+ int switch_pending;
+#endif
+ atomic_t vmalloc_seq;
+ unsigned long sigpage;
+#ifdef CONFIG_VDSO
+ unsigned long vdso;
+#endif
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
#endif
- unsigned int vmalloc_seq;
} mm_context_t;
#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)
-#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
+#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
#else
#define ASID(mm) (0)
#endif
@@ -27,6 +38,10 @@ typedef struct {
*/
typedef struct {
unsigned long end_brk;
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
} mm_context_t;
#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index b5792b7fd8d3..db2cb06aa8cf 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/mmu_context.h
*
* Copyright (C) 1996 Russell King.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Changelog:
* 27-06-1996 RMK Created
*/
@@ -15,6 +12,9 @@
#include <linux/compiler.h>
#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/preempt.h>
+
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
@@ -23,10 +23,27 @@
void __check_vmalloc_seq(struct mm_struct *mm);
+#ifdef CONFIG_MMU
+static inline void check_vmalloc_seq(struct mm_struct *mm)
+{
+ if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
+ unlikely(atomic_read(&mm->context.vmalloc_seq) !=
+ atomic_read(&init_mm.context.vmalloc_seq)))
+ __check_vmalloc_seq(mm);
+}
+#endif
+
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
+
+#define init_new_context init_new_context
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ atomic64_set(&mm->context.id, 0);
+ return 0;
+}
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
@@ -45,8 +62,7 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
{
- if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
- __check_vmalloc_seq(mm);
+ check_vmalloc_seq(mm);
if (irqs_disabled())
/*
@@ -56,45 +72,42 @@ static inline void check_and_switch_context(struct mm_struct *mm,
* on non-ASID CPUs, the old mm will remain valid until the
* finish_arch_post_lock_switch() call.
*/
- set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+ mm->context.switch_pending = 1;
else
cpu_switch_mm(mm->pgd, mm);
}
+#ifndef MODULE
#define finish_arch_post_lock_switch \
finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
- if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
- struct mm_struct *mm = current->mm;
- cpu_switch_mm(mm->pgd, mm);
+ struct mm_struct *mm = current->mm;
+
+ if (mm && mm->context.switch_pending) {
+ /*
+ * Preemption must be disabled during cpu_switch_mm() as we
+ * have some stateful cache flush implementations. Check
+ * switch_pending again in case we were preempted and the
+ * switch to this mm was already done.
+ */
+ preempt_disable();
+ if (mm->context.switch_pending) {
+ mm->context.switch_pending = 0;
+ cpu_switch_mm(mm->pgd, mm);
+ }
+ preempt_enable_no_resched();
}
}
+#endif /* !MODULE */
#endif /* CONFIG_MMU */
-#define init_new_context(tsk,mm) 0
-
#endif /* CONFIG_CPU_HAS_ASID */
-#define destroy_context(mm) do { } while(0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm: describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-/*
* This is the actual mm switch as far as the scheduler
* is concerned. No registers are touched. We avoid
* calling the CPU specific function when the mm hasn't
@@ -125,6 +138,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}
-#define deactivate_mm(tsk,mm) do { } while (0)
+#ifdef CONFIG_VMAP_STACK
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ if (mm != &init_mm)
+ check_vmalloc_seq(mm);
+}
+#define enter_lazy_tlb enter_lazy_tlb
+#endif
+
+#include <asm-generic/mmu_context.h>
#endif
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 0d3a28dbc8e5..07c51a34f77d 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -1,47 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_MODULE_H
#define _ASM_ARM_MODULE_H
#include <asm-generic/module.h>
-
-struct unwind_table;
+#include <asm/unwind.h>
#ifdef CONFIG_ARM_UNWIND
-enum {
- ARM_SEC_INIT,
- ARM_SEC_DEVINIT,
- ARM_SEC_CORE,
- ARM_SEC_EXIT,
- ARM_SEC_DEVEXIT,
- ARM_SEC_MAX,
+#define ELF_SECTION_UNWIND 0x70000001
+#endif
+
+#define PLT_ENT_STRIDE L1_CACHE_BYTES
+#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
+#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
+
+struct plt_entries {
+ u32 ldr[PLT_ENT_COUNT];
+ u32 lit[PLT_ENT_COUNT];
};
-struct mod_arch_specific {
- struct unwind_table *unwind[ARM_SEC_MAX];
+struct mod_plt_sec {
+ struct elf32_shdr *plt;
+ struct plt_entries *plt_ent;
+ int plt_count;
};
-#endif
-/*
- * Add the ARM architecture version to the version magic string
- */
-#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+struct mod_arch_specific {
+#ifdef CONFIG_ARM_UNWIND
+ struct list_head unwind_list;
+ struct unwind_table *init_table;
+#endif
+#ifdef CONFIG_ARM_MODULE_PLTS
+ struct mod_plt_sec core;
+ struct mod_plt_sec init;
+#endif
+};
-/* Add __virt_to_phys patching state as well */
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+struct module;
+u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);
+#ifdef CONFIG_ARM_MODULE_PLTS
+bool in_module_plt(unsigned long loc);
#else
-#define MODULE_ARCH_VERMAGIC_P2V ""
+static inline bool in_module_plt(unsigned long loc) { return false; }
#endif
-/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
#ifdef CONFIG_THUMB2_KERNEL
-#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
-#else
-#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+#define HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
+static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
+{
+ if (ELF_ST_TYPE(sym->st_info) == STT_FUNC)
+ return sym->st_value & ~1;
+
+ return sym->st_value;
+}
#endif
-#define MODULE_ARCH_VERMAGIC \
- MODULE_ARCH_VERMAGIC_ARMVSN \
- MODULE_ARCH_VERMAGIC_ARMTHUMB \
- MODULE_ARCH_VERMAGIC_P2V
-
#endif /* _ASM_ARM_MODULE_H */
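
Each PLT veneer in the layout above is an ARM "ldr pc" word whose literal sits one PLT_ENT_STRIDE away, so ldr[i] and lit[i] stay paired. Emitting a single entry could look like the sketch below; the encoding follows the module-plts design in spirit, treat it as an illustration rather than the exact upstream code:

static u32 plt_emit(struct plt_entries *plt, int idx, u32 target)
{
	/* ldr pc, [pc, #off]: PC reads as the instruction address + 8 */
	plt->ldr[idx] = 0xe59ff000 | (PLT_ENT_STRIDE - 8);
	plt->lit[idx] = target;
	return (u32)&plt->ldr[idx];	/* branch destination for the reloc */
}
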
diff --git a/arch/arm/include/asm/module.lds.h b/arch/arm/include/asm/module.lds.h
new file mode 100644
index 000000000000..0e7cb4e314b4
--- /dev/null
+++ b/arch/arm/include/asm/module.lds.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef CONFIG_ARM_MODULE_PLTS
+SECTIONS {
+ .plt : { BYTE(0) }
+ .init.plt : { BYTE(0) }
+}
+#endif
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index c3247cc2fe08..5e088c83d3d8 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM_MPU_H
#define __ARM_MPU_H
-#ifdef CONFIG_ARM_MPU
-
/* MPUIR layout */
#define MPUIR_nU 1
#define MPUIR_DREGION 8
@@ -13,64 +12,122 @@
/* ID_MMFR0 data relevant to MPU */
#define MMFR0_PMSA (0xF << 4)
#define MMFR0_PMSAv7 (3 << 4)
+#define MMFR0_PMSAv8 (4 << 4)
/* MPU D/I Size Register fields */
-#define MPU_RSR_SZ 1
-#define MPU_RSR_EN 0
+#define PMSAv7_RSR_SZ 1
+#define PMSAv7_RSR_EN 0
+#define PMSAv7_RSR_SD 8
+
+/* Number of subregions (SD) */
+#define PMSAv7_NR_SUBREGS 8
+#define PMSAv7_MIN_SUBREG_SIZE 256
/* The D/I RSR value for an enabled region spanning the whole of memory */
-#define MPU_RSR_ALL_MEM 63
+#define PMSAv7_RSR_ALL_MEM 63
/* Individual bits in the DR/IR ACR */
-#define MPU_ACR_XN (1 << 12)
-#define MPU_ACR_SHARED (1 << 2)
+#define PMSAv7_ACR_XN (1 << 12)
+#define PMSAv7_ACR_SHARED (1 << 2)
/* C, B and TEX[2:0] bits only have semantic meanings when grouped */
-#define MPU_RGN_CACHEABLE 0xB
-#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
-#define MPU_RGN_STRONGLY_ORDERED 0
+#define PMSAv7_RGN_CACHEABLE 0xB
+#define PMSAv7_RGN_SHARED_CACHEABLE (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
+#define PMSAv7_RGN_STRONGLY_ORDERED 0
/* Main region should only be shared for SMP */
#ifdef CONFIG_SMP
-#define MPU_RGN_NORMAL (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
+#define PMSAv7_RGN_NORMAL (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
#else
-#define MPU_RGN_NORMAL MPU_RGN_CACHEABLE
+#define PMSAv7_RGN_NORMAL PMSAv7_RGN_CACHEABLE
#endif
/* Access permission bits of ACR (only define those that we use)*/
-#define MPU_AP_PL1RW_PL0RW (0x3 << 8)
-#define MPU_AP_PL1RW_PL0R0 (0x2 << 8)
-#define MPU_AP_PL1RW_PL0NA (0x1 << 8)
+#define PMSAv7_AP_PL1RO_PL0NA (0x5 << 8)
+#define PMSAv7_AP_PL1RW_PL0RW (0x3 << 8)
+#define PMSAv7_AP_PL1RW_PL0R0 (0x2 << 8)
+#define PMSAv7_AP_PL1RW_PL0NA (0x1 << 8)
+
+#define PMSAv8_BAR_XN 1
+
+#define PMSAv8_LAR_EN 1
+#define PMSAv8_LAR_IDX(n) (((n) & 0x7) << 1)
+
+
+#define PMSAv8_AP_PL1RW_PL0NA (0 << 1)
+#define PMSAv8_AP_PL1RW_PL0RW (1 << 1)
+#define PMSAv8_AP_PL1RO_PL0RO (3 << 1)
+
+#ifdef CONFIG_SMP
+#define PMSAv8_RGN_SHARED (3 << 3) // inner shareable
+#else
+#define PMSAv8_RGN_SHARED (0 << 3)
+#endif
+
+#define PMSAv8_RGN_DEVICE_nGnRnE 0
+#define PMSAv8_RGN_NORMAL 1
+
+#define PMSAv8_MAIR(attr, mt) ((attr) << ((mt) * 8))
+
+#ifdef CONFIG_CPU_V7M
+#define PMSAv8_MINALIGN 32
+#else
+#define PMSAv8_MINALIGN 64
+#endif
/* For minimal static MPU region configurations */
-#define MPU_PROBE_REGION 0
-#define MPU_BG_REGION 1
-#define MPU_RAM_REGION 2
-#define MPU_VECTORS_REGION 3
+#define PMSAv7_PROBE_REGION 0
+#define PMSAv7_BG_REGION 1
+#define PMSAv7_RAM_REGION 2
+#define PMSAv7_ROM_REGION 3
+
+/* Fixed for PMSAv8 only */
+#define PMSAv8_XIP_REGION 0
+#define PMSAv8_KERNEL_REGION 1
/* Maximum number of regions Linux is interested in */
-#define MPU_MAX_REGIONS 16
+#define MPU_MAX_REGIONS 16
-#define MPU_DATA_SIDE 0
-#define MPU_INSTR_SIDE 1
+#define PMSAv7_DATA_SIDE 0
+#define PMSAv7_INSTR_SIDE 1
#ifndef __ASSEMBLY__
struct mpu_rgn {
/* Assume same attributes for d/i-side */
- u32 drbar;
- u32 drsr;
- u32 dracr;
+ union {
+ u32 drbar; /* PMSAv7 */
+ u32 prbar; /* PMSAv8 */
+ };
+ union {
+ u32 drsr; /* PMSAv7 */
+ u32 prlar; /* PMSAv8 */
+ };
+ union {
+ u32 dracr; /* PMSAv7 */
+ u32 unused; /* not used in PMSAv8 */
+ };
};
struct mpu_rgn_info {
- u32 mpuir;
+ unsigned int used;
struct mpu_rgn rgns[MPU_MAX_REGIONS];
};
extern struct mpu_rgn_info mpu_rgn_info;
-#endif /* __ASSEMBLY__ */
+#ifdef CONFIG_ARM_MPU
+extern void __init pmsav7_adjust_lowmem_bounds(void);
+extern void __init pmsav8_adjust_lowmem_bounds(void);
-#endif /* CONFIG_ARM_MPU */
+extern void __init pmsav7_setup(void);
+extern void __init pmsav8_setup(void);
+#else
+static inline void pmsav7_adjust_lowmem_bounds(void) {};
+static inline void pmsav8_adjust_lowmem_bounds(void) {};
+static inline void pmsav7_setup(void) {};
+static inline void pmsav8_setup(void) {};
+#endif
+
+#endif /* __ASSEMBLY__ */
#endif
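
Filling a PMSAv7 region descriptor with the renamed constants amounts to packing the base, access-control and size/enable words. A sketch, with the region size expressed as a power of two (the SZ field encodes 2^(n+1) bytes, so a 2^order region uses order - 1); the board_* name is illustrative:

static void board_fill_ram_region(struct mpu_rgn *rgn, u32 base, u32 order)
{
	rgn->drbar = base;	/* region base, suitably aligned */
	rgn->dracr = PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL;
	rgn->drsr  = ((order - 1) << PMSAv7_RSR_SZ) | (1 << PMSAv7_RSR_EN);
}
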
diff --git a/arch/arm/include/asm/mtd-xip.h b/arch/arm/include/asm/mtd-xip.h
index d79d66d2cf71..dfcef0152e3d 100644
--- a/arch/arm/include/asm/mtd-xip.h
+++ b/arch/arm/include/asm/mtd-xip.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MTD primitives for XIP support. Architecture specific functions
*
@@ -6,10 +7,6 @@
* Author: Nicolas Pitre
* Created: Nov 2, 2004
* Copyright: (C) 2004 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ARM_MTD_XIP_H__
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
deleted file mode 100644
index 87c044910fe0..000000000000
--- a/arch/arm/include/asm/mutex.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/include/asm/mutex.h
- *
- * ARM optimized mutex locking primitives
- *
- * Please look into asm-generic/mutex-xchg.h for a formal definition.
- */
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
-/*
- * On pre-ARMv6 hardware this results in a swp-based implementation,
- * which is the most efficient. For ARMv6+, we have exclusive memory
- * accessors and use atomic_dec to avoid the extra xchg operations
- * on the locking slowpaths.
- */
-#if __LINUX_ARM_ARCH__ < 6
-#include <asm-generic/mutex-xchg.h>
-#else
-#include <asm-generic/mutex-dec.h>
-#endif
-#endif /* _ASM_MUTEX_H */
diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h
new file mode 100644
index 000000000000..aac10ba33ee2
--- /dev/null
+++ b/arch/arm/include/asm/neon.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/arch/arm/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/hwcap.h>
+
+#define cpu_has_neon() (!!(elf_hwcap & HWCAP_NEON))
+
+#ifdef __ARM_NEON__
+
+/*
+ * If you are affected by the BUILD_BUG below, it probably means that you are
+ * using NEON code /and/ calling the kernel_neon_begin() function from the same
+ * compilation unit. To prevent issues that may arise from GCC reordering or
+ * generating (1) NEON instructions outside of these begin/end functions, the
+ * only supported way of using NEON code in the kernel is by isolating it in a
+ * separate compilation unit, and calling it from another unit from inside a
+ * kernel_neon_begin/kernel_neon_end pair.
+ *
+ * (1) Current GCC (4.7) might generate NEON instructions at O3 level if
+ * -mfpu=neon is set.
+ */
+
+#define kernel_neon_begin() \
+ BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
+
+#else
+void kernel_neon_begin(void);
+#endif
+void kernel_neon_end(void);
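
The pattern the comment prescribes, sketched: the NEON body lives in its own compilation unit built with -mfpu=neon, and a plain unit brackets the call with begin/end. board_neon_xor() is an assumed function in the NEON unit; may_use_simd() comes from asm/simd.h:

void board_neon_xor(u8 *dst, const u8 *src, int len);	/* in the NEON unit */

static void do_xor(u8 *dst, const u8 *src, int len)
{
	if (cpu_has_neon() && may_use_simd()) {
		kernel_neon_begin();
		board_neon_xor(dst, src, len);
		kernel_neon_end();
	} else {
		while (len--)		/* scalar fallback */
			*dst++ ^= *src++;
	}
}
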
diff --git a/arch/arm/include/asm/nwflash.h b/arch/arm/include/asm/nwflash.h
index 04e5a557a884..66b7e68c9b58 100644
--- a/arch/arm/include/asm/nwflash.h
+++ b/arch/arm/include/asm/nwflash.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FLASH_H
#define _FLASH_H
-#define FLASH_MINOR 160 /* MAJOR is 10 - miscdevice */
#define CMD_WRITE_DISABLE 0
#define CMD_WRITE_ENABLE 0x28
#define CMD_WRITE_BASE64K_ENABLE 0x47
diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h
index bc3a9174417c..b6f4b35024b8 100644
--- a/arch/arm/include/asm/opcodes-sec.h
+++ b/arch/arm/include/asm/opcodes-sec.h
@@ -1,12 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2012 ARM Limited
*/
diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h
index efcfdf92d9d5..0b58da81b78e 100644
--- a/arch/arm/include/asm/opcodes-virt.h
+++ b/arch/arm/include/asm/opcodes-virt.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* opcodes-virt.h: Opcode definitions for the ARM virtualization extensions
* Copyright (C) 2012 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __ASM_ARM_OPCODES_VIRT_H
#define __ASM_ARM_OPCODES_VIRT_H
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index e796c598513b..38e3eabff5c3 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/opcodes.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_OPCODES_H
@@ -113,12 +110,17 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x)
#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x)
#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x)
-#ifndef CONFIG_CPU_ENDIAN_BE32
+#ifdef CONFIG_CPU_ENDIAN_BE32
+#ifndef __ASSEMBLY__
/*
* On BE32 systems, using 32-bit accesses to store Thumb instructions will not
* work in all cases, due to alignment constraints. For now, a correct
- * version is not provided for BE32.
+ * version is not provided for BE32, but the prototype needs to be there
+ * to compile patch.c.
*/
+extern __u32 __opcode_to_mem_thumb32(__u32);
+#endif
+#else
#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x)
#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x)
#endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 12f71a190422..3364637755e8 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/outercache.h
*
* Copyright (C) 2010 ARM Ltd.
* Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_OUTERCACHE_H
@@ -23,58 +11,92 @@
#include <linux/types.h>
+struct l2x0_regs;
+
struct outer_cache_fns {
void (*inv_range)(unsigned long, unsigned long);
void (*clean_range)(unsigned long, unsigned long);
void (*flush_range)(unsigned long, unsigned long);
void (*flush_all)(void);
- void (*inv_all)(void);
void (*disable)(void);
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
- void (*set_debug)(unsigned long);
void (*resume)(void);
-};
-#ifdef CONFIG_OUTER_CACHE
+ /* This is an ARM L2C thing */
+ void (*write_sec)(unsigned long, unsigned);
+ void (*configure)(const struct l2x0_regs *);
+};
extern struct outer_cache_fns outer_cache;
+#ifdef CONFIG_OUTER_CACHE
+/**
+ * outer_inv_range - invalidate range of outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.inv_range)
outer_cache.inv_range(start, end);
}
+
+/**
+ * outer_clean_range - clean dirty outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.clean_range)
outer_cache.clean_range(start, end);
}
+
+/**
+ * outer_flush_range - clean and invalidate outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.flush_range)
outer_cache.flush_range(start, end);
}
+/**
+ * outer_flush_all - clean and invalidate all cache lines in the outer cache
+ *
+ * Note: depending on implementation, this may not be atomic - it must
+ * only be called with interrupts disabled and no other active outer
+ * cache masters.
+ *
+ * It is intended that this function is only used by implementations
+ * needing to override the outer_cache.disable() method due to security.
+ * (Some implementations perform this as a clean followed by an invalidate.)
+ */
static inline void outer_flush_all(void)
{
if (outer_cache.flush_all)
outer_cache.flush_all();
}
-static inline void outer_inv_all(void)
-{
- if (outer_cache.inv_all)
- outer_cache.inv_all();
-}
-
-static inline void outer_disable(void)
-{
- if (outer_cache.disable)
- outer_cache.disable();
-}
+/**
+ * outer_disable - clean, invalidate and disable the outer cache
+ *
+ * Disable the outer cache, ensuring that any data contained in the outer
+ * cache is pushed out to lower levels of system memory. The note and
+ * conditions above concerning outer_flush_all() apply here.
+ */
+extern void outer_disable(void);
+/**
+ * outer_resume - restore the cache configuration and re-enable outer cache
+ *
+ * Restore any configuration that the cache had when previously enabled,
+ * and re-enable the outer cache.
+ */
static inline void outer_resume(void)
{
if (outer_cache.resume)
@@ -90,21 +112,9 @@ static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{ }
static inline void outer_flush_all(void) { }
-static inline void outer_inv_all(void) { }
static inline void outer_disable(void) { }
static inline void outer_resume(void) { }
#endif
-#ifdef CONFIG_OUTER_CACHE_SYNC
-static inline void outer_sync(void)
-{
- if (outer_cache.sync)
- outer_cache.sync();
-}
-#else
-static inline void outer_sync(void)
-{ }
-#endif
-
#endif /* __ASM_OUTERCACHE_H */
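
In practice these helpers pair with inner-cache maintenance when handing a non-coherent buffer to a device: inner levels are maintained first, then the outer cache, by physical address. A sketch using dmac_flush_range() from asm/cacheflush.h:

static void sketch_dma_to_device(void *buf, size_t len)
{
	phys_addr_t start = virt_to_phys(buf);

	dmac_flush_range(buf, buf + len);	/* inner: clean+invalidate */
	outer_flush_range(start, start + len);	/* then the outer cache */
}
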
diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
index d1b162a18dcb..7c2c72323d17 100644
--- a/arch/arm/include/asm/page-nommu.h
+++ b/arch/arm/include/asm/page-nommu.h
@@ -1,25 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/page-nommu.h
*
* Copyright (C) 2004 Hyok S. Choi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PAGE_NOMMU_H
#define _ASMARM_PAGE_NOMMU_H
-#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
-#define KTHREAD_SIZE (8192)
-#else
-#define KTHREAD_SIZE PAGE_SIZE
-#endif
-
-#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
-#define free_user_page(page, addr) free_page(addr)
-
#define clear_page(page) memset((page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 6363f3d1d505..ef11b721230e 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -1,19 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/page.h
*
* Copyright (C) 1995-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+#include <vdso/page.h>
#ifndef __ASSEMBLY__
@@ -116,6 +110,28 @@ struct cpu_user_fns {
unsigned long vaddr, struct vm_area_struct *vma);
};
+void fa_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void fa_clear_user_highpage(struct page *page, unsigned long vaddr);
+void feroceon_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4wb_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr);
+void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+
#ifdef MULTI_USER
extern struct cpu_user_fns cpu_user;
@@ -142,12 +158,17 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
+#ifdef CONFIG_KUSER_HELPERS
#define __HAVE_ARCH_GATE_AREA 1
+#endif
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level-types.h>
#else
#include <asm/pgtable-2level-types.h>
+#ifdef CONFIG_VMAP_STACK
+#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
+#endif
#endif
#endif /* CONFIG_MMU */
@@ -156,16 +177,16 @@ typedef struct page *pgtable_t;
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
extern int pfn_valid(unsigned long);
+#define pfn_valid pfn_valid
#endif
-#include <asm/memory.h>
-
#endif /* !__ASSEMBLY__ */
-#define VM_DATA_DEFAULT_FLAGS \
- (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#include <asm/memory.h>
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
#include <asm-generic/getorder.h>
+#include <asm-generic/memory_model.h>
#endif
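
As background for the cpu_user_fns declarations added above, a minimal sketch of the MULTI_USER indirection they feed; the wrapper function is hypothetical, the vtable member names are assumed from the declarations above, and the direct v4wb binding is only one example of a compile-time choice:

/* Sketch only: runtime dispatch through the cpu_user vtable on
 * MULTI_USER builds vs. binding one implementation directly. */
static inline void sketch_clear_user_highpage(struct page *page,
                                              unsigned long vaddr)
{
#ifdef MULTI_USER
        cpu_user.cpu_clear_user_highpage(page, vaddr);  /* runtime dispatch */
#else
        v4wb_clear_user_highpage(page, vaddr);          /* compile-time bind */
#endif
}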
diff --git a/arch/arm/include/asm/paravirt.h b/arch/arm/include/asm/paravirt.h
new file mode 100644
index 000000000000..95d5b0d625cd
--- /dev/null
+++ b/arch/arm/include/asm/paravirt.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_PARAVIRT_H
+#define _ASM_ARM_PARAVIRT_H
+
+#ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+u64 dummy_steal_clock(int cpu);
+
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return static_call(pv_steal_clock)(cpu);
+}
+#endif
+
+#endif
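
A hedged sketch of how a guest-side backend might hook the static call introduced here; stolen_time_read() and pv_time_init() are hypothetical names, while static_call_update() and static_key_slow_inc() are the generic kernel APIs this header builds on:

static u64 stolen_time_read(int cpu)
{
        /* hypothetical: fetch this CPU's stolen time from the hypervisor */
        return 0;
}

static int __init pv_time_init(void)
{
        static_call_update(pv_steal_clock, stolen_time_read);
        static_key_slow_inc(&paravirt_steal_enabled);   /* enable accounting */
        return 0;
}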
diff --git a/arch/arm/include/asm/paravirt_api_clock.h b/arch/arm/include/asm/paravirt_api_clock.h
new file mode 100644
index 000000000000..65ac7cee0dad
--- /dev/null
+++ b/arch/arm/include/asm/paravirt_api_clock.h
@@ -0,0 +1 @@
+#include <asm/paravirt.h>
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index a98a2e112fae..5916b88d4c94 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_PCI_H
#define ASMARM_PCI_H
#ifdef __KERNEL__
-#include <asm-generic/pci-dma-compat.h>
-#include <asm-generic/pci-bridge.h>
-
#include <asm/mach/pci.h> /* for pci_sys_data */
extern unsigned long pcibios_min_io;
@@ -12,59 +10,19 @@ extern unsigned long pcibios_min_io;
extern unsigned long pcibios_min_mem;
#define PCIBIOS_MIN_MEM pcibios_min_mem
-static inline int pcibios_assign_all_busses(void)
-{
- return pci_has_flag(PCI_REASSIGN_ALL_RSRC);
-}
+#define pcibios_assign_all_busses() pci_has_flag(PCI_REASSIGN_ALL_BUS)
#ifdef CONFIG_PCI_DOMAINS
-static inline int pci_domain_nr(struct pci_bus *bus)
-{
- struct pci_sys_data *root = bus->sysdata;
-
- return root->domain;
-}
-
static inline int pci_proc_domain(struct pci_bus *bus)
{
return pci_domain_nr(bus);
}
#endif /* CONFIG_PCI_DOMAINS */
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
-/*
- * The PCI address space does equal the physical memory address space.
- * The networking and block device layers use this boolean for bounce
- * buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS (1)
-
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
- enum pci_dma_burst_strategy *strat,
- unsigned long *strategy_parameter)
-{
- *strat = PCI_DMA_BURST_INFINITY;
- *strategy_parameter = ~0UL;
-}
-#endif
-
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
-/*
- * Dummy implementation; always return 0.
- */
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return 0;
-}
+extern void pcibios_report_status(unsigned int status_mask, int warn);
#endif /* __KERNEL__ */
-
#endif
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 209e6504922e..7545c87c251f 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -1,43 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2012 Calxeda, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
+#include <asm/insn.h>
+
+register unsigned long current_stack_pointer asm ("sp");
+
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
*/
-#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+#ifdef CONFIG_SMP
static inline void set_my_cpu_offset(unsigned long off)
{
+ extern unsigned int smp_on_up;
+
+ if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
+ return;
+
/* Set TPIDRPRW */
asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
-static inline unsigned long __my_cpu_offset(void)
+static __always_inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
- register unsigned long *sp asm ("sp");
/*
* Read TPIDRPRW.
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+ asm("0: mrc p15, 0, %0, c13, c0, 4 \n\t"
+#ifdef CONFIG_CPU_V6
+ "1: \n\t"
+ " .subsection 1 \n\t"
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+ !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ "2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) " \n\t"
+ " b 1b \n\t"
+#else
+ "2: ldr %0, 3f \n\t"
+ " ldr %0, [%0] \n\t"
+ " b 1b \n\t"
+ "3: .long __per_cpu_offset \n\t"
+#endif
+ " .previous \n\t"
+ " .pushsection \".alt.smp.init\", \"a\" \n\t"
+ " .long 0b - . \n\t"
+ " b . + (2b - 0b) \n\t"
+ " .popsection \n\t"
+#endif
+ : "=r" (off)
+ : "Q" (*(const unsigned long *)current_stack_pointer));
return off;
}
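
For orientation, a sketch (assumed, not part of this hunk) of how the TPIDRPRW scheme is primed so that later __my_cpu_offset() reads stay cheap:

/* Each CPU stashes its offset in TPIDRPRW once at bring-up; this_cpu_*()
 * accessors then read it back via __my_cpu_offset() without a memory load. */
static void sketch_init_this_cpu(unsigned int cpu)
{
        set_my_cpu_offset(per_cpu_offset(cpu));
}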
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 755877527cf9..c08f16f2e243 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -1,31 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/include/asm/perf_event.h
*
* Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __ARM_PERF_EVENT_H__
#define __ARM_PERF_EVENT_H__
-/*
- * The ARMv7 CPU PMU supports up to 32 event counters.
- */
-#define ARMPMU_MAX_HWEVENTS 32
-
-#define HW_OP_UNSUPPORTED 0xFFFF
-#define C(_x) PERF_COUNT_HW_CACHE_##_x
-#define CACHE_OP_UNSUPPORTED 0xFFFF
-
-#ifdef CONFIG_HW_PERF_EVENTS
-struct pt_regs;
-extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-extern unsigned long perf_misc_flags(struct pt_regs *regs);
-#define perf_misc_flags(regs) perf_misc_flags(regs)
-#endif
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->ARM_pc = (__ip); \
+ frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
+ (regs)->ARM_sp = current_stack_pointer; \
+ (regs)->ARM_cpsr = SVC_MODE; \
+}
#endif /* __ARM_PERF_EVENT_H__ */
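
A small usage sketch of the macro above; the caller is hypothetical, but _THIS_IP_ is the standard way to obtain the current instruction pointer:

static void sketch_sample_regs(void)
{
        struct pt_regs regs;

        /* snapshot pc/fp/sp of this frame, with a synthetic SVC-mode CPSR */
        perf_arch_fetch_caller_regs(&regs, _THIS_IP_);
}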
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 943504f53f57..a17f01235c29 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgalloc.h
*
* Copyright (C) 2000-2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H
@@ -18,25 +15,13 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-#define check_pgt_cache() do { } while (0)
-
#ifdef CONFIG_MMU
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
#ifdef CONFIG_ARM_LPAE
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
- BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- free_page((unsigned long)pmd);
-}
+#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
@@ -44,21 +29,24 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
#else /* !CONFIG_ARM_LPAE */
+#define PGD_SIZE (PAGE_SIZE << 2)
/*
* Since we have only two-level page tables, these are trivial
*/
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd) do { } while (0)
+#ifdef CONFIG_KASAN
+/* The KASan core unconditionally calls pud_populate() on all architectures */
+#define pud_populate(mm,pmd,pte) do { } while (0)
+#else
#define pud_populate(mm,pmd,pte) BUG()
-
+#endif
#endif /* CONFIG_ARM_LPAE */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
-
static inline void clean_pte_table(pte_t *pte)
{
clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
@@ -80,50 +68,40 @@ static inline void clean_pte_table(pte_t *pte)
* | h/w pt 1 |
* +------------+
*/
+
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#define __HAVE_ARCH_PGD_FREE
+#include <asm-generic/pgalloc.h>
+
static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one_kernel(struct mm_struct *mm)
{
- pte_t *pte;
+ pte_t *pte = __pte_alloc_one_kernel(mm);
- pte = (pte_t *)__get_free_page(PGALLOC_GFP);
if (pte)
clean_pte_table(pte);
return pte;
}
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- struct page *pte;
-
#ifdef CONFIG_HIGHPTE
- pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
+#define PGTABLE_HIGHMEM __GFP_HIGHMEM
#else
- pte = alloc_pages(PGALLOC_GFP, 0);
+#define PGTABLE_HIGHMEM 0
#endif
- if (pte) {
- if (!PageHighMem(pte))
- clean_pte_table(page_address(pte));
- pgtable_page_ctor(pte);
- }
-
- return pte;
-}
-/*
- * Free one PTE table.
- */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm)
{
- if (pte)
- free_page((unsigned long)pte);
-}
+ struct page *pte;
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- __free_page(pte);
+ pte = __pte_alloc_one(mm, GFP_PGTABLE_USER | PGTABLE_HIGHMEM);
+ if (!pte)
+ return NULL;
+ if (!PageHighMem(pte))
+ clean_pte_table(page_address(pte));
+ return pte;
}
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
@@ -155,9 +133,16 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
+ extern pmdval_t user_pmd_table;
+ pmdval_t prot;
+
+ if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE))
+ prot = user_pmd_table;
+ else
+ prot = _PAGE_USER_TABLE;
+
+ __pmd_populate(pmdp, page_to_phys(ptep), prot);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_MMU */
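
A sketch of the allocate-then-populate flow these helpers serve (the caller is hypothetical); it shows why pte_alloc_one() cleans the hardware half of the table before pmd_populate() makes it visible to the table walker:

static int sketch_install_pte_table(struct mm_struct *mm, pmd_t *pmd)
{
        pgtable_t new = pte_alloc_one(mm);

        if (!new)
                return -ENOMEM;
        pmd_populate(mm, pmd, new);     /* writes both hardware pmd entries */
        return 0;
}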
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5cfba15cb401..556937e1790e 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-2level-hwdef.h
*
* Copyright (C) 1995-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_PGTABLE_2LEVEL_HWDEF_H
#define _ASM_PGTABLE_2LEVEL_HWDEF_H
@@ -20,12 +17,15 @@
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f)
#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
/*
* - section
*/
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
@@ -44,6 +44,7 @@
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_CACHE_MASK (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
/*
diff --git a/arch/arm/include/asm/pgtable-2level-types.h b/arch/arm/include/asm/pgtable-2level-types.h
index 66cb5b0e89c5..650e793f4142 100644
--- a/arch/arm/include/asm/pgtable-2level-types.h
+++ b/arch/arm/include/asm/pgtable-2level-types.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-2level-types.h
*
* Copyright (C) 1995-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PGTABLE_2LEVEL_TYPES_H
#define _ASM_PGTABLE_2LEVEL_TYPES_H
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02386ee..6b5392e20f41 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -1,15 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-2level.h
*
* Copyright (C) 1995-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_PGTABLE_2LEVEL_H
#define _ASM_PGTABLE_2LEVEL_H
+#define __PAGETABLE_PMD_FOLDED 1
+
/*
* Hardware-wise, we have a two level page table structure, where the first
* level has 4096 entries, and the second level has 256 entries. Each entry
@@ -76,6 +75,8 @@
#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
@@ -118,7 +119,6 @@
#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */
#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0)
#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1)
-#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6)
#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7)
#define L_PTE_USER (_AT(pteval_t, 1) << 8)
@@ -126,9 +126,41 @@
#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
#define L_PTE_NONE (_AT(pteval_t, 1) << 11)
+/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
+#define L_PTE_SWP_EXCLUSIVE L_PTE_RDONLY
+
/*
* These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
+ * pre-ARMv6 CPUs' cacheable and bufferable bits: n/a,n/a,C,B.
+ * On ARMv6+ without TEX remapping, they are a table index;
+ * on ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B.
+ *
+ * MT type Pre-ARMv6 ARMv6+ type / cacheable status
+ * UNCACHED Uncached Strongly ordered
+ * BUFFERABLE Bufferable Normal memory / non-cacheable
+ * WRITETHROUGH Writethrough Normal memory / write through
+ * WRITEBACK Writeback Normal memory / write back, read alloc
+ * MINICACHE Minicache N/A
+ * WRITEALLOC Writeback Normal memory / write back, write alloc
+ * DEV_SHARED Uncached Device memory (shared)
+ * DEV_NONSHARED Uncached Device memory (non-shared)
+ * DEV_WC Bufferable Normal memory / non-cacheable
+ * DEV_CACHED Writeback Normal memory / write back, read alloc
+ * VECTORS Variable Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (e.g. on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
*/
#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */
#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */
@@ -140,6 +172,7 @@
#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
+#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
#ifndef __ASSEMBLY__
@@ -149,18 +182,40 @@
* the pud: the pud entry is never bad, always exists, and can't be set or
* cleared.
*/
-#define pud_none(pud) (0)
-#define pud_bad(pud) (0)
-#define pud_present(pud) (1)
-#define pud_clear(pudp) do { } while (0)
-#define set_pud(pud,pudp) do { } while (0)
+static inline int pud_none(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return 1;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+}
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
return (pmd_t *)pud;
}
+#define pmd_offset pmd_offset
+
+#define pmd_pfn(pmd) (__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-#define pmd_bad(pmd) (pmd_val(pmd) & 2)
+#define pmd_leaf(pmd) (pmd_val(pmd) & PMD_TYPE_SECT)
+#define pmd_bad(pmd) pmd_leaf(pmd)
+#define pmd_present(pmd) (pmd_val(pmd))
#define copy_pmd(pmdpd,pmdps) \
do { \
@@ -181,6 +236,12 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+/*
+ * We don't have huge page support for short descriptors; for the moment,
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd) (0)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_2LEVEL_H */
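
An illustrative decode of the MT field using the masks above; the helper name is hypothetical:

static inline bool sketch_pte_is_device(pte_t pte)
{
        pteval_t mt = pte_val(pte) & L_PTE_MT_MASK;

        return mt == L_PTE_MT_DEV_SHARED || mt == L_PTE_MT_DEV_NONSHARED;
}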
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 626989fec4d3..944fc9955528 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-3level-hwdef.h
*
* Copyright (C) 2011 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PGTABLE_3LEVEL_HWDEF_H
#define _ASM_PGTABLE_3LEVEL_HWDEF_H
@@ -26,6 +14,7 @@
* + Level 1/2 descriptor
* - common
*/
+#define PUD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
@@ -43,7 +32,7 @@
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
-#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
+#define PMD_SECT_AP2 (_AT(pmdval_t, 1) << 7) /* read only */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
@@ -62,6 +51,7 @@
#define PMD_SECT_WT (_AT(pmdval_t, 2) << 2) /* normal inner write-through */
#define PMD_SECT_WB (_AT(pmdval_t, 3) << 2) /* normal inner write-back */
#define PMD_SECT_WBWA (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */
+#define PMD_SECT_CACHE_MASK (_AT(pmdval_t, 7) << 2)
/*
* + Level 3 descriptor (PTE)
@@ -72,9 +62,11 @@
#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
+#define PTE_AP2 (_AT(pteval_t, 1) << 7) /* AP[2] */
#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
#define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
/*
@@ -83,6 +75,7 @@
#define PHYS_MASK_SHIFT (40)
#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1)
+#ifndef CONFIG_CPU_TTBR0_PAN
/*
* TTBR0/TTBR1 split (PAGE_OFFSET):
* 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
@@ -102,5 +95,35 @@
#endif
#define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16)
+#else
+/*
+ * With CONFIG_CPU_TTBR0_PAN enabled, TTBR1 is only used during regions
+ * where uaccess is disabled and TTBR0 is therefore turned off.
+ */
+#define TTBR1_OFFSET 0 /* pointing to swapper_pg_dir */
+#define TTBR1_SIZE 0 /* TTBR1 size controlled via TTBCR.T0SZ */
+#endif
+
+/*
+ * TTBCR register bits.
+ *
+ * The ORGN0 and IRGN0 bits enable different forms of caching when
+ * walking the translation table. Clearing these bits (which is claimed
+ * to be the reset default) means "normal memory, [outer|inner]
+ * non-cacheable".
+ */
+#define TTBCR_EAE (1 << 31)
+#define TTBCR_IMP (1 << 30)
+#define TTBCR_SH1_MASK (3 << 28)
+#define TTBCR_ORGN1_MASK (3 << 26)
+#define TTBCR_IRGN1_MASK (3 << 24)
+#define TTBCR_EPD1 (1 << 23)
+#define TTBCR_A1 (1 << 22)
+#define TTBCR_T1SZ_MASK (7 << 16)
+#define TTBCR_SH0_MASK (3 << 12)
+#define TTBCR_ORGN0_MASK (3 << 10)
+#define TTBCR_IRGN0_MASK (3 << 8)
+#define TTBCR_EPD0 (1 << 7)
+#define TTBCR_T0SZ_MASK (7 << 0)
#endif
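
A sketch of reading one field with the new TTBCR masks; the accessor is hypothetical here, though the same MRC encoding appears later in proc-fns.h as cpu_get_ttbcr():

static inline unsigned int sketch_ttbcr_t0sz(void)
{
        unsigned int ttbcr;

        asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
        return ttbcr & TTBCR_T0SZ_MASK;
}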
diff --git a/arch/arm/include/asm/pgtable-3level-types.h b/arch/arm/include/asm/pgtable-3level-types.h
index 921aa30259c4..d0f587a2129d 100644
--- a/arch/arm/include/asm/pgtable-3level-types.h
+++ b/arch/arm/include/asm/pgtable-3level-types.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-3level-types.h
*
* Copyright (C) 2011 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PGTABLE_3LEVEL_TYPES_H
#define _ASM_PGTABLE_3LEVEL_TYPES_H
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5689c18c85f5..7b71a3d414b7 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-3level.h
*
* Copyright (C) 2011 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PGTABLE_3LEVEL_H
#define _ASM_PGTABLE_3LEVEL_H
@@ -37,6 +25,8 @@
#define PTE_HWTABLE_OFF (0)
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
*/
@@ -77,20 +67,22 @@
*/
#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */
#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
-#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
-#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
-#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
-#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
+#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
+#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
+#define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
-#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
-#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
-#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
-#define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
+/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
+#define L_PTE_SWP_EXCLUSIVE (_AT(pteval_t, 1) << 7)
+
+#define L_PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
+#define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
+#define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
+#define L_PMD_SECT_RDONLY	(_AT(pmdval_t, 1) << 58)
/*
* To be used in assembly code with the upper page attributes.
@@ -117,29 +109,16 @@
*/
#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
-/*
- * 2nd stage PTE definitions for LPAE.
- */
-#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
-
-/*
- * Hyp-mode PL2 PTE definitions for LPAE.
- */
-#define L_PTE_HYP L_PTE_USER
-
#ifndef __ASSEMBLY__
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!(pud_val(pud) & 2))
+#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud) (pud_val(pud))
#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
+#define pmd_leaf(pmd) pmd_sect(pmd)
#define pud_clear(pudp) \
do { \
@@ -153,19 +132,12 @@
flush_pmd_entry(pudp); \
} while (0)
-static inline pmd_t *pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
-/* Find an entry in the second-level page table.. */
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
-
-#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
+#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT))
#define copy_pmd(pmdpd,pmdps) \
do { \
@@ -201,39 +173,56 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
-#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
+#define pmd_isset(pmd, val) ((u32)(val) == (val) ? pmd_val(pmd) & (val) \
+ : !!(pmd_val(pmd) & (val)))
+#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
+
+#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
+#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
+#define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte_val(pte) |= L_PTE_SPECIAL;
+ return pte;
+}
+
+#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
+#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
-#define __HAVE_ARCH_PMD_WRITE
-#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
-#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
#endif
#define PMD_BIT_FUNC(fn,op) \
static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
-PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
+PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
-PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
-PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
-PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkwrite_novma, &= ~L_PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkclean, &= ~L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
-#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
-#define pmd_mknotpresent(pmd) (__pmd(0))
+/* No hardware dirty/accessed bits -- generic_pmdp_establish() fits */
+#define pmdp_establish generic_pmdp_establish
+
+/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
+}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
- PMD_SECT_VALID | PMD_SECT_NONE;
+ const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
+ L_PMD_SECT_VALID | L_PMD_SECT_NONE;
pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
return pmd;
}
@@ -244,18 +233,18 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
BUG_ON(addr >= TASK_SIZE);
/* create a faulting entry if PROT_NONE protected */
- if (pmd_val(pmd) & PMD_SECT_NONE)
- pmd_val(pmd) &= ~PMD_SECT_VALID;
+ if (pmd_val(pmd) & L_PMD_SECT_NONE)
+ pmd_val(pmd) &= ~L_PMD_SECT_VALID;
+
+ if (pmd_write(pmd) && pmd_dirty(pmd))
+ pmd_val(pmd) &= ~PMD_SECT_AP2;
+ else
+ pmd_val(pmd) |= PMD_SECT_AP2;
*pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
flush_pmd_entry(pmdp);
}
-static inline int has_transparent_hugepage(void)
-{
- return 1;
-}
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_3LEVEL_H */
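
For context on set_pmd_at()'s AP2 handling above, a hypothetical fault-side counterpart: marking the PMD dirty and re-installing it clears PMD_SECT_AP2, so the hardware permits subsequent writes without faulting:

static void sketch_pmd_write_fault(struct mm_struct *mm, unsigned long addr,
                                   pmd_t *pmdp)
{
        set_pmd_at(mm, addr, pmdp, pmd_mkdirty(pmd_mkyoung(*pmdp)));
}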
diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h
index 8426229ba292..d60548ccd13e 100644
--- a/arch/arm/include/asm/pgtable-hwdef.h
+++ b/arch/arm/include/asm/pgtable-hwdef.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-hwdef.h
*
* Copyright (C) 1995-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PGTABLE_HWDEF_H
#define _ASMARM_PGTABLE_HWDEF_H
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 0642228ff785..61480d096054 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable-nommu.h
*
* Copyright (C) 1995-2002 Russell King
* Copyright (C) 2004 Hyok S. Choi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PGTABLE_NOMMU_H
#define _ASMARM_PGTABLE_NOMMU_H
@@ -24,9 +21,6 @@
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp)
-#define kern_addr_valid(addr) (1)
-#define pmd_offset(a, b) ((void *)0)
-/* FIXME */
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
@@ -45,29 +39,15 @@
#define swapper_pg_dir ((pgd_t *) 0)
-#define __swp_type(x) (0)
-#define __swp_offset(x) (0)
-#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
typedef pte_t *pte_addr_t;
-static inline int pte_file(pte_t pte) { return 0; }
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) (virt_to_page(0))
-
/*
* Mark the prot value as uncacheable and unbufferable.
*/
-#define pgprot_noncached(prot) __pgprot(0)
-#define pgprot_writecombine(prot) __pgprot(0)
-#define pgprot_dmacoherent(prot) __pgprot(0)
+#define pgprot_noncached(prot) (prot)
+#define pgprot_writecombine(prot) (prot)
+#define pgprot_device(prot) (prot)
/*
@@ -76,20 +56,13 @@ static inline int pte_file(pte_t pte) { return 0; }
extern unsigned int kobjsize(const void *objp);
/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
*/
#define VMALLOC_START 0UL
#define VMALLOC_END 0xffffffffUL
-#define FIRST_USER_ADDRESS (0)
-
-#include <asm-generic/pgtable.h>
+#define FIRST_USER_ADDRESS 0UL
#else
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 04aeb02d2e11..86378eec7757 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/pgtable.h
*
* Copyright (C) 1995-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H
@@ -13,15 +10,23 @@
#include <linux/const.h>
#include <asm/proc-fns.h>
-#ifndef CONFIG_MMU
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+#endif
-#include <asm-generic/4level-fixup.h>
+#include <asm-generic/pgtable-nopud.h>
+
+#ifndef CONFIG_MMU
#include <asm/pgtable-nommu.h>
#else
-#include <asm-generic/pgtable-nopud.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
@@ -43,7 +48,7 @@
*/
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_END 0xff000000UL
+#define VMALLOC_END 0xff800000UL
#define LIBRARY_TEXT_START 0x0c000000
@@ -61,7 +66,7 @@ extern void __pgd_error(const char *file, int line, pgd_t);
* mapping to be mapped at. This is particularly important for
* non-high vector CPUs.
*/
-#define FIRST_USER_ADDRESS PAGE_SIZE
+#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)
/*
* Use TASK_SIZE as the ceiling argument for free_pgtables() and
@@ -82,9 +87,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;
-extern pgprot_t pgprot_hyp_device;
-extern pgprot_t pgprot_s2;
-extern pgprot_t pgprot_s2_device;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
@@ -97,10 +99,6 @@ extern pgprot_t pgprot_s2_device;
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
-#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
-#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
-#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
@@ -122,6 +120,9 @@ extern pgprot_t pgprot_s2_device;
#define pgprot_stronglyordered(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+#define pgprot_device(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)
+
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
@@ -144,45 +145,17 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
* 2) If we could do execute protection, then read is implied
* 3) write implies read permissions
*/
-#define __P000 __PAGE_NONE
-#define __P001 __PAGE_READONLY
-#define __P010 __PAGE_COPY
-#define __P011 __PAGE_COPY
-#define __P100 __PAGE_READONLY_EXEC
-#define __P101 __PAGE_READONLY_EXEC
-#define __P110 __PAGE_COPY_EXEC
-#define __P111 __PAGE_COPY_EXEC
-
-#define __S000 __PAGE_NONE
-#define __S001 __PAGE_READONLY
-#define __S010 __PAGE_SHARED
-#define __S011 __PAGE_SHARED
-#define __S100 __PAGE_READONLY_EXEC
-#define __S101 __PAGE_READONLY_EXEC
-#define __S110 __PAGE_SHARED_EXEC
-#define __S111 __PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr) (empty_zero_page)
-
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+#define pgdp_get(pgdp)		READ_ONCE(*pgdp)
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
+#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_present(pmd) (pmd_val(pmd))
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
@@ -191,38 +164,40 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-#ifndef CONFIG_HIGHPTE
-#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
-#define __pte_unmap(pte) do { } while (0)
-#else
-#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd)))
-#define __pte_unmap(pte) kunmap_atomic(pte)
-#endif
-
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr))
-
-#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
-#define pte_unmap(pte) __pte_unmap(pte)
-
#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
-#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
+#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \
+ : !!(pte_val(pte) & (val)))
+#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
+
#define pte_none(pte) (!pte_val(pte))
-#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
-#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
-#define pte_special(pte) (0)
+#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT))
+#define pte_valid(pte) (pte_isset((pte), L_PTE_VALID))
+#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
+#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY))
+#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY))
+#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG))
+#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN))
+
+#define pte_valid_user(pte) \
+ (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
+
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
+ pteval_t needed = mask;
+
+ if (write)
+ mask |= L_PTE_RDONLY;
-#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+ return (pte_val(pte) & mask) == needed;
+}
+#define pte_access_permitted pte_access_permitted
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
@@ -232,30 +207,63 @@ static inline void __sync_icache_dcache(pte_t pteval)
extern void __sync_icache_dcache(pte_t pteval);
#endif
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
+#define PFN_PTE_SHIFT PAGE_SHIFT
+
+void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval, unsigned int nr);
+#define set_ptes set_ptes
+
+static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
+{
+ pte_val(pte) &= ~pgprot_val(prot);
+ return pte;
+}
+
+static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
+{
+ pte_val(pte) |= pgprot_val(prot);
+ return pte;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+}
+
+static inline pte_t pte_mkwrite_novma(pte_t pte)
{
- unsigned long ext = 0;
+ return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
+}
- if (addr < TASK_SIZE && pte_present_user(pteval)) {
- __sync_icache_dcache(pteval);
- ext |= PTE_EXT_NG;
- }
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
+}
- set_pte_ext(ptep, pteval, ext);
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
+}
-PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
-PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY);
-PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
-PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
-PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_XN));
+}
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+static inline pte_t pte_mknexec(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_XN));
+}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
@@ -266,27 +274,47 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
}
/*
- * Encode and decode a swap entry. Swap entries are stored in the Linux
- * page tables as follows:
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <--------------- offset ----------------------> < type -> 0 0 0
+ * <------------------- offset ------------------> E < type -> 0 0
+ *
+ * E is the exclusive marker that is not stored in swap entries.
*
* This gives us up to 31 swap files and 64GB per swap file. Note that
* the offset field is always non-zero.
*/
-#define __SWP_TYPE_SHIFT 3
+#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
-#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+#define __swp_entry(type, offset) ((swp_entry_t) { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
+ ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+#define __swp_entry_to_pte(swp) __pte((swp).val)
+
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+ return pte_isset(pte, L_PTE_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_SWP_EXCLUSIVE));
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_SWP_EXCLUSIVE));
+}
/*
* It is an error for the kernel to have more swap files than we can
@@ -296,33 +324,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
/*
- * Encode and decode a file entry. File entries are stored in the Linux
- * page tables as follows:
- *
- * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <----------------------- offset ------------------------> 1 0 0
- */
-#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
-#define pte_to_pgoff(x) (pte_val(x) >> 3)
-#define pgoff_to_pte(x) __pte(((x) << 3) | L_PTE_FILE)
-
-#define PTE_FILE_MAX_BITS 29
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-/* FIXME: this is not correct */
-#define kern_addr_valid(addr) (1)
-
-#include <asm-generic/pgtable.h>
-
-/*
* We provide our own arch_get_unmapped_area to cope with VIPT caches.
*/
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#define pgtable_cache_init() do { } while (0)
-
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */
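
A round-trip sketch of the swap-PTE encoding documented above (type in bits 6..2, exclusive marker at bit 7, offset from bit 8 upwards); the function is illustrative only:

static void sketch_swp_roundtrip(void)
{
        swp_entry_t e = __swp_entry(3, 0x1234);
        pte_t pte = pte_swp_mkexclusive(__swp_entry_to_pte(e));

        BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != 3);
        BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != 0x1234);
        BUG_ON(!pte_swp_exclusive(pte));
}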
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
deleted file mode 100644
index f24edad26c70..000000000000
--- a/arch/arm/include/asm/pmu.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * linux/arch/arm/include/asm/pmu.h
- *
- * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __ARM_PMU_H__
-#define __ARM_PMU_H__
-
-#include <linux/interrupt.h>
-#include <linux/perf_event.h>
-
-/*
- * struct arm_pmu_platdata - ARM PMU platform data
- *
- * @handle_irq: an optional handler which will be called from the
- * interrupt and passed the address of the low level handler,
- * and can be used to implement any platform specific handling
- * before or after calling it.
- * @runtime_resume: an optional handler which will be called by the
- * runtime PM framework following a call to pm_runtime_get().
- * Note that if pm_runtime_get() is called more than once in
- * succession this handler will only be called once.
- * @runtime_suspend: an optional handler which will be called by the
- * runtime PM framework following a call to pm_runtime_put().
- * Note that if pm_runtime_get() is called more than once in
- * succession this handler will only be called following the
- * final call to pm_runtime_put() that actually disables the
- * hardware.
- */
-struct arm_pmu_platdata {
- irqreturn_t (*handle_irq)(int irq, void *dev,
- irq_handler_t pmu_handler);
- int (*runtime_resume)(struct device *dev);
- int (*runtime_suspend)(struct device *dev);
-};
-
-#ifdef CONFIG_HW_PERF_EVENTS
-
-/* The events for a given PMU register set. */
-struct pmu_hw_events {
- /*
- * The events that are active on the PMU for the given index.
- */
- struct perf_event **events;
-
- /*
- * A 1 bit for an index indicates that the counter is being used for
- * an event. A 0 means that the counter can be used.
- */
- unsigned long *used_mask;
-
- /*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
- raw_spinlock_t pmu_lock;
-};
-
-struct arm_pmu {
- struct pmu pmu;
- cpumask_t active_irqs;
- char *name;
- irqreturn_t (*handle_irq)(int irq_num, void *dev);
- void (*enable)(struct perf_event *event);
- void (*disable)(struct perf_event *event);
- int (*get_event_idx)(struct pmu_hw_events *hw_events,
- struct perf_event *event);
- int (*set_event_filter)(struct hw_perf_event *evt,
- struct perf_event_attr *attr);
- u32 (*read_counter)(struct perf_event *event);
- void (*write_counter)(struct perf_event *event, u32 val);
- void (*start)(struct arm_pmu *);
- void (*stop)(struct arm_pmu *);
- void (*reset)(void *);
- int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
- void (*free_irq)(struct arm_pmu *);
- int (*map_event)(struct perf_event *event);
- int num_events;
- atomic_t active_events;
- struct mutex reserve_mutex;
- u64 max_period;
- struct platform_device *plat_device;
- struct pmu_hw_events *(*get_hw_events)(void);
-};
-
-#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-
-extern const struct dev_pm_ops armpmu_dev_pm_ops;
-
-int armpmu_register(struct arm_pmu *armpmu, int type);
-
-u64 armpmu_event_update(struct perf_event *event);
-
-int armpmu_event_set_period(struct perf_event *event);
-
-int armpmu_map_event(struct perf_event *event,
- const unsigned (*event_map)[PERF_COUNT_HW_MAX],
- const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX],
- u32 raw_event_mask);
-
-#endif /* CONFIG_HW_PERF_EVENTS */
-
-#endif /* __ARM_PMU_H__ */
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
new file mode 100644
index 000000000000..ebbd9ec95d21
--- /dev/null
+++ b/arch/arm/include/asm/probes.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/arm/include/asm/probes.h
+ *
+ * Original contents copied from arch/arm/include/asm/kprobes.h
+ * which contains the following notice...
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ */
+
+#ifndef _ASM_PROBES_H
+#define _ASM_PROBES_H
+
+#ifndef __ASSEMBLY__
+
+typedef u32 probes_opcode_t;
+
+struct arch_probes_insn;
+typedef void (probes_insn_handler_t)(probes_opcode_t,
+ struct arch_probes_insn *,
+ struct pt_regs *);
+typedef unsigned long (probes_check_cc)(unsigned long);
+typedef void (probes_insn_singlestep_t)(probes_opcode_t,
+ struct arch_probes_insn *,
+ struct pt_regs *);
+typedef void (probes_insn_fn_t)(void);
+
+/* Architecture specific copy of original instruction. */
+struct arch_probes_insn {
+ probes_opcode_t *insn;
+ probes_insn_handler_t *insn_handler;
+ probes_check_cc *insn_check_cc;
+ probes_insn_singlestep_t *insn_singlestep;
+ probes_insn_fn_t *insn_fn;
+ int stack_space;
+ unsigned long register_usage_flags;
+ bool kprobe_direct_exec;
+};
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * We assume one instruction can consume at most 64 bytes of stack, i.e.
+ * 'push {r0-r15}'. Instructions that consume more or an unknown amount of
+ * stack ('str r0, [sp, #-80]', 'str r0, [sp, r1]') must not be probed.
+ */
+#define MAX_STACK_SIZE 64
+
+#endif
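
A sketch of how a probe decoder might enforce the bound above; the helper is hypothetical, assuming stack_space records the per-instruction stack use determined at decode time:

static bool sketch_probes_stack_ok(const struct arch_probes_insn *asi)
{
        return asi->stack_space >= 0 && asi->stack_space <= MAX_STACK_SIZE;
}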
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 5324c1112f3a..b4986a23d852 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/proc-fns.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_PROCFNS_H
#define __ASM_PROCFNS_H
@@ -23,7 +20,7 @@ struct mm_struct;
/*
* Don't change this structure - ASM code relies on it.
*/
-extern struct processor {
+struct processor {
/* MISC
* get data abort address/flags
*/
@@ -37,13 +34,17 @@ extern struct processor {
*/
void (*_proc_init)(void);
/*
+ * Check for processor bugs
+ */
+ void (*check_bugs)(void);
+ /*
* Disable any processor specifics
*/
void (*_proc_fin)(void);
/*
* Special stuff for a reset
*/
- void (*reset)(unsigned long addr) __attribute__((noreturn));
+ void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
/*
* Idle the processor
*/
@@ -75,9 +76,13 @@ extern struct processor {
unsigned int suspend_size;
void (*do_suspend)(void *);
void (*do_resume)(void *);
-} processor;
+};
#ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
@@ -88,28 +93,59 @@ extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
#else
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
#endif
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
/* These three are private to arch/arm/kernel/suspend.c */
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);
#else
-#define cpu_proc_init processor._proc_init
-#define cpu_proc_fin processor._proc_fin
-#define cpu_reset processor.reset
-#define cpu_do_idle processor._do_idle
-#define cpu_dcache_clean_area processor.dcache_clean_area
-#define cpu_set_pte_ext processor.set_pte_ext
-#define cpu_do_switch_mm processor.switch_mm
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend processor.do_suspend
-#define cpu_do_resume processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised. We have a couple of functions that are
+ * called in a preemptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f) cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ unsigned int cpu = smp_processor_id();
+ *cpu_vtable[cpu] = *p;
+ WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+ cpu_vtable[0]->dcache_clean_area);
+ WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+ cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f) processor.f
+#define PROC_TABLE(f) processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ processor = *p;
+}
#endif
-extern void cpu_resume(void);
+#define cpu_proc_init PROC_VTABLE(_proc_init)
+#define cpu_check_bugs PROC_VTABLE(check_bugs)
+#define cpu_proc_fin PROC_VTABLE(_proc_fin)
+#define cpu_reset PROC_VTABLE(reset)
+#define cpu_do_idle PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend PROC_VTABLE(do_suspend)
+#define cpu_do_resume PROC_VTABLE(do_resume)
+#endif
-#include <asm/memory.h>
+extern void cpu_resume(void);
#ifdef CONFIG_MMU
@@ -125,13 +161,6 @@ extern void cpu_resume(void);
ttbr; \
})
-#define cpu_set_ttbr(nr, val) \
- do { \
- u64 ttbr = val; \
- __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \
- : : "r" (ttbr)); \
- } while (0)
-
#define cpu_get_pgd() \
({ \
u64 pg = cpu_get_ttbr(0); \
@@ -149,6 +178,18 @@ extern void cpu_resume(void);
})
#endif
+static inline unsigned int cpu_get_ttbcr(void)
+{
+ unsigned int ttbcr;
+ asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
+ return ttbcr;
+}
+
+static inline void cpu_set_ttbcr(unsigned int ttbcr)
+{
+ asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr) : "memory");
+}
+
#else /*!CONFIG_MMU */
#define cpu_switch_mm(pgd,mm) { }
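The cpu_get_ttbcr()/cpu_set_ttbcr() accessors added above are thin MRC/MCR wrappers around CP15 c2, c0, 2. A minimal read-modify-write sketch follows, assuming the architectural EAE bit (bit 31 of TTBCR on ARMv7 with LPAE); the helper below is hypothetical, not kernel API:

#define TTBCR_EAE	(1u << 31)	/* ARMv7 LPAE: long-descriptor format select */

/* Hypothetical helper: clear TTBCR.EAE via the accessors above. */
static inline void example_ttbcr_clear_eae(void)
{
	unsigned int ttbcr = cpu_get_ttbcr();	/* MRC p15, 0, Rt, c2, c0, 2 */

	cpu_set_ttbcr(ttbcr & ~TTBCR_EAE);	/* MCR p15, 0, Rt, c2, c0, 2 */
}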
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 06e7d509eaac..326864f79d18 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -1,27 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/processor.h
*
* Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
#ifdef __KERNEL__
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
+#include <asm/unified.h>
+#include <asm/vdso/processor.h>
#ifdef __KERNEL__
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -44,18 +37,36 @@ struct thread_struct {
struct debug_info debug;
};
-#define INIT_THREAD { }
+/*
+ * Everything usercopied to/from thread_struct is statically-sized, so
+ * no hardened usercopy whitelist is needed.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+ unsigned long *size)
+{
+ *offset = *size = 0;
+}
-#ifdef CONFIG_MMU
-#define nommu_start_thread(regs) do { } while (0)
-#else
-#define nommu_start_thread(regs) regs->ARM_r10 = current->mm->start_data
-#endif
+#define INIT_THREAD { }
#define start_thread(regs,pc,sp) \
({ \
- unsigned long *stack = (unsigned long *)sp; \
+ unsigned long r7, r8, r9; \
+ \
+ if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) { \
+ r7 = regs->ARM_r7; \
+ r8 = regs->ARM_r8; \
+ r9 = regs->ARM_r9; \
+ } \
memset(regs->uregs, 0, sizeof(regs->uregs)); \
+ if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && \
+ current->personality & FDPIC_FUNCPTRS) { \
+ regs->ARM_r7 = r7; \
+ regs->ARM_r8 = r8; \
+ regs->ARM_r9 = r9; \
+ regs->ARM_r10 = current->mm->start_data; \
+ } else if (!IS_ENABLED(CONFIG_MMU)) \
+ regs->ARM_r10 = current->mm->start_data; \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
else \
@@ -65,25 +76,12 @@ struct thread_struct {
regs->ARM_cpsr |= PSR_ENDSTATE; \
regs->ARM_pc = pc & ~1; /* pc */ \
regs->ARM_sp = sp; /* sp */ \
- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
- nommu_start_thread(regs); \
})
/* Forward declaration, a strange C thing */
struct task_struct;
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-unsigned long get_wchan(struct task_struct *p);
-
-#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
-#define cpu_relax() smp_mb()
-#else
-#define cpu_relax() barrier()
-#endif
+unsigned long __get_wchan(struct task_struct *p);
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
@@ -91,6 +89,18 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
+#ifdef CONFIG_SMP
+#define __ALT_SMP_ASM(smp, up) \
+ "9998: " smp "\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .align 2\n" \
+ " .long 9998b - .\n" \
+ " " up "\n" \
+ " .popsection\n"
+#else
+#define __ALT_SMP_ASM(smp, up) up
+#endif
+
/*
* Prefetching support - only ARMv5.
*/
@@ -101,20 +111,23 @@ static inline void prefetch(const void *ptr)
{
__asm__ __volatile__(
"pld\t%a0"
- :
- : "p" (ptr)
- : "cc");
+ :: "p" (ptr));
}
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
-#define prefetchw(ptr) prefetch(ptr)
-
-#define ARCH_HAS_SPINLOCK_PREFETCH
-#define spin_lock_prefetch(x) do { } while (0)
-
+static inline void prefetchw(const void *ptr)
+{
+ __asm__ __volatile__(
+ ".arch_extension mp\n"
+ __ALT_SMP_ASM(
+ "pldw\t%a0",
+ "pld\t%a0"
+ )
+ :: "p" (ptr));
+}
+#endif
#endif
-
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
#endif
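Each __ALT_SMP_ASM(smp, up) site assembles the SMP instruction in place and records, in the .alt.smp.init section, a PC-relative offset to that site followed by the UP replacement instruction. On a uniprocessor boot the kernel walks these records and overwrites each site, which is how prefetchw() degrades "pldw" to "pld". The real walker is assembly in the early boot code; the C sketch below, with hypothetical names and assuming fixed 4-byte encodings, only illustrates the record layout:

/* Hypothetical C rendering of one ".alt.smp.init" record as emitted
 * by __ALT_SMP_ASM(): ".long 9998b - ." then the UP instruction.
 */
struct alt_smp_rec {
	s32	site_offset;	/* PC-relative offset to the patch site */
	u32	up_insn;	/* e.g. "pld" where the SMP build used "pldw" */
};

static void __init example_fixup_smp_on_up(struct alt_smp_rec *rec,
					   struct alt_smp_rec *end)
{
	for (; rec < end; rec++) {
		u32 *site = (u32 *)((unsigned long)&rec->site_offset +
				    rec->site_offset);

		*site = rec->up_insn;	/* replace SMP-only op for UP */
	}
}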
diff --git a/arch/arm/include/asm/procinfo.h b/arch/arm/include/asm/procinfo.h
index ca52e584ef74..42df316fb8ba 100644
--- a/arch/arm/include/asm/procinfo.h
+++ b/arch/arm/include/asm/procinfo.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/procinfo.h
*
* Copyright (C) 1996-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_PROCINFO_H
#define __ASM_PROCINFO_H
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index a219227c3e43..402e3f34c7ed 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -1,32 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/prom.h
*
* Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __ASMARM_PROM_H
#define __ASMARM_PROM_H
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
#ifdef CONFIG_OF
-extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
-extern void arm_dt_memblock_reserve(void);
+extern const struct machine_desc *setup_machine_fdt(void *dt_virt);
extern void __init arm_dt_init_cpu_maps(void);
#else /* CONFIG_OF */
-static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+static inline const struct machine_desc *setup_machine_fdt(void *dt_virt)
{
return NULL;
}
-static inline void arm_dt_memblock_reserve(void) { }
static inline void arm_dt_init_cpu_maps(void) { }
#endif /* CONFIG_OF */
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index c4ae171850f8..536e155328fa 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -1,12 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2012 ARM Limited
*/
@@ -14,31 +7,11 @@
#ifndef __ASM_ARM_PSCI_H
#define __ASM_ARM_PSCI_H
-#define PSCI_POWER_STATE_TYPE_STANDBY 0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
-
-struct psci_power_state {
- u16 id;
- u8 type;
- u8 affinity_level;
-};
-
-struct psci_operations {
- int (*cpu_suspend)(struct psci_power_state state,
- unsigned long entry_point);
- int (*cpu_off)(struct psci_power_state state);
- int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
- int (*migrate)(unsigned long cpuid);
-};
-
-extern struct psci_operations psci_ops;
-extern struct smp_operations psci_smp_ops;
+extern const struct smp_operations psci_smp_ops;
-#ifdef CONFIG_ARM_PSCI
-void psci_init(void);
+#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
bool psci_smp_available(void);
#else
-static inline void psci_init(void) { }
static inline bool psci_smp_available(void) { return false; }
#endif
diff --git a/arch/arm/include/asm/ptdump.h b/arch/arm/include/asm/ptdump.h
new file mode 100644
index 000000000000..46a4575146ee
--- /dev/null
+++ b/arch/arm/include/asm/ptdump.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 ARM Ltd. */
+#ifndef __ASM_PTDUMP_H
+#define __ASM_PTDUMP_H
+
+#ifdef CONFIG_ARM_PTDUMP_CORE
+
+#include <linux/mm_types.h>
+#include <linux/seq_file.h>
+
+struct addr_marker {
+ unsigned long start_address;
+ char *name;
+};
+
+struct ptdump_info {
+ struct mm_struct *mm;
+ const struct addr_marker *markers;
+ unsigned long base_addr;
+};
+
+void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
+#ifdef CONFIG_ARM_PTDUMP_DEBUGFS
+#define EFI_RUNTIME_MAP_END SZ_1G
+void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
+#else
+static inline void ptdump_debugfs_register(struct ptdump_info *info,
+ const char *name) { }
+#endif /* CONFIG_ARM_PTDUMP_DEBUGFS */
+
+void ptdump_check_wx(void);
+
+#endif /* CONFIG_ARM_PTDUMP_CORE */
+
+#ifdef CONFIG_ARM_DEBUG_WX
+#define arm_debug_checkwx() ptdump_check_wx()
+#else
+#define arm_debug_checkwx() do { } while (0)
+#endif
+
+#endif /* __ASM_PTDUMP_H */
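A hedged usage sketch for the new ptdump interface: register a debugfs view of init_mm's page tables. The marker addresses, the sentinel convention, and the file name here are illustrative placeholders, not values taken from this patch:

/* Illustrative only: marker values and names are placeholders. */
static struct addr_marker example_markers[] = {
	{ 0,		"User Space" },
	{ PAGE_OFFSET,	"Kernel Mapping" },
	{ -1UL,		NULL },			/* sentinel */
};

static struct ptdump_info example_info = {
	.mm		= &init_mm,
	.markers	= example_markers,
	.base_addr	= 0,
};

static int __init example_ptdump_init(void)
{
	ptdump_debugfs_register(&example_info, "example_page_tables");
	return 0;
}
device_initcall(example_ptdump_init);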
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 04c99f36ff7f..6eb311fb2da0 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/ptrace.h
*
* Copyright (C) 1996-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_PTRACE_H
#define __ASM_ARM_PTRACE_H
@@ -13,10 +10,21 @@
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
struct pt_regs {
unsigned long uregs[18];
};
+struct svc_pt_regs {
+ struct pt_regs regs;
+ u32 dacr;
+ u32 ttbcr;
+};
+
+#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
+
#define user_mode(regs) \
(((regs)->ARM_cpsr & 0xf) == 0)
@@ -27,9 +35,13 @@ struct pt_regs {
#define thumb_mode(regs) (0)
#endif
+#ifndef CONFIG_CPU_V7M
#define isa_mode(regs) \
- ((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \
- (((regs)->ARM_cpsr & PSR_T_BIT) >> 5))
+ (FIELD_GET(PSR_J_BIT, (regs)->ARM_cpsr) << 1 | \
+ FIELD_GET(PSR_T_BIT, (regs)->ARM_cpsr))
+#else
+#define isa_mode(regs) 1 /* Thumb */
+#endif
#define processor_mode(regs) \
((regs)->ARM_cpsr & MODE_MASK)
@@ -80,6 +92,18 @@ static inline long regs_return_value(struct pt_regs *regs)
#define instruction_pointer(regs) (regs)->ARM_pc
+#ifdef CONFIG_THUMB2_KERNEL
+#define frame_pointer(regs) (regs)->ARM_r7
+#else
+#define frame_pointer(regs) (regs)->ARM_fp
+#endif
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ instruction_pointer(regs) = val;
+}
+
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
@@ -100,8 +124,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
/*
* kprobe-based event tracer support
*/
-#include <linux/stddef.h>
-#include <linux/types.h>
+#include <linux/compiler.h>
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))
extern int regs_query_register_offset(const char *name);
@@ -138,10 +161,42 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
return regs->ARM_sp;
}
-#define current_pt_regs(void) ({ \
- register unsigned long sp asm ("sp"); \
- (struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \
+#define current_pt_regs(void) ({ (struct pt_regs *) \
+ ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
})
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->ARM_r0 = rc;
+}
+
+/*
+ * Update ITSTATE after normal execution of an IT block instruction.
+ *
+ * The 8 IT state bits are split into two parts in CPSR:
+ * ITSTATE<1:0> are in CPSR<26:25>
+ * ITSTATE<7:2> are in CPSR<15:10>
+ */
+static inline unsigned long it_advance(unsigned long cpsr)
+{
+ if ((cpsr & 0x06000400) == 0) {
+ /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
+ cpsr &= ~PSR_IT_MASK;
+ } else {
+ /* We need to shift left ITSTATE<4:0> */
+ const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
+ unsigned long it = cpsr & mask;
+ it <<= 1;
+ it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
+ it &= mask;
+ cpsr &= ~mask;
+ cpsr |= it;
+ }
+ return cpsr;
+}
+
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
#endif /* __ASSEMBLY__ */
#endif
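The it_advance() bit twiddling above is easy to verify in isolation. Below is a standalone userspace harness that copies the function body, assuming the uapi PSR_IT_MASK value (0x0600fc00: ITSTATE<1:0> in CPSR<26:25>, ITSTATE<7:2> in CPSR<15:10>) and an arbitrary starting ITSTATE bit pattern:

#include <stdio.h>

#define PSR_IT_MASK	0x0600fc00UL

static unsigned long it_advance(unsigned long cpsr)
{
	if ((cpsr & 0x06000400) == 0) {
		cpsr &= ~PSR_IT_MASK;		/* IT block finished */
	} else {
		const unsigned long mask = 0x06001c00;
		unsigned long it = cpsr & mask;

		it <<= 1;
		it |= it >> (27 - 10);		/* carry ITSTATE<2> down */
		it &= mask;
		cpsr = (cpsr & ~mask) | it;
	}
	return cpsr;
}

/* Pack an 8-bit ITSTATE value into its split CPSR fields. */
static unsigned long cpsr_from_itstate(unsigned int its)
{
	return ((its & 0x03UL) << 25) | ((its & 0xfcUL) << 8);
}

int main(void)
{
	/* 0x1e is just an example pattern; it advances in three steps. */
	unsigned long cpsr = cpsr_from_itstate(0x1e);

	while (cpsr & PSR_IT_MASK) {
		printf("IT bits %#010lx\n", cpsr & PSR_IT_MASK);
		cpsr = it_advance(cpsr);
	}
	return 0;
}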
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h
deleted file mode 100644
index cefdb8f898a1..000000000000
--- a/arch/arm/include/asm/scatterlist.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASMARM_SCATTERLIST_H
-#define _ASMARM_SCATTERLIST_H
-
-#ifdef CONFIG_ARM_HAS_SG_CHAIN
-#define ARCH_HAS_SG_CHAIN
-#endif
-
-#include <asm/memory.h>
-#include <asm/types.h>
-#include <asm-generic/scatterlist.h>
-
-#endif /* _ASMARM_SCATTERLIST_H */
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
deleted file mode 100644
index 2389b71a8e7c..000000000000
--- a/arch/arm/include/asm/sched_clock.h
+++ /dev/null
@@ -1,4 +0,0 @@
-/* You shouldn't include this file. Use linux/sched_clock.h instead.
- * Temporary file until all asm/sched_clock.h users are gone
- */
-#include <linux/sched_clock.h>
diff --git a/arch/arm/include/asm/seccomp.h b/arch/arm/include/asm/seccomp.h
index 52b156b341f5..e9ad0f37d2ba 100644
--- a/arch/arm/include/asm/seccomp.h
+++ b/arch/arm/include/asm/seccomp.h
@@ -1,11 +1,11 @@
-#ifndef _ASM_ARM_SECCOMP_H
-#define _ASM_ARM_SECCOMP_H
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
-#include <linux/unistd.h>
+#include <asm-generic/seccomp.h>
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_ARM
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "arm"
-#endif /* _ASM_ARM_SECCOMP_H */
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h
new file mode 100644
index 000000000000..700b8bcdf9bd
--- /dev/null
+++ b/arch/arm/include/asm/sections.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_SECTIONS_H
+#define _ASM_ARM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _exiprom[];
+
+extern char __idmap_text_start[];
+extern char __idmap_text_end[];
+extern char __entry_text_start[];
+extern char __entry_text_end[];
+
+static inline bool in_entry_text(unsigned long addr)
+{
+ return memory_contains(__entry_text_start, __entry_text_end,
+ (void *)addr, 1);
+}
+
+static inline bool in_idmap_text(unsigned long addr)
+{
+ void *a = (void *)addr;
+ return memory_contains(__idmap_text_start, __idmap_text_end, a, 1);
+}
+
+#endif /* _ASM_ARM_SECTIONS_H */
diff --git a/arch/arm/include/asm/secure_cntvoff.h b/arch/arm/include/asm/secure_cntvoff.h
new file mode 100644
index 000000000000..1f93aee1f630
--- /dev/null
+++ b/arch/arm/include/asm/secure_cntvoff.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASMARM_ARCH_CNTVOFF_H
+#define __ASMARM_ARCH_CNTVOFF_H
+
+extern void secure_cntvoff_init(void);
+
+#endif
diff --git a/arch/arm/include/asm/semihost.h b/arch/arm/include/asm/semihost.h
new file mode 100644
index 000000000000..f365787e7c23
--- /dev/null
+++ b/arch/arm/include/asm/semihost.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Adapted for ARM and earlycon:
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Rob Herring <robh@kernel.org>
+ */
+
+#ifndef _ARM_SEMIHOST_H_
+#define _ARM_SEMIHOST_H_
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEMIHOST_SWI "0xab"
+#else
+#define SEMIHOST_SWI "0x123456"
+#endif
+
+struct uart_port;
+
+static inline void smh_putc(struct uart_port *port, unsigned char c)
+{
+ asm volatile("mov r1, %0\n"
+ "mov r0, #3\n"
+ "svc " SEMIHOST_SWI "\n"
+ : : "r" (&c) : "r0", "r1", "memory");
+}
+
+#endif /* _ARM_SEMIHOST_H_ */
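smh_putc() issues semihosting operation 0x03 (SYS_WRITEC): r0 carries the operation number, r1 a pointer to the character, and the SVC immediate selects the Thumb (0xab) or ARM (0x123456) semihosting trap. A hedged sketch of a string writer built on top of it; the function name is illustrative:

/* Hedged sketch: emit a string one character at a time through the
 * semihosting call above. Passing NULL for the port is fine because
 * smh_putc() never dereferences it.
 */
static void example_smh_puts(const char *s)
{
	while (*s)
		smh_putc(NULL, *s++);
}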
diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h
new file mode 100644
index 000000000000..0211b9c5b14d
--- /dev/null
+++ b/arch/arm/include/asm/set_memory.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 1999-2002 Russell King
+ */
+
+#ifndef _ASMARM_SET_MEMORY_H
+#define _ASMARM_SET_MEMORY_H
+
+#ifdef CONFIG_MMU
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_valid(unsigned long addr, int numpages, int enable);
+#else
+static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+#endif
+
+#endif
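The set_memory_*() calls take a page-aligned kernel virtual address and a count of whole pages. A hedged sketch of the usual patch-then-reprotect pattern; the helper and its assumptions (caller owns the page, the address is page-aligned, the word lies in that page) are illustrative:

/* Illustrative pattern: temporarily make one page writable. */
static void example_patch_word(unsigned long page_addr, u32 *word, u32 val)
{
	set_memory_rw(page_addr, 1);	/* one page */
	*word = val;
	set_memory_ro(page_addr, 1);
}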
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index c50f05609501..cc106f946c69 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -1,56 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/asm/setup.h
*
* Copyright (C) 1997-1999 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Structure passed to kernel to tell it about the
- * hardware it's running on. See Documentation/arm/Setup
+ * hardware it's running on. See Documentation/arch/arm/setup.rst
* for more info.
*/
#ifndef __ASMARM_SETUP_H
#define __ASMARM_SETUP_H
+#include <linux/screen_info.h>
#include <uapi/asm/setup.h>
-#define __tag __used __attribute__((__section__(".taglist.init")))
+#define __tag __used __section(".taglist.init")
#define __tagtable(tag, fn) \
static const struct tagtable __tagtable_##fn __tag = { tag, fn }
-/*
- * Memory map description
- */
-#define NR_BANKS CONFIG_ARM_NR_BANKS
-
-struct membank {
- phys_addr_t start;
- phys_addr_t size;
- unsigned int highmem;
-};
-
-struct meminfo {
- int nr_banks;
- struct membank bank[NR_BANKS];
-};
-
-extern struct meminfo meminfo;
+extern int arm_add_memory(u64 start, u64 size);
+extern __printf(1, 2) void early_print(const char *str, ...);
+extern void dump_machine_table(void);
-#define for_each_bank(iter,mi) \
- for (iter = 0; iter < (mi)->nr_banks; iter++)
+#ifdef CONFIG_ATAGS_PROC
+extern void save_atags(const struct tag *tags);
+#else
+static inline void save_atags(const struct tag *tags) { }
+#endif
-#define bank_pfn_start(bank) __phys_to_pfn((bank)->start)
-#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size)
-#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT)
-#define bank_phys_start(bank) (bank)->start
-#define bank_phys_end(bank) ((bank)->start + (bank)->size)
-#define bank_phys_size(bank) (bank)->size
+struct machine_desc;
+void init_default_cache_policy(unsigned long);
+void paging_init(const struct machine_desc *desc);
+void early_mm_init(const struct machine_desc *);
+void adjust_lowmem_bounds(void);
+void setup_dma_zone(const struct machine_desc *desc);
-extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
-extern void early_print(const char *str, ...);
-extern void dump_machine_table(void);
+#ifdef CONFIG_VGA_CONSOLE
+extern struct screen_info vgacon_screen_info;
+#endif
#endif
diff --git a/arch/arm/include/asm/shmparam.h b/arch/arm/include/asm/shmparam.h
index a5223b3a9bf9..367a9dac6150 100644
--- a/arch/arm/include/asm/shmparam.h
+++ b/arch/arm/include/asm/shmparam.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASMARM_SHMPARAM_H
#define _ASMARM_SHMPARAM_H
diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
index c0eb412aff04..8b84092d1518 100644
--- a/arch/arm/include/asm/signal.h
+++ b/arch/arm/include/asm/signal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASMARM_SIGNAL_H
#define _ASMARM_SIGNAL_H
@@ -16,7 +17,14 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
+#define __ARCH_UAPI_SA_FLAGS (SA_THIRTYTWO | SA_RESTORER)
+
#define __ARCH_HAS_SA_RESTORER
#include <asm/sigcontext.h>
+
+void do_rseq_syscall(struct pt_regs *regs);
+int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
+ int syscall);
+
#endif
diff --git a/arch/arm/include/asm/simd.h b/arch/arm/include/asm/simd.h
new file mode 100644
index 000000000000..8549fa8b7253
--- /dev/null
+++ b/arch/arm/include/asm/simd.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SIMD_H
+#define _ASM_SIMD_H
+
+#include <linux/cleanup.h>
+#include <linux/compiler_attributes.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+#include <asm/neon.h>
+
+static __must_check inline bool may_use_simd(void)
+{
+ return IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && !in_hardirq()
+ && !irqs_disabled();
+}
+
+DEFINE_LOCK_GUARD_0(ksimd, kernel_neon_begin(), kernel_neon_end())
+
+#define scoped_ksimd() scoped_guard(ksimd)
+
+#endif /* _ASM_SIMD_H */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a8cae71caceb..8c05a7f374d8 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/smp.h
*
* Copyright (C) 2004-2005 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_SMP_H
#define __ASM_ARM_SMP_H
@@ -28,11 +25,6 @@ struct seq_file;
extern void show_ipi_list(struct seq_file *, int);
/*
- * Called from assembly code, this handles an IPI.
- */
-asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
-
-/*
* Called from C code, this handles an IPI.
*/
void handle_IPI(int ipinr, struct pt_regs *regs);
@@ -42,23 +34,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs);
*/
extern void smp_init_cpus(void);
-
/*
- * Provide a function to raise an IPI cross call on CPUs in callmap.
+ * Register IPI interrupts with the arch SMP code
*/
-extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
-
-/*
- * Boot a secondary CPU, and assign it the specified idle task.
- * This also gives us the initial stack to use for this CPU.
- */
-extern int boot_secondary(unsigned int cpu, struct task_struct *);
+extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
/*
* Called from platform specific assembly code, this is the
* secondary CPU entry point.
*/
-asmlinkage void secondary_start_kernel(void);
+asmlinkage void secondary_start_kernel(struct task_struct *task);
/*
@@ -66,24 +51,27 @@ asmlinkage void secondary_start_kernel(void);
*/
struct secondary_data {
union {
- unsigned long mpu_rgn_szr;
- unsigned long pgdir;
+ struct mpu_rgn_info *mpu_rgn_info;
+ u64 pgdir;
};
unsigned long swapper_pg_dir;
void *stack;
+ struct task_struct *task;
};
extern struct secondary_data secondary_data;
-extern volatile int pen_release;
+extern void secondary_startup(void);
+extern void secondary_startup_arm(void);
extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
+static inline void __cpu_die(unsigned int cpu) { }
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+extern int register_ipi_completion(struct completion *completion, int cpu);
+
struct smp_operations {
#ifdef CONFIG_SMP
/*
@@ -107,14 +95,24 @@ struct smp_operations {
#ifdef CONFIG_HOTPLUG_CPU
int (*cpu_kill)(unsigned int cpu);
void (*cpu_die)(unsigned int cpu);
+ bool (*cpu_can_disable)(unsigned int cpu);
int (*cpu_disable)(unsigned int cpu);
#endif
#endif
};
+struct of_cpu_method {
+ const char *method;
+ const struct smp_operations *ops;
+};
+
+#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \
+ static const struct of_cpu_method __cpu_method_of_table_##name \
+ __used __section("__cpu_method_of_table") \
+ = { .method = _method, .ops = _ops }
/*
* set platform specific SMP operations
*/
-extern void smp_set_ops(struct smp_operations *);
+extern void smp_set_ops(const struct smp_operations *);
#endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd4..f2c36acf9886 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ARM specific SMP header, this contains our implementation
* details.
@@ -8,6 +9,7 @@
#include <linux/cpumask.h>
#include <linux/err.h>
+#include <asm/cpu.h>
#include <asm/cputype.h>
/*
@@ -25,6 +27,20 @@ static inline bool is_smp(void)
#endif
}
+/**
+ * smp_cpuid_part() - return part id for a given cpu
+ * @cpu: logical cpu id.
+ *
+ * Return: part id of logical cpu passed as argument.
+ */
+static inline unsigned int smp_cpuid_part(int cpu)
+{
+ struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu);
+
+ return is_smp() ? cpu_info->cpuid & ARM_CPU_PART_MASK :
+ read_cpuid_part();
+}
+
/* all SMP configurations have the extended CPUID registers */
#ifndef CONFIG_MMU
#define tlb_ops_need_broadcast() 0
@@ -88,4 +104,17 @@ static inline u32 mpidr_hash_size(void)
{
return 1 << mpidr_hash.bits;
}
+
+extern int platform_can_secondary_boot(void);
+extern int platform_can_cpu_hotplug(void);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+ return 0;
+}
+#endif
+
#endif
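smp_cpuid_part() allows part-number checks against CPUs other than the current one, falling back to read_cpuid_part() when not running SMP. A hedged example using a part constant from <asm/cputype.h>; the helper name is illustrative:

/* Hedged example: does any online CPU need a Cortex-A15 quirk? */
static bool example_any_cpu_is_ca15(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (smp_cpuid_part(cpu) == ARM_CPU_PART_CORTEX_A15)
			return true;
	return false;
}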
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 0393fbab8dd5..b818e5d0cd78 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_ARCH_SCU_H
#define __ASMARM_ARCH_SCU_H
@@ -7,11 +8,12 @@
#ifndef __ASSEMBLER__
+#include <linux/errno.h>
#include <asm/cputype.h>
static inline bool scu_a9_has_base(void)
{
- return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
+ return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
}
static inline unsigned long scu_a9_get_base(void)
@@ -26,6 +28,8 @@ static inline unsigned long scu_a9_get_base(void)
#ifdef CONFIG_HAVE_ARM_SCU
unsigned int scu_get_core_count(void __iomem *);
int scu_power_mode(void __iomem *, unsigned int);
+int scu_cpu_power_enable(void __iomem *, unsigned int);
+int scu_get_cpu_power_mode(void __iomem *scu_base, unsigned int logical_cpu);
#else
static inline unsigned int scu_get_core_count(void __iomem *scu_base)
{
@@ -35,6 +39,16 @@ static inline int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
return -EINVAL;
}
+static inline int scu_cpu_power_enable(void __iomem *scu_base,
+ unsigned int mode)
+{
+ return -EINVAL;
+}
+static inline int scu_get_cpu_power_mode(void __iomem *scu_base,
+ unsigned int logical_cpu)
+{
+ return -EINVAL;
+}
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_ARM_SCU)
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index 7b2899c2f7fc..c729d2113a24 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_SMP_TWD_H
#define __ASMARM_SMP_TWD_H
@@ -18,20 +19,4 @@
#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
-#include <linux/ioport.h>
-
-struct twd_local_timer {
- struct resource res[2];
-};
-
-#define DEFINE_TWD_LOCAL_TIMER(name,base,irq) \
-struct twd_local_timer name __initdata = { \
- .res = { \
- DEFINE_RES_MEM(base, 0x10), \
- DEFINE_RES_IRQ(irq), \
- }, \
-};
-
-int twd_local_timer_register(struct twd_local_timer *);
-
#endif
diff --git a/arch/arm/include/asm/sparsemem.h b/arch/arm/include/asm/sparsemem.h
index 00098615c6f0..421e3415338a 100644
--- a/arch/arm/include/asm/sparsemem.h
+++ b/arch/arm/include/asm/sparsemem.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_SPARSEMEM_H
#define ASMARM_SPARSEMEM_H
-#include <asm/memory.h>
+#include <asm/page.h>
/*
* Two definitions are required for sparsemem:
@@ -15,10 +16,11 @@
* Eg, if you have 2 banks of up to 64MB at 0x80000000, 0x84000000,
* then MAX_PHYSMEM_BITS is 32, SECTION_SIZE_BITS is 26.
*
- * Define these in your mach/memory.h.
+ * These can be overridden in your mach/memory.h.
*/
-#if !defined(SECTION_SIZE_BITS) || !defined(MAX_PHYSMEM_BITS)
-#error Sparsemem is not supported on this platform
+#if !defined(MAX_PHYSMEM_BITS) || !defined(SECTION_SIZE_BITS)
+#define MAX_PHYSMEM_BITS 36
+#define SECTION_SIZE_BITS 28
#endif
#endif
diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h
new file mode 100644
index 000000000000..d9c28b3b6b62
--- /dev/null
+++ b/arch/arm/include/asm/spectre.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+enum {
+ SPECTRE_UNAFFECTED,
+ SPECTRE_MITIGATED,
+ SPECTRE_VULNERABLE,
+};
+
+enum {
+ __SPECTRE_V2_METHOD_BPIALL,
+ __SPECTRE_V2_METHOD_ICIALLU,
+ __SPECTRE_V2_METHOD_SMC,
+ __SPECTRE_V2_METHOD_HVC,
+ __SPECTRE_V2_METHOD_LOOP8,
+};
+
+enum {
+ SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
+ SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
+ SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
+ SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
+ SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
+};
+
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+void spectre_v2_update_state(unsigned int state, unsigned int methods);
+#else
+static inline void spectre_v2_update_state(unsigned int state,
+ unsigned int methods)
+{}
+#endif
+
+int spectre_bhb_update_vectors(unsigned int method);
+
+void cpu_v7_ca8_ibe(void);
+void cpu_v7_ca15_ibe(void);
+void cpu_v7_bugs_init(void);
+
+#endif
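The per-method enums are BIT() values because spectre_v2_update_state() takes a bitmask: a CPU can report more than one workable mitigation method at once. A hedged call sketch; the wrapper function is illustrative:

/* Hedged sketch: report Spectre v2 as mitigated by either the BPIALL
 * or the ICIALLU sequence (methods combine as a bitmask).
 */
static void example_report_spectre_v2(void)
{
	spectre_v2_update_state(SPECTRE_MITIGATED,
				SPECTRE_V2_METHOD_BPIALL |
				SPECTRE_V2_METHOD_ICIALLU);
}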
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..f610a773f2be 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
@@ -5,21 +6,15 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
+#include <linux/prefetch.h>
+#include <asm/barrier.h>
#include <asm/processor.h>
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
*/
-#define ALT_SMP(smp, up) \
- "9998: " smp "\n" \
- " .pushsection \".alt.smp.init\", \"a\"\n" \
- " .long 9998b\n" \
- " " up "\n" \
- " .popsection\n"
-
#ifdef CONFIG_THUMB2_KERNEL
-#define SEV ALT_SMP("sev.w", "nop.w")
/*
* For Thumb-2, special care is needed to ensure that the conditional WFE
* instruction really does assemble to exactly 4 bytes (as required by
@@ -27,35 +22,27 @@
 * assembler to insert an extra (16-bit) IT instruction, depending on the
* presence or absence of neighbouring conditional instructions.
*
- * To avoid this unpredictableness, an approprite IT is inserted explicitly:
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
-#define WFE(cond) ALT_SMP( \
+#define WFE(cond) __ALT_SMP_ASM( \
"it " cond "\n\t" \
"wfe" cond ".n", \
\
"nop.w" \
)
#else
-#define SEV ALT_SMP("sev", "nop")
-#define WFE(cond) ALT_SMP("wfe" cond, "nop")
+#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop")
#endif
+#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop))
+
static inline void dsb_sev(void)
{
-#if __LINUX_ARM_ARCH__ >= 7
- __asm__ __volatile__ (
- "dsb\n"
- SEV
- );
-#else
- __asm__ __volatile__ (
- "mcr p15, 0, %0, c7, c10, 4\n"
- SEV
- : : "r" (0)
- );
-#endif
+
+ dsb(ishst);
+ __asm__(SEV);
}
/*
@@ -66,17 +53,13 @@ static inline void dsb_sev(void)
* memory.
*/
-#define arch_spin_unlock_wait(lock) \
- do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
u32 newval;
arch_spinlock_t lockval;
+ prefetchw(&lock->slock);
__asm__ __volatile__(
"1: ldrex %0, [%3]\n"
" add %1, %0, %4\n"
@@ -89,7 +72,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
while (lockval.tickets.next != lockval.tickets.owner) {
wfe();
- lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+ lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
}
smp_mb();
@@ -100,6 +83,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
unsigned long contended, res;
u32 slock;
+ prefetchw(&lock->slock);
do {
__asm__ __volatile__(
" ldrex %0, [%3]\n"
@@ -107,7 +91,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
" subs %1, %0, %0, ror #16\n"
" addeq %0, %0, %4\n"
" strexeq %2, %0, [%3]"
- : "=&r" (slock), "=&r" (contended), "=r" (res)
+ : "=&r" (slock), "=&r" (contended), "=&r" (res)
: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
: "cc");
} while (res);
@@ -127,15 +111,19 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
dsb_sev();
}
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.owner == lock.tickets.next;
+}
+
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
- return tickets.owner != tickets.next;
+ return !arch_spin_value_unlocked(READ_ONCE(*lock));
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
- struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+ struct __raw_tickets tickets = READ_ONCE(lock->tickets);
return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended arch_spin_is_contended
@@ -152,6 +140,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -168,17 +157,21 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp;
+ unsigned long contended, res;
- __asm__ __volatile__(
-" ldrex %0, [%1]\n"
-" teq %0, #0\n"
-" strexeq %0, %2, [%1]"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "cc");
+ prefetchw(&rw->lock);
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " teq %0, #0\n"
+ " strexeq %1, %3, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc");
+ } while (res);
- if (tmp == 0) {
+ if (!contended) {
smp_mb();
return 1;
} else {
@@ -199,9 +192,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
dsb_sev();
}
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
@@ -218,12 +208,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
+ prefetchw(&rw->lock);
__asm__ __volatile__(
+" .syntax unified\n"
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
WFE("mi")
-" rsbpls %0, %1, #0\n"
+" rsbspl %0, %1, #0\n"
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
@@ -238,6 +230,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
smp_mb();
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
@@ -254,28 +247,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp, tmp2 = 1;
+ unsigned long contended, res;
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" adds %0, %0, #1\n"
-" strexpl %1, %0, [%2]\n"
- : "=&r" (tmp), "+r" (tmp2)
- : "r" (&rw->lock)
- : "cc");
+ prefetchw(&rw->lock);
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " adds %0, %0, #1\n"
+ " strexpl %1, %0, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock)
+ : "cc");
+ } while (res);
- smp_mb();
- return tmp2 == 0;
+ /* If the lock is negative, then it is already held for write. */
+ if (contended < 0x80000000) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
}
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_H */
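The ldrex/strex sequence in arch_spin_lock() above implements a ticket lock: a locker atomically reads the current "next" value while incrementing it, spins (sleeping in wfe) until "owner" reaches its ticket, and unlock increments "owner" and issues sev to wake waiters. A portable C11 sketch of the same algorithm, hedged: plain busy-wait atomics, hypothetical names, and none of the kernel's wfe/sev sleep-and-wake or barrier placement:

#include <stdatomic.h>
#include <stdint.h>

struct example_ticket_lock {
	_Atomic uint16_t next;
	_Atomic uint16_t owner;
};

static void example_ticket_lock(struct example_ticket_lock *l)
{
	uint16_t me = atomic_fetch_add(&l->next, 1);	/* take a ticket */

	while (atomic_load(&l->owner) != me)
		;					/* kernel: wfe() */
}

static void example_ticket_unlock(struct example_ticket_lock *l)
{
	atomic_fetch_add(&l->owner, 1);			/* kernel: dsb_sev() */
}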
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index b262d2f8b478..5404a2a96bf3 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -1,8 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+# error "Please do not include this file directly."
#endif
#define TICKET_SHIFT 16
@@ -25,7 +26,7 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
typedef struct {
- volatile unsigned int lock;
+ u32 lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/arm/include/asm/stackprotector.h b/arch/arm/include/asm/stackprotector.h
index de003327be97..0bd4979759f1 100644
--- a/arch/arm/include/asm/stackprotector.h
+++ b/arch/arm/include/asm/stackprotector.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* GCC stack protector support.
*
@@ -5,15 +6,16 @@
* the stack frame and verifying that it hasn't been overwritten when
* returning from the function. The pattern is called stack canary
* and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on ARM. This unfortunately means that on SMP
- * we cannot have a different canary value per task.
+ * "__stack_chk_guard" on ARM. This prevents SMP systems from using a
+ * different value for each task unless we enable a GCC plugin that
+ * replaces these symbol references with references to each task's own
+ * value.
*/
#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H 1
-#include <linux/random.h>
-#include <linux/version.h>
+#include <asm/thread_info.h>
extern unsigned long __stack_chk_guard;
@@ -25,14 +27,12 @@ extern unsigned long __stack_chk_guard;
*/
static __always_inline void boot_init_stack_canary(void)
{
- unsigned long canary;
-
- /* Try to get a semi random initial value. */
- get_random_bytes(&canary, sizeof(canary));
- canary ^= LINUX_VERSION_CODE;
+ unsigned long canary = get_random_canary();
current->stack_canary = canary;
+#ifndef CONFIG_STACKPROTECTOR_PER_TASK
__stack_chk_guard = current->stack_canary;
+#endif
}
#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index 4d0a16441b29..ba2f771cca23 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -1,15 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H
+#include <linux/llist.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+
struct stackframe {
+ /*
+ * FP member should hold R7 when CONFIG_THUMB2_KERNEL is enabled
+ * and R11 otherwise.
+ */
unsigned long fp;
unsigned long sp;
unsigned long lr;
unsigned long pc;
+
+ /* address of the LR value on the stack */
+ unsigned long *lr_addr;
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+ struct task_struct *tsk;
+#endif
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
+ bool ex_frame;
+#endif
};
+static inline bool on_thread_stack(void)
+{
+ unsigned long delta = current_stack_pointer ^ (unsigned long)current->stack;
+
+ return delta < THREAD_SIZE;
+}
+
+static __always_inline
+void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
+{
+ frame->fp = frame_pointer(regs);
+ frame->sp = regs->ARM_sp;
+ frame->lr = regs->ARM_lr;
+ frame->pc = regs->ARM_pc;
+#ifdef CONFIG_KRETPROBES
+ frame->kr_cur = NULL;
+ frame->tsk = current;
+#endif
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
+ frame->ex_frame = in_entry_text(frame->pc);
+#endif
+}
+
extern int unwind_frame(struct stackframe *frame);
extern void walk_stackframe(struct stackframe *frame,
- int (*fn)(struct stackframe *, void *), void *data);
+ bool (*fn)(void *, unsigned long), void *data);
+extern void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ unsigned long top);
+extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
#endif /* __ASM_STACKTRACE_H */
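on_thread_stack() above depends on stacks being THREAD_SIZE-aligned: XORing the stack pointer with the task's stack base leaves only the low bits when both addresses fall in the same THREAD_SIZE window, so the result compares below THREAD_SIZE exactly for on-stack pointers. A standalone worked check, assuming an arbitrary 8 KiB THREAD_SIZE and made-up addresses:

#include <assert.h>

int main(void)
{
	unsigned long base = 0xc1234000UL;	/* 8 KiB-aligned stack base */

	assert((0xc1235f80UL ^ base) < 0x2000UL);	/* same window: on stack */
	assert(!((0xc1237f80UL ^ base) < 0x2000UL));	/* next window: off stack */
	return 0;
}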
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index cf4f3aad0fc1..6c607c68f3ad 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -1,9 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_STRING_H
#define __ASM_ARM_STRING_H
/*
* We don't do inline string functions, since the
* optimised inline asm versions are not small.
+ *
+ * The __underscore versions of some functions are for KASan to be able
+ * to replace them with instrumented versions.
*/
#define __HAVE_ARCH_STRRCHR
@@ -14,28 +18,51 @@ extern char * strchr(const char * s, int c);
#define __HAVE_ARCH_MEMCPY
extern void * memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMMOVE
extern void * memmove(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMCHR
extern void * memchr(const void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMSET
extern void * memset(void *, int, __kernel_size_t);
+extern void *__memset(void *s, int c, __kernel_size_t n);
-extern void __memzero(void *ptr, __kernel_size_t n);
-
-#define memset(p,v,n) \
- ({ \
- void *__p = (p); size_t __n = n; \
- if ((__n) != 0) { \
- if (__builtin_constant_p((v)) && (v) == 0) \
- __memzero((__p),(__n)); \
- else \
- memset((__p),(v),(__n)); \
- } \
- (__p); \
- })
+#define __HAVE_ARCH_MEMSET32
+extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
+static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
+{
+ return __memset32(p, v, n * 4);
+}
+
+#define __HAVE_ARCH_MEMSET64
+extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi);
+static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
+{
+ return __memset64(p, v, n * 8, v >> 32);
+}
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * must use non-instrumented versions of the mem*
+ * functions named __memcpy() etc. All such kernel code has
+ * been tagged with KASAN_SANITIZE_file.o = n, which means
+ * that the address sanitization argument isn't passed to the
+ * compiler, and __SANITIZE_ADDRESS__ is not set. As a result
+ * these defines kick in.
+ */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
#endif
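memset32() and memset64() above count in elements and scale to bytes (n * 4, n * 8) before reaching the assembly routines; __memset64() additionally splits the 64-bit pattern into two 32-bit register halves. A hedged usage sketch with illustrative buffers:

/* Hedged sketch: fill pattern tables element-wise. */
static void example_fill(u32 *tbl32, u64 *tbl64)
{
	memset32(tbl32, 0xdeadbeef, 16);		/* 16 words  = 64 bytes */
	memset64(tbl64, 0x0123456789abcdefULL, 8);	/* 8 dwords = 64 bytes */
}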
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
index cd20029bcd94..be81b9ca2ea1 100644
--- a/arch/arm/include/asm/suspend.h
+++ b/arch/arm/include/asm/suspend.h
@@ -1,12 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_SUSPEND_H
#define __ASM_ARM_SUSPEND_H
+#include <linux/types.h>
+
struct sleep_save_sp {
u32 *save_ptr_stash;
u32 save_ptr_stash_phys;
};
extern void cpu_resume(void);
+extern void cpu_resume_no_hyp(void);
+extern void cpu_resume_arm(void);
extern int cpu_suspend(unsigned long, int (*)(unsigned long));
+extern void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr);
#endif
diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h
index 537fc9b91889..c6051823048b 100644
--- a/arch/arm/include/asm/swab.h
+++ b/arch/arm/include/asm/swab.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/byteorder.h
*
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index fa09e6b49bf1..9372348516ce 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -1,7 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_SWITCH_TO_H
#define __ASM_ARM_SWITCH_TO_H
#include <linux/thread_info.h>
+#include <asm/smp_plat.h>
+
+/*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+ * to ensure that the maintenance completes in case we migrate to another
+ * CPU.
+ */
+#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#define __complete_pending_tlbi() dsb(ish)
+#else
+#define __complete_pending_tlbi()
+#endif
/*
* switch_to(prev, next) should switch from task `prev' to `next'
@@ -12,6 +26,9 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
+ __complete_pending_tlbi(); \
+ if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || is_smp()) \
+ __this_cpu_write(__entry_task, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 63479eecbf76..f46b3c570f92 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -1,8 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SYNC_BITOPS_H__
#define __ASM_SYNC_BITOPS_H__
#include <asm/bitops.h>
-#include <asm/system.h>
/* sync_bitops functions are equivalent to the SMP implementation of the
 * original functions, independently of CONFIG_SMP being defined.
@@ -14,14 +14,35 @@
* ops which are SMP safe even on a UP kernel.
*/
+/*
+ * Unordered
+ */
+
#define sync_set_bit(nr, p) _set_bit(nr, p)
#define sync_clear_bit(nr, p) _clear_bit(nr, p)
#define sync_change_bit(nr, p) _change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define sync_cmpxchg cmpxchg
+/*
+ * Fully ordered
+ */
+
+int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_set_bit(nr, p) _sync_test_and_set_bit(nr, p)
+
+int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p)
+
+int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_change_bit(nr, p) _sync_test_and_change_bit(nr, p)
+
+#define arch_sync_cmpxchg(ptr, old, new) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __smp_mb__before_atomic(); \
+ __ret = arch_cmpxchg_relaxed((ptr), (old), (new)); \
+ __smp_mb__after_atomic(); \
+ __ret; \
+})
#endif
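arch_sync_cmpxchg() above brackets a relaxed cmpxchg with explicit barriers so the operation is fully ordered even on a UP kernel, which matters when the other observer is a different agent (a hypervisor back end, for example) rather than another Linux CPU. A hedged sketch of the retry loop it supports; kernel callers would normally reach it through the generic sync_cmpxchg() wrapper, and the helper name is illustrative:

/* Hedged sketch: fully ordered OR-in-place on memory shared with
 * another agent, retrying until the compare-and-swap succeeds.
 */
static void example_sync_or(unsigned int *shared, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = READ_ONCE(*shared);
		new = old | bits;
	} while (arch_sync_cmpxchg(shared, old, new) != old);
}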
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index f1d96d4e8092..18b102a30741 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Access to user system call parameters and results
*
@@ -7,7 +8,7 @@
#ifndef _ASM_ARM_SYSCALL_H
#define _ASM_ARM_SYSCALL_H
-#include <linux/audit.h> /* for AUDIT_ARCH_* */
+#include <uapi/linux/audit.h> /* for AUDIT_ARCH_* */
#include <linux/elf.h> /* for ELF_EM */
#include <linux/err.h>
#include <linux/sched.h>
@@ -21,7 +22,24 @@ extern const unsigned long sys_call_table[];
static inline int syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
- return task_thread_info(task)->syscall;
+ if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))
+ return task_thread_info(task)->abi_syscall;
+
+ if (task_thread_info(task)->abi_syscall == -1)
+ return -1;
+
+ return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK;
+}
+
+static inline bool __in_oabi_syscall(struct task_struct *task)
+{
+ return IS_ENABLED(CONFIG_OABI_COMPAT) &&
+ (task_thread_info(task)->abi_syscall & __NR_OABI_SYSCALL_BASE);
+}
+
+static inline bool in_oabi_syscall(void)
+{
+ return __in_oabi_syscall(current);
}
static inline void syscall_rollback(struct task_struct *task,
@@ -50,55 +68,56 @@ static inline void syscall_set_return_value(struct task_struct *task,
regs->ARM_r0 = (long) error ? error : val;
}
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ if (nr == -1) {
+ task_thread_info(task)->abi_syscall = -1;
+ /*
+ * When the syscall number is set to -1, the syscall will be
+ * skipped. In this case the syscall return value has to be
+ * set explicitly, otherwise the first syscall argument is
+ * returned as the syscall return value.
+ */
+ syscall_set_return_value(task, regs, -ENOSYS, 0);
+ return;
+ }
+ if ((IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))) {
+ task_thread_info(task)->abi_syscall = nr;
+ return;
+ }
+ task_thread_info(task)->abi_syscall =
+ (task_thread_info(task)->abi_syscall & ~__NR_SYSCALL_MASK) |
+ (nr & __NR_SYSCALL_MASK);
+}
+
#define SYSCALL_MAX_ARGS 7
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
- if (i + n > SYSCALL_MAX_ARGS) {
- unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
- unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
- pr_warning("%s called with max args %d, handling only %d\n",
- __func__, i + n, SYSCALL_MAX_ARGS);
- memset(args_bad, 0, n_bad * sizeof(args[0]));
- n = SYSCALL_MAX_ARGS - i;
- }
-
- if (i == 0) {
- args[0] = regs->ARM_ORIG_r0;
- args++;
- i++;
- n--;
- }
+ args[0] = regs->ARM_ORIG_r0;
+ args++;
- memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+ memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- if (i + n > SYSCALL_MAX_ARGS) {
- pr_warning("%s called with max args %d, handling only %d\n",
- __func__, i + n, SYSCALL_MAX_ARGS);
- n = SYSCALL_MAX_ARGS - i;
- }
-
- if (i == 0) {
- regs->ARM_ORIG_r0 = args[0];
- args++;
- i++;
- n--;
- }
-
- memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+ memcpy(&regs->ARM_r0, args, 6 * sizeof(args[0]));
+ /*
+ * Also copy the first argument into ARM_ORIG_r0
+ * so that syscall_get_arguments() would return it
+ * instead of the previous value.
+ */
+ regs->ARM_ORIG_r0 = regs->ARM_r0;
}
-static inline int syscall_get_arch(struct task_struct *task,
- struct pt_regs *regs)
+static inline int syscall_get_arch(struct task_struct *task)
{
/* ARM tasks don't change audit architectures on the fly. */
return AUDIT_ARCH_ARM;
diff --git a/arch/arm/include/asm/syscalls.h b/arch/arm/include/asm/syscalls.h
new file mode 100644
index 000000000000..5912e7cffa6a
--- /dev/null
+++ b/arch/arm/include/asm/syscalls.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_SYSCALLS_H
+#define __ASM_SYSCALLS_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+struct pt_regs;
+asmlinkage int sys_sigreturn(struct pt_regs *regs);
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
+asmlinkage long sys_arm_fadvise64_64(int fd, int advice,
+ loff_t offset, loff_t len);
+
+struct oldabi_stat64;
+asmlinkage long sys_oabi_stat64(const char __user * filename,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_lstat64(const char __user * filename,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_fstat64(unsigned long fd,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_fstatat64(int dfd,
+ const char __user *filename,
+ struct oldabi_stat64 __user *statbuf,
+ int flag);
+asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
+ unsigned long arg);
+struct oabi_epoll_event;
+asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
+ struct oabi_epoll_event __user *event);
+struct oabi_sembuf;
+struct old_timespec32;
+asmlinkage long sys_oabi_semtimedop(int semid,
+ struct oabi_sembuf __user *tsops,
+ unsigned nsops,
+ const struct old_timespec32 __user *timeout);
+asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops,
+ unsigned nsops);
+asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
+ void __user *ptr, long fifth);
+struct sockaddr;
+asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen);
+asmlinkage long sys_oabi_connect(int fd, struct sockaddr __user *addr, int addrlen);
+asmlinkage long sys_oabi_sendto(int fd, void __user *buff,
+ size_t len, unsigned flags,
+ struct sockaddr __user *addr,
+ int addrlen);
+struct user_msghdr;
+asmlinkage long sys_oabi_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args);
+
+#endif
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
deleted file mode 100644
index 368165e33c1c..000000000000
--- a/arch/arm/include/asm/system.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
-#include <asm/barrier.h>
-#include <asm/compiler.h>
-#include <asm/cmpxchg.h>
-#include <asm/switch_to.h>
-#include <asm/system_info.h>
-#include <asm/system_misc.h>
diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h
index 720ea0320a6d..ef7fdb588b5f 100644
--- a/arch/arm/include/asm/system_info.h
+++ b/arch/arm/include/asm/system_info.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_SYSTEM_INFO_H
#define __ASM_ARM_SYSTEM_INFO_H
@@ -17,6 +18,7 @@
/* information about the system we're running on */
extern unsigned int system_rev;
+extern const char *system_serial;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index a3d61ad984af..98b37340376b 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_SYSTEM_MISC_H
#define __ASM_ARM_SYSTEM_MISC_H
@@ -7,13 +8,27 @@
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/reboot.h>
+#include <linux/percpu.h>
extern void cpu_init(void);
void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
extern void (*arm_pm_idle)(void);
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+typedef void (*harden_branch_predictor_fn_t)(void);
+DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+static inline void harden_branch_predictor(void)
+{
+ harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
+ smp_processor_id());
+ if (fn)
+ fn();
+}
+#else
+#define harden_branch_predictor() do { } while (0)
+#endif
+
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h
index 8578d726ad78..e1f7dca86a22 100644
--- a/arch/arm/include/asm/tcm.h
+++ b/arch/arm/include/asm/tcm.h
@@ -1,33 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Copyright (C) 2008-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
*
* Author: Rickard Andersson <rickard.andersson@stericsson.com>
* Author: Linus Walleij <linus.walleij@stericsson.com>
- *
*/
#ifndef __ASMARM_TCM_H
#define __ASMARM_TCM_H
-#ifndef CONFIG_HAVE_TCM
-#error "You should not be including tcm.h unless you have a TCM!"
-#endif
+#ifdef CONFIG_HAVE_TCM
#include <linux/compiler.h>
/* Tag variables with this */
-#define __tcmdata __section(.tcm.data)
+#define __tcmdata __section(".tcm.data")
/* Tag constants with this */
-#define __tcmconst __section(.tcm.rodata)
+#define __tcmconst __section(".tcm.rodata")
/* Tag functions inside TCM called from outside TCM with this */
-#define __tcmfunc __attribute__((long_call)) __section(.tcm.text) noinline
+#define __tcmfunc __attribute__((long_call)) __section(".tcm.text") noinline
/* Tag function inside TCM called from inside TCM with this */
-#define __tcmlocalfunc __section(.tcm.text)
+#define __tcmlocalfunc __section(".tcm.text")
void *tcm_alloc(size_t len);
void tcm_free(void *addr, size_t len);
bool tcm_dtcm_present(void);
bool tcm_itcm_present(void);
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+static inline void tcm_init(void)
+{
+}
+#endif
#endif
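
The __tcmdata/__tcmconst/__tcmfunc tags above only place symbols into dedicated sections; the kernel linker script and tcm_init() do the actual move into tightly coupled memory. A small, compilable sketch of the tagging mechanics (section names as in the header; real TCM placement is not modeled):

        #include <stdio.h>

        #define __tcmdata  __attribute__((section(".tcm.data")))
        #define __tcmconst __attribute__((section(".tcm.rodata")))

        static int __tcmdata fast_counter;              /* placed in .tcm.data */
        static const int __tcmconst fast_table[4] = { 1, 2, 3, 5 };

        int main(void)
        {
                fast_counter += fast_table[2];
                printf("counter = %d\n", fast_counter); /* prints 3 */
                return 0;
        }
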
diff --git a/arch/arm/include/asm/text-patching.h b/arch/arm/include/asm/text-patching.h
new file mode 100644
index 000000000000..0b48247c4600
--- /dev/null
+++ b/arch/arm/include/asm/text-patching.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KERNEL_PATCH_H
+#define _ARM_KERNEL_PATCH_H
+
+void patch_text(void *addr, unsigned int insn);
+void __patch_text_real(void *addr, unsigned int insn, bool remap);
+
+static inline void __patch_text(void *addr, unsigned int insn)
+{
+ __patch_text_real(addr, insn, true);
+}
+
+static inline void __patch_text_early(void *addr, unsigned int insn)
+{
+ __patch_text_real(addr, insn, false);
+}
+
+#endif
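
A hedged sketch of how a client of this header might be written; install_probe() and its opcode are illustrative rather than taken from an in-tree user. patch_text() itself handles serializing against concurrently executing code and the I/D-cache maintenance:

        #include <asm/text-patching.h>

        /* Hypothetical helper: overwrite one ARM instruction with a breakpoint.
         * 0xe1200070 is the ARM-mode encoding of BKPT #0. */
        static void install_probe(void *addr)
        {
                patch_text(addr, 0xe1200070);
        }
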
diff --git a/arch/arm/include/asm/therm.h b/arch/arm/include/asm/therm.h
index f002f0197d78..17b0bc9b5e9b 100644
--- a/arch/arm/include/asm/therm.h
+++ b/arch/arm/include/asm/therm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/therm.h: Definitions for Dallas Semiconductor
* DS1620 thermometer driver (as used in the Rebel.com NetWinder)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 214d4158089a..943ffcf069d2 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/thread_info.h
*
* Copyright (C) 2002 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_THREAD_INFO_H
#define __ASM_ARM_THREAD_INFO_H
@@ -14,20 +11,36 @@
#include <linux/compiler.h>
#include <asm/fpstate.h>
+#include <asm/page.h>
+#ifdef CONFIG_KASAN
+/*
+ * KASan uses a lot of extra stack space, so the thread size order needs
+ * to be increased.
+ */
+#define THREAD_SIZE_ORDER 2
+#else
#define THREAD_SIZE_ORDER 1
-#define THREAD_SIZE 8192
+#endif
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP (THREAD_SIZE - 8)
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
+#define OVERFLOW_STACK_SIZE SZ_4K
+
#ifndef __ASSEMBLY__
struct task_struct;
-struct exec_domain;
-#include <asm/types.h>
-#include <asm/domain.h>
+DECLARE_PER_CPU(struct task_struct *, __entry_task);
-typedef unsigned long mm_segment_t;
+#include <asm/types.h>
+#include <asm/traps.h>
struct cpu_context_save {
__u32 r4;
@@ -50,66 +63,41 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
- mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
__u32 cpu; /* cpu */
__u32 cpu_domain; /* cpu domain */
struct cpu_context_save cpu_context; /* cpu context */
- __u32 syscall; /* syscall number */
- __u8 used_cp[16]; /* thread used copro */
+ __u32 abi_syscall; /* ABI type and syscall nr */
unsigned long tp_value[2]; /* TLS registers */
-#ifdef CONFIG_CRUNCH
- struct crunch_state crunchstate;
-#endif
union fp_state fpstate __attribute__((aligned(8)));
union vfp_state vfpstate;
#ifdef CONFIG_ARM_THUMBEE
unsigned long thumbee_state; /* ThumbEE Handler Base register */
#endif
- struct restart_block restart_block;
};
#define INIT_THREAD_INFO(tsk) \
{ \
- .task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
-/*
- * how to get the thread information struct from C
- */
-static inline struct thread_info *current_thread_info(void) __attribute_const__;
-
-static inline struct thread_info *current_thread_info(void)
+static inline struct task_struct *thread_task(struct thread_info *ti)
{
- register unsigned long sp asm ("sp");
- return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+ return (struct task_struct *)ti;
}
#define thread_saved_pc(tsk) \
((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_sp(tsk) \
((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
+
+#ifndef CONFIG_THUMB2_KERNEL
#define thread_saved_fp(tsk) \
((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
-
-extern void crunch_task_disable(struct thread_info *);
-extern void crunch_task_copy(struct thread_info *, void *);
-extern void crunch_task_restore(struct thread_info *, void *);
-extern void crunch_task_release(struct thread_info *);
+#else
+#define thread_saved_fp(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
+#endif
extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
@@ -117,54 +105,64 @@ extern void iwmmxt_task_restore(struct thread_info *, void *);
extern void iwmmxt_task_release(struct thread_info *);
extern void iwmmxt_task_switch(struct thread_info *);
+extern int iwmmxt_undef_handler(struct pt_regs *, u32);
+
+static inline void register_iwmmxt_undef_handler(void)
+{
+ static struct undef_hook iwmmxt_undef_hook = {
+ .instr_mask = 0x0c000e00,
+ .instr_val = 0x0c000000,
+ .cpsr_mask = MODE_MASK | PSR_T_BIT,
+ .cpsr_val = USR_MODE,
+ .fn = iwmmxt_undef_handler,
+ };
+
+ register_undef_hook(&iwmmxt_undef_hook);
+}
+
extern void vfp_sync_hwstate(struct thread_info *);
extern void vfp_flush_hwstate(struct thread_info *);
struct user_vfp;
struct user_vfp_exc;
-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
- struct user_vfp_exc __user *);
-extern int vfp_restore_user_hwstate(struct user_vfp __user *,
- struct user_vfp_exc __user *);
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
+extern int vfp_restore_user_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
#endif
/*
- * We use bit 30 of the preempt_count to indicate that kernel
- * preemption is occurring. See <asm/hardirq.h>.
- */
-#define PREEMPT_ACTIVE 0x40000000
-
-/*
* thread information flags:
- * TIF_SYSCALL_TRACE - syscall trace active
- * TIF_SYSCAL_AUDIT - syscall auditing active
- * TIF_SIGPENDING - signal pending
- * TIF_NEED_RESCHED - rescheduling necessary
- * TIF_NOTIFY_RESUME - callback before returning to user
* TIF_USEDFPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
+ *
+ * Any bit in the range of 0..15 will cause do_work_pending() to be invoked.
*/
-#define TIF_SIGPENDING 0
-#define TIF_NEED_RESCHED 1
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
-#define TIF_SYSCALL_TRACE 8
-#define TIF_SYSCALL_AUDIT 9
-#define TIF_SYSCALL_TRACEPOINT 10
-#define TIF_SECCOMP 11 /* seccomp syscall filtering active */
-#define TIF_NOHZ 12 /* in adaptive nohz mode */
+#define TIF_UPROBE 3 /* breakpointed or singlestepping */
+#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */
+
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK 20
-#define TIF_SWITCH_MM 22 /* deferred switch_mm */
+#define TIF_RESTORE_SIGMASK 19
+#define TIF_SYSCALL_TRACE 20 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 21 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 22 /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP 23 /* seccomp syscall filtering active */
+
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
/* Checks for any syscall work in entry-common.S */
@@ -174,7 +172,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
/*
* Change these and you break ASM code in entry-common.S
*/
-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NOTIFY_SIGNAL)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
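
As the flag comment above notes, the work-pending bits are deliberately kept in the 0..15 range so the entry code can test them with one 16-bit mask, while the syscall-work bits sit at 20 and up. A quick userspace illustration of that split (bit numbers copied from the header):

        #include <stdio.h>

        #define TIF_SIGPENDING          0
        #define TIF_NOTIFY_RESUME       2
        #define TIF_SYSCALL_TRACE       20

        int main(void)
        {
                unsigned long flags = (1UL << TIF_NOTIFY_RESUME) |
                                      (1UL << TIF_SYSCALL_TRACE);

                if (flags & 0xffff)                     /* the bits-0..15 fast test */
                        puts("do_work_pending() would be invoked");
                if (flags & (1UL << TIF_SYSCALL_TRACE))
                        puts("syscall tracing work pending");
                return 0;
        }
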
diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h
index 1dc980675894..1c1542e2ed63 100644
--- a/arch/arm/include/asm/thread_notify.h
+++ b/arch/arm/include/asm/thread_notify.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/thread_notify.h
*
* Copyright (C) 2006 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASMARM_THREAD_NOTIFY_H
#define ASMARM_THREAD_NOTIFY_H
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index 83f2aa83899c..6d1337c169cd 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -1,24 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/timex.h
*
* Copyright (C) 1997,1998 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Architecture Specific TIME specifications
*/
#ifndef _ASMARM_TIMEX_H
#define _ASMARM_TIMEX_H
-#ifdef CONFIG_ARCH_MULTIPLATFORM
-#define CLOCK_TICK_RATE 1000000
-#else
-#include <mach/timex.h>
-#endif
-
typedef unsigned long cycles_t;
#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
+#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
#endif
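
The new random_get_entropy() leans on the GNU a ?: b extension: take the cycle counter value unless it reads zero (no usable timer, or a counter momentarily at zero), then use the fallback. The shape of that expression, modeled with stub functions:

        #include <stdio.h>

        static unsigned long get_cycles(void)
        {
                return 0;       /* pretend read_current_timer() failed */
        }

        static unsigned long random_get_entropy_fallback(void)
        {
                return 12345;   /* the real fallback derives from jiffies */
        }

        int main(void)
        {
                unsigned long e = get_cycles() ?: random_get_entropy_fallback();

                printf("%lu\n", e);     /* 12345: the fallback was taken */
                return 0;
        }
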
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e721..ea4fbe7b17f6 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/tlb.h
*
* Copyright (C) 2002 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Experimentation shows that on a StrongARM, it appears to be faster
* to use the "invalidate whole tlb" rather than "invalidate single
* tlb" for this.
@@ -29,192 +26,35 @@
#else /* !CONFIG_MMU */
-#include <linux/swap.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-
-#define MMU_GATHER_BUNDLE 8
-
-/*
- * TLB handling. This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int fullmm;
- struct vm_area_struct *vma;
- unsigned long range_start;
- unsigned long range_end;
- unsigned int nr;
- unsigned int max;
- struct page **pages;
- struct page *local[MMU_GATHER_BUNDLE];
-};
-
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-/*
- * This is unnecessarily complex. There's three ways the TLB shootdown
- * code is used:
- * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
- * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- * tlb->vma will be non-NULL.
- * 2. Unmapping all vmas. See exit_mmap().
- * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- * tlb->vma will be non-NULL. Additionally, page tables will be freed.
- * 3. Unmapping argument pages. See shift_arg_pages().
- * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- * tlb->vma will be NULL.
- */
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
- if (tlb->fullmm || !tlb->vma)
- flush_tlb_mm(tlb->mm);
- else if (tlb->range_end > 0) {
- flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
- tlb->range_start = TASK_SIZE;
- tlb->range_end = 0;
- }
-}
-
-static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
-{
- if (!tlb->fullmm) {
- if (addr < tlb->range_start)
- tlb->range_start = addr;
- if (addr + PAGE_SIZE > tlb->range_end)
- tlb->range_end = addr + PAGE_SIZE;
- }
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
- unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
- if (addr) {
- tlb->pages = (void *)addr;
- tlb->max = PAGE_SIZE / sizeof(struct page *);
- }
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- tlb_flush(tlb);
- free_pages_and_swap_cache(tlb->pages, tlb->nr);
- tlb->nr = 0;
- if (tlb->pages == tlb->local)
- __tlb_alloc_page(tlb);
-}
-
-static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
-{
- tlb->mm = mm;
- tlb->fullmm = fullmm;
- tlb->vma = NULL;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
- tlb->nr = 0;
- __tlb_alloc_page(tlb);
-}
-
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-
- if (tlb->pages != tlb->local)
- free_pages((unsigned long)tlb->pages, 0);
-}
-
-/*
- * Memorize the range for the TLB flush.
- */
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
-{
- tlb_add_flush(tlb, addr);
-}
-
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush. When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
- if (!tlb->fullmm) {
- flush_cache_range(vma, vma->vm_start, vma->vm_end);
- tlb->vma = vma;
- tlb->range_start = TASK_SIZE;
- tlb->range_end = 0;
- }
-}
+#include <asm-generic/tlb.h>
static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
- if (!tlb->fullmm)
- tlb_flush(tlb);
-}
+ struct ptdesc *ptdesc = page_ptdesc(pte);
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- tlb->pages[tlb->nr++] = page;
- VM_BUG_ON(tlb->nr > tlb->max);
- return tlb->max - tlb->nr;
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- if (!__tlb_remove_page(tlb, page))
- tlb_flush_mmu(tlb);
-}
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
- unsigned long addr)
-{
- pgtable_page_dtor(pte);
-
-#ifdef CONFIG_ARM_LPAE
- tlb_add_flush(tlb, addr);
-#else
+#ifndef CONFIG_ARM_LPAE
/*
* With the classic ARM MMU, a pte page has two corresponding pmd
* entries, each covering 1MB.
*/
- addr &= PMD_MASK;
- tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
- tlb_add_flush(tlb, addr + SZ_1M);
+ addr = (addr & PMD_MASK) + SZ_1M;
+ __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif
- tlb_remove_page(tlb, pte);
+ tlb_remove_ptdesc(tlb, ptdesc);
}
-static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
- unsigned long addr)
+static inline void
+__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
- tlb_add_flush(tlb, addr);
- tlb_remove_page(tlb, virt_to_page(pmdp));
-#endif
-}
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
-static inline void
-tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
-{
- tlb_add_flush(tlb, addr);
+ tlb_remove_ptdesc(tlb, ptdesc);
+#endif
}
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm) do { } while (0)
-
#endif /* CONFIG_MMU */
#endif
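
The non-LPAE branch of the rewritten __pte_free_tlb() widens the gather range so that both 1 MiB hardware sections backed by one PTE page are flushed. A plain-C check of that arithmetic (PMD_MASK and SZ_1M as on classic two-level ARM, where a Linux pmd entry spans 2 MiB):

        #include <stdio.h>

        #define PAGE_SIZE       4096UL
        #define SZ_1M           0x100000UL
        #define PMD_MASK        (~(2 * SZ_1M - 1))      /* 2 MiB per pmd entry */

        int main(void)
        {
                unsigned long addr = 0x12345678UL;

                addr = (addr & PMD_MASK) + SZ_1M;       /* midpoint of the pmd entry */
                /* one page either side of the midpoint: hits both 1 MiB halves */
                printf("flush [%#lx, %#lx)\n", addr - PAGE_SIZE, addr + PAGE_SIZE);
                return 0;
        }
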
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index fdbb9e369745..38c6e4a2a0b6 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -1,15 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/tlbflush.h
*
* Copyright (C) 1999-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H
+#ifndef __ASSEMBLY__
+# include <linux/mm_types.h>
+#endif
+
#ifdef CONFIG_MMU
#include <asm/glue.h>
@@ -252,7 +253,7 @@ extern struct cpu_tlb_fns cpu_tlb;
* space.
* - mm - mm_struct describing address space
*
- * flush_tlb_range(mm,start,end)
+ * flush_tlb_range(vma,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
@@ -260,18 +261,11 @@ extern struct cpu_tlb_fns cpu_tlb;
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
- * flush_tlb_page(vaddr,vma)
+ * flush_tlb_page(vma, uaddr)
*
* Invalidate the specified page in the specified address range.
+ * - vma - vm_area_struct describing address range
 * - uaddr - virtual address (may not be aligned)
- * - vma - vma_struct describing address range
- *
- * flush_kern_tlb_page(kaddr)
- *
- * Invalidate the TLB entry for the specified page. The address
- * will be in the kernels virtual memory space. Current uses
- * only require the D-TLB to be invalidated.
- * - kaddr - Kernel virtual memory address
*/
/*
@@ -319,67 +313,110 @@ extern struct cpu_tlb_fns cpu_tlb;
#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg)
#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)
-static inline void local_flush_tlb_all(void)
+static inline void __local_flush_tlb_all(void)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
- if (tlb_flag(TLB_WB))
- dsb();
-
tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
- tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+}
+
+static inline void local_flush_tlb_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_all();
+ tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);
if (tlb_flag(TLB_BARRIER)) {
- dsb();
+ dsb(nsh);
isb();
}
}
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
+static inline void __flush_tlb_all(void)
{
const int zero = 0;
- const int asid = ASID(mm);
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_WB))
- dsb();
+ dsb(ishst);
+
+ __local_flush_tlb_all();
+ tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+
+ if (tlb_flag(TLB_BARRIER)) {
+ dsb(ish);
+ isb();
+ }
+}
+
+static inline void __local_flush_tlb_mm(struct mm_struct *mm)
+{
+ const int zero = 0;
+ const int asid = ASID(mm);
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
- if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
}
- put_cpu();
}
tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ const int asid = ASID(mm);
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_mm(mm);
+ tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
+
+ if (tlb_flag(TLB_BARRIER))
+ dsb(nsh);
+}
+
+static inline void __flush_tlb_mm(struct mm_struct *mm)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_mm(mm);
#ifdef CONFIG_ARM_ERRATA_720789
- tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero);
+ tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
#else
- tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid);
+ tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
#endif
if (tlb_flag(TLB_BARRIER))
- dsb();
+ dsb(ish);
}
static inline void
-local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
- if (tlb_flag(TLB_WB))
- dsb();
-
if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
@@ -392,6 +429,36 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_page(vma, uaddr);
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);
+
+ if (tlb_flag(TLB_BARRIER))
+ dsb(nsh);
+}
+
+static inline void
+__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_page(vma, uaddr);
#ifdef CONFIG_ARM_ERRATA_720789
tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
#else
@@ -399,19 +466,14 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
#endif
if (tlb_flag(TLB_BARRIER))
- dsb();
+ dsb(ish);
}
-static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
- kaddr &= PAGE_MASK;
-
- if (tlb_flag(TLB_WB))
- dsb();
-
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -421,42 +483,76 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
+}
+
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ kaddr &= PAGE_MASK;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_kernel_page(kaddr);
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);
+
+ if (tlb_flag(TLB_BARRIER)) {
+ dsb(nsh);
+ isb();
+ }
+}
+
+static inline void __flush_tlb_kernel_page(unsigned long kaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ kaddr &= PAGE_MASK;
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_kernel_page(kaddr);
tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
if (tlb_flag(TLB_BARRIER)) {
- dsb();
+ dsb(ish);
isb();
}
}
-static inline void local_flush_bp_all(void)
+/*
+ * Branch predictor maintenance is paired with full TLB invalidation, so
+ * there is no need for any barriers here.
+ */
+static inline void __local_flush_bp_all(void)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
- if (tlb_flag(TLB_V7_UIS_BP))
- asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
- else if (tlb_flag(TLB_V6_BP))
+ if (tlb_flag(TLB_V6_BP))
asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
-
- if (tlb_flag(TLB_BARRIER))
- isb();
}
-#ifdef CONFIG_ARM_ERRATA_798181
-static inline void dummy_flush_tlb_a15_erratum(void)
+static inline void local_flush_bp_all(void)
{
- /*
- * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
- */
- asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
- dsb();
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ __local_flush_bp_all();
+ if (tlb_flag(TLB_V7_UIS_BP))
+ asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
}
-#else
-static inline void dummy_flush_tlb_a15_erratum(void)
+
+static inline void __flush_bp_all(void)
{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ __local_flush_bp_all();
+ if (tlb_flag(TLB_V7_UIS_BP))
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
}
-#endif
/*
* flush_pmd_entry
@@ -479,7 +575,7 @@ static inline void flush_pmd_entry(void *pmd)
tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
if (tlb_flag(TLB_WB))
- dsb();
+ dsb(ishst);
}
static inline void clean_pmd_entry(void *pmd)
@@ -523,18 +619,22 @@ extern void flush_bp_all(void);
* If PG_dcache_clean is not set for the page, we need to ensure that any
* cache entries for the kernels virtual memory range are written
* back to the page. On ARMv6 and later, the cache coherency is handled via
- * the set_pte_at() function.
+ * the set_ptes() function.
*/
#if __LINUX_ARM_ARCH__ < 6
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep);
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr);
#else
-static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+ unsigned int nr)
{
}
#endif
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
#endif
@@ -542,9 +642,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#elif defined(CONFIG_SMP) /* !CONFIG_MMU */
#ifndef __ASSEMBLY__
-
-#include <linux/mm_types.h>
-
static inline void local_flush_tlb_all(void) { }
static inline void local_flush_tlb_mm(struct mm_struct *mm) { }
static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { }
@@ -564,4 +661,21 @@ extern void flush_bp_all(void);
#endif
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+ if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+ erratum_a15_798181_handler))
+ return erratum_a15_798181_handler();
+ return false;
+}
+#endif
+
#endif
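
erratum_a15_798181() above pairs a compile-time IS_ENABLED() test with a handler pointer installed once at boot, so the whole check folds away in kernels built without the workaround. The shape of that pattern in plain C (the handler body is a stub):

        #include <stdbool.h>
        #include <stdio.h>

        #define CONFIG_ARM_ERRATA_798181        1
        #define IS_ENABLED(option)              (option)

        static bool (*erratum_handler)(void);

        static bool a15_handler(void)
        {
                /* the real handler issues a dummy TLBIMVAIS plus a dsb() */
                return true;
        }

        static bool erratum_a15_798181(void)
        {
                if (IS_ENABLED(CONFIG_ARM_ERRATA_798181) && erratum_handler)
                        return erratum_handler();
                return false;           /* config off, or CPU not affected */
        }

        int main(void)
        {
                printf("%d\n", erratum_a15_798181());   /* 0: nothing installed */
                erratum_handler = a15_handler;          /* done by ..._init() at boot */
                printf("%d\n", erratum_a15_798181());   /* 1 */
                return 0;
        }
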
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 83259b873333..3dcd0f71a0da 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -1,6 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_TLS_H
#define __ASMARM_TLS_H
+#include <linux/compiler.h>
+#include <asm/thread_info.h>
+
#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
@@ -8,48 +12,106 @@
.macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
- mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
- mcr p15, 0, \tpuser, c13, c0, 2 @ and the user r/w register
+ @ TLS register update is deferred until return to user space
+ mcr p15, 0, \tpuser, c13, c0, 2 @ set the user r/w register
str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
.endm
.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
- ldr \tmp1, =elf_hwcap
- ldr \tmp1, [\tmp1, #0]
+#ifdef CONFIG_SMP
+ALT_SMP(nop)
+ALT_UP_B(.L0_\@)
+ .subsection 1
+#endif
+.L0_\@:
+ ldr_va \tmp1, elf_hwcap
mov \tmp2, #0xffff0fff
tst \tmp1, #HWCAP_TLS @ hardware TLS available?
streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
- mrcne p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
- mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
- mcrne p15, 0, \tpuser, c13, c0, 2 @ set user r/w register
- strne \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
+ beq .L2_\@
+ mcr p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
+#ifdef CONFIG_SMP
+ b .L1_\@
+ .previous
+#endif
+.L1_\@: switch_tls_v6k \base, \tp, \tpuser, \tmp1, \tmp2
+.L2_\@:
.endm
.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
mov \tmp1, #0xffff0fff
str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0
.endm
+#else
+#include <asm/smp_plat.h>
#endif
#ifdef CONFIG_TLS_REG_EMUL
#define tls_emu 1
#define has_tls_reg 1
+#define defer_tls_reg_update 0
#define switch_tls switch_tls_none
#elif defined(CONFIG_CPU_V6)
#define tls_emu 0
#define has_tls_reg (elf_hwcap & HWCAP_TLS)
+#define defer_tls_reg_update is_smp()
#define switch_tls switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu 0
#define has_tls_reg 1
+#define defer_tls_reg_update 1
#define switch_tls switch_tls_v6k
#else
#define tls_emu 0
#define has_tls_reg 0
+#define defer_tls_reg_update 0
#define switch_tls switch_tls_software
#endif
#ifndef __ASSEMBLY__
+
+static inline void set_tls(unsigned long val)
+{
+ struct thread_info *thread;
+
+ thread = current_thread_info();
+
+ thread->tp_value[0] = val;
+
+ /*
+ * This code runs with preemption enabled and therefore must
+ * be reentrant with respect to switch_tls.
+ *
+ * We need to ensure ordering between the shadow state and the
+ * hardware state, so that we don't corrupt the hardware state
+ * with a stale shadow state during context switch.
+ *
+ * If we're preempted here, switch_tls will load TPIDRURO from
+ * thread_info upon resuming execution and the following mcr
+ * is merely redundant.
+ */
+ barrier();
+
+ if (!tls_emu) {
+ if (has_tls_reg && !defer_tls_reg_update) {
+ asm("mcr p15, 0, %0, c13, c0, 3"
+ : : "r" (val));
+ } else if (!has_tls_reg) {
+#ifdef CONFIG_KUSER_HELPERS
+ /*
+ * User space must never try to access this
+ * directly. Expect your app to break
+ * eventually if you do so. The user helper
+ * at 0xffff0fe0 must be used instead. (see
+ * entry-armv.S for details)
+ */
+ *((unsigned int *)0xffff0ff0) = val;
+#endif
+ }
+
+ }
+}
+
static inline unsigned long get_tpuser(void)
{
unsigned long reg = 0;
@@ -59,5 +121,23 @@ static inline unsigned long get_tpuser(void)
return reg;
}
+
+static inline void set_tpuser(unsigned long val)
+{
+ /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
+ * we need not update thread_info.
+ */
+ if (has_tls_reg && !tls_emu) {
+ asm("mcr p15, 0, %0, c13, c0, 2"
+ : : "r" (val));
+ }
+}
+
+static inline void flush_tls(void)
+{
+ set_tls(0);
+ set_tpuser(0);
+}
+
#endif
#endif /* __ASMARM_TLS_H */
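
The ordering the set_tls() comment insists on (shadow copy first, compiler barrier, hardware register last) can be demonstrated with ordinary variables; barrier() below is the usual compiler barrier and hw_tp stands in for TPIDRURO:

        #include <stdio.h>

        #define barrier()       asm volatile("" ::: "memory")

        static unsigned long shadow_tp; /* thread->tp_value[0] */
        static unsigned long hw_tp;     /* stands in for the TPIDRURO register */

        static void set_tls(unsigned long val)
        {
                shadow_tp = val;        /* software state first... */
                barrier();              /* ...and keep the compiler from reordering */
                hw_tp = val;            /* a preemption between the two stores is
                                           safe: switch code reloads from the shadow */
        }

        int main(void)
        {
                set_tls(0xdeadbeefUL);
                printf("%#lx %#lx\n", shadow_tp, hw_tp);
                return 0;
        }
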
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84adcd2..ad36b6570067 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -1,31 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_TOPOLOGY_H
#define _ASM_ARM_TOPOLOGY_H
#ifdef CONFIG_ARM_CPU_TOPOLOGY
#include <linux/cpumask.h>
+#include <linux/arch_topology.h>
+
+/* big.LITTLE switcher is incompatible with frequency invariance */
+#ifndef CONFIG_BL_SWITCHER
+/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_set_freq_scale topology_set_freq_scale
+#define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant
+#define arch_scale_freq_ref topology_get_freq_ref
+#endif
-struct cputopo_arm {
- int thread_id;
- int core_id;
- int socket_id;
- cpumask_t thread_sibling;
- cpumask_t core_sibling;
-};
-
-extern struct cputopo_arm cpu_topology[NR_CPUS];
-
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
-#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
-#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+/* Replace task scheduler's default cpu-invariant accounting */
+#define arch_scale_cpu_capacity topology_get_cpu_scale
-#define mc_capable() (cpu_topology[0].socket_id != -1)
-#define smt_capable() (cpu_topology[0].thread_id != -1)
+/* Enable topology flag updates */
+#define arch_update_cpu_topology topology_update_cpu_topology
-void init_cpu_topology(void);
-void store_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(int cpu);
+/* Replace task scheduler's default HW pressure API */
+#define arch_scale_hw_pressure topology_get_hw_pressure
+#define arch_update_hw_pressure topology_update_hw_pressure
#else
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index f555bb3664dc..2621b9fb9b19 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASMARM_TRAP_H
#define _ASMARM_TRAP_H
+#include <linux/linkage.h>
#include <linux/list.h>
struct pt_regs;
@@ -18,7 +20,6 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static inline int __in_irqentry_text(unsigned long ptr)
{
extern char __irqentry_text_start[];
@@ -27,29 +28,21 @@ static inline int __in_irqentry_text(unsigned long ptr)
return ptr >= (unsigned long)&__irqentry_text_start &&
ptr < (unsigned long)&__irqentry_text_end;
}
-#else
-static inline int __in_irqentry_text(unsigned long ptr)
-{
- return 0;
-}
-#endif
-
-static inline int in_exception_text(unsigned long ptr)
-{
- extern char __exception_text_start[];
- extern char __exception_text_end[];
- int in;
-
- in = ptr >= (unsigned long)&__exception_text_start &&
- ptr < (unsigned long)&__exception_text_end;
- return in ? : __in_irqentry_text(ptr);
-}
-
-extern void __init early_trap_init(void *);
-extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
-extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
+extern void early_trap_init(void *);
+extern void dump_backtrace_entry(unsigned long where, unsigned long from,
+ unsigned long frame, const char *loglvl);
+extern void ptrace_break(struct pt_regs *regs);
extern void *vectors_page;
+asmlinkage void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl);
+asmlinkage void do_undefinstr(struct pt_regs *regs);
+asmlinkage void handle_fiq_as_nmi(struct pt_regs *regs);
+asmlinkage void bad_mode(struct pt_regs *regs, int reason);
+asmlinkage int arm_syscall(int no, struct pt_regs *regs);
+asmlinkage void baddataabort(int code, unsigned long instr, struct pt_regs *regs);
+asmlinkage void __div0(void);
+asmlinkage void handle_bad_stack(struct pt_regs *regs);
+
#endif
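
__in_irqentry_text() above is the classic linker-symbol bounds check: an address is tested against the start and end markers the linker script emits around a section. The same idiom, runnable with an ordinary array standing in for those symbols:

        #include <stdio.h>

        static char irqentry[64];       /* stands in for the .irqentry.text span */

        static int in_irqentry_text(unsigned long ptr)
        {
                unsigned long start = (unsigned long)irqentry;
                unsigned long end = start + sizeof(irqentry);

                return ptr >= start && ptr < end;
        }

        int main(void)
        {
                printf("%d\n", in_irqentry_text((unsigned long)irqentry + 10)); /* 1 */
                printf("%d\n", in_irqentry_text((unsigned long)irqentry - 1));  /* 0 */
                return 0;
        }
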
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
new file mode 100644
index 000000000000..4bccd895d954
--- /dev/null
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_UACCESS_ASM_H__
+#define __ASM_UACCESS_ASM_H__
+
+#include <asm/asm-offsets.h>
+#include <asm/domain.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+ .macro csdb
+#ifdef CONFIG_THUMB2_KERNEL
+ .inst.w 0xf3af8014
+#else
+ .inst 0xe320f014
+#endif
+ .endm
+
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcscc \tmp, \tmp, \limit
+ bcs \bad
+#ifdef CONFIG_CPU_SPECTRE
+ movcs \addr, #0
+ csdb
+#endif
+#endif
+ .endm
+
+ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+ sub \tmp, \limit, #1
+ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
+ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
+ subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
+ movlo \addr, #0 @ if (tmp < 0) addr = NULL
+ csdb
+#endif
+ .endm
+
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
+ .macro uaccess_disable, tmp, isb=1
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_ENABLE
+ mcr p15, 0, \tmp, c3, c0, 0
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+ .macro uaccess_disable, tmp, isb=1
+ /*
+ * Disable TTBR0 page table walks (EDP0 = 1), use the reserved ASID
+ * from TTBR1 (A1 = 1) and enable TTBR1 page table walks for kernel
+ * addresses by reducing TTBR0 range to 32MB (T0SZ = 7).
+ */
+ mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
+ orr \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+ orr \tmp, \tmp, #TTBCR_A1
+ mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ /*
+ * Enable TTBR0 page table walks (T0SZ = 0, EDP0 = 0) and ASID from
+ * TTBR0 (A1 = 0).
+ */
+ mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
+ bic \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+ bic \tmp, \tmp, #TTBCR_A1
+ mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+#else
+
+ .macro uaccess_disable, tmp, isb=1
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ .endm
+
+#endif
+
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
+#define DACR(x...) x
+#else
+#define DACR(x...)
+#endif
+
+#ifdef CONFIG_CPU_TTBR0_PAN
+#define PAN(x...) x
+#else
+#define PAN(x...)
+#endif
+
+ /*
+	 * Save the user access state on entry to a privileged exception.
+ *
+ * If we are using the DACR for kernel access by the user accessors
+ * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
+ * back to client mode, whether or not \disable is set.
+ *
+ * If we are using SW PAN, set the DACR user domain to no access
+ * if \disable is set.
+ */
+ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
+ DACR( mrc p15, 0, \tmp0, c3, c0, 0)
+ DACR( str \tmp0, [sp, #SVC_DACR])
+ PAN( mrc p15, 0, \tmp0, c2, c0, 2)
+ PAN( str \tmp0, [sp, #SVC_TTBCR])
+ .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
+ /* kernel=client, user=no access */
+ mov \tmp2, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
+ /* kernel=client */
+ bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
+ orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .endif
+ .endm
+
+ /* Restore the user access state previously saved by uaccess_entry */
+ .macro uaccess_exit, tsk, tmp0, tmp1
+ DACR( ldr \tmp0, [sp, #SVC_DACR])
+ DACR( mcr p15, 0, \tmp0, c3, c0, 0)
+ PAN( ldr \tmp0, [sp, #SVC_TTBCR])
+ PAN( mcr p15, 0, \tmp0, c2, c0, 2)
+ .endm
+
+#undef DACR
+#undef PAN
+
+#endif /* __ASM_UACCESS_ASM_H__ */
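
uaccess_mask_range_ptr forces a user pointer to NULL whenever addr + size would run past the limit, so that even a mispredicted path cannot dereference out-of-range memory. Its arithmetic rendered as branching C for clarity (the macro does the same thing branchlessly, with conditional execution followed by a csdb speculation barrier):

        #include <stdio.h>

        static void *mask_range_ptr(void *addr, unsigned long size, unsigned long limit)
        {
                unsigned long a = (unsigned long)addr;

                /* NULL iff addr > limit - 1 or limit - addr < size, i.e. the
                   last byte of the access would land at or beyond the limit */
                if (a > limit - 1 || limit - a < size)
                        return NULL;
                return addr;
        }

        int main(void)
        {
                char buf[16];
                unsigned long limit = (unsigned long)buf + sizeof(buf);

                printf("%p\n", mask_range_ptr(buf, 16, limit)); /* in range: kept */
                printf("%p\n", mask_range_ptr(buf, 17, limit)); /* too big: (nil) */
                return 0;
        }
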
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f76027f66..d6ae80b5df36 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/uaccess.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H
@@ -11,36 +8,77 @@
/*
* User space memory access functions
*/
+#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/thread_info.h>
-#include <asm/errno.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/domain.h>
+#include <linux/unaligned.h>
#include <asm/unified.h>
+#include <asm/pgtable.h>
+#include <asm/proc-fns.h>
#include <asm/compiler.h>
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
+#include <asm/extable.h>
/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (e.g., via list poison values) which could then
+ * be exploited for privilege escalation.
*/
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
+static __always_inline unsigned int uaccess_save_and_enable(void)
+{
+ unsigned int old_domain = get_domain();
+
+ /* Set the current domain access to permit user accesses */
+ set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+ domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+ return old_domain;
+}
+
+static __always_inline void uaccess_restore(unsigned int flags)
+{
+ /* Restore the user access mask */
+ set_domain(flags);
+}
+
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+static __always_inline unsigned int uaccess_save_and_enable(void)
+{
+ unsigned int old_ttbcr = cpu_get_ttbcr();
+
+ /*
+ * Enable TTBR0 page table walks (T0SZ = 0, EDP0 = 0) and ASID from
+ * TTBR0 (A1 = 0).
+ */
+ cpu_set_ttbcr(old_ttbcr & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK));
+ isb();
+
+ return old_ttbcr;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+ cpu_set_ttbcr(flags);
+ isb();
+}
+
+#else
+
+static inline unsigned int uaccess_save_and_enable(void)
+{
+ return 0;
+}
-struct exception_table_entry
+static inline void uaccess_restore(unsigned int flags)
{
- unsigned long insn, fixup;
-};
+}
-extern int fixup_exception(struct pt_regs *regs);
+#endif
/*
* These two are intentionally not defined anywhere - if the kernel
@@ -49,43 +87,42 @@ extern int fixup_exception(struct pt_regs *regs);
extern int __get_user_bad(void);
extern int __put_user_bad(void);
-/*
- * Note that this is actually 0x1,0000,0000
- */
-#define KERNEL_DS 0x00000000
-#define get_ds() (KERNEL_DS)
-
#ifdef CONFIG_MMU
-#define USER_DS TASK_SIZE
-#define get_fs() (current_thread_info()->addr_limit)
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
-static inline void set_fs(mm_segment_t fs)
+/*
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
+ * is above the current addr_limit.
+ */
+#define uaccess_mask_range_ptr(ptr, size) \
+ ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
+ size_t size)
{
- current_thread_info()->addr_limit = fs;
- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+ void __user *safe_ptr = (void __user *)ptr;
+ unsigned long tmp;
+
+ asm volatile(
+ " .syntax unified\n"
+ " sub %1, %3, #1\n"
+ " subs %1, %1, %0\n"
+ " addhs %1, %1, #1\n"
+ " subshs %1, %1, %2\n"
+ " movlo %0, #0\n"
+ : "+r" (safe_ptr), "=&r" (tmp)
+ : "r" (size), "r" (TASK_SIZE)
+ : "cc");
+
+ csdb();
+ return safe_ptr;
}
-#define segment_eq(a,b) ((a) == (b))
-
-#define __addr_ok(addr) ({ \
- unsigned long flag; \
- __asm__("cmp %2, %0; movlo %0, #0" \
- : "=&r" (flag) \
- : "0" (current_thread_info()->addr_limit), "r" (addr) \
- : "cc"); \
- (flag == 0); })
-
-/* We use 33-bit arithmetic here... */
-#define __range_ok(addr,size) ({ \
- unsigned long flag, roksum; \
- __chk_user_ptr(addr); \
- __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
- : "=&r" (flag), "=&r" (roksum) \
- : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
- : "cc"); \
- flag; })
-
/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
@@ -100,51 +137,93 @@ static inline void set_fs(mm_segment_t fs)
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
+extern int __get_user_32t_8(void *);
+extern int __get_user_8(void *);
+extern int __get_user_64t_1(void *);
+extern int __get_user_64t_2(void *);
+extern int __get_user_64t_4(void *);
-#define __GUP_CLOBBER_1 "lr", "cc"
-#ifdef CONFIG_CPU_USE_DOMAINS
-#define __GUP_CLOBBER_2 "ip", "lr", "cc"
+#define __get_user_x(__r2, __p, __e, __l, __s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%1", "r2") \
+ __asmeq("%3", "r1") \
+ "bl __get_user_" #__s \
+ : "=&r" (__e), "=r" (__r2) \
+ : "0" (__p), "r" (__l) \
+ : "ip", "lr", "cc")
+
+/* narrowing a double-word get into a single 32-bit word register: */
+#ifdef __ARMEB__
+#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
+ __get_user_x(__r2, __p, __e, __l, 32t_8)
#else
-#define __GUP_CLOBBER_2 "lr", "cc"
+#define __get_user_x_32t __get_user_x
#endif
-#define __GUP_CLOBBER_4 "lr", "cc"
-#define __get_user_x(__r2,__p,__e,__l,__s) \
+/*
+ * store the result into the least significant word of the 64-bit target
+ * variable; only the big-endian case differs, where the LSW of __r2 is r3:
+ */
+#ifdef __ARMEB__
+#define __get_user_x_64t(__r2, __p, __e, __l, __s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%1", "r2") \
__asmeq("%3", "r1") \
- "bl __get_user_" #__s \
+ "bl __get_user_64t_" #__s \
: "=&r" (__e), "=r" (__r2) \
: "0" (__p), "r" (__l) \
- : __GUP_CLOBBER_##__s)
+ : "ip", "lr", "cc")
+#else
+#define __get_user_x_64t __get_user_x
+#endif
+
-#define __get_user_check(x,p) \
+#define __get_user_check(x, p) \
({ \
- unsigned long __limit = current_thread_info()->addr_limit - 1; \
- register const typeof(*(p)) __user *__p asm("r0") = (p);\
- register unsigned long __r2 asm("r2"); \
+ unsigned long __limit = TASK_SIZE - 1; \
+ register typeof(*(p)) __user *__p asm("r0") = (p); \
+ register __inttype(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
+ int __tmp_e; \
switch (sizeof(*(__p))) { \
case 1: \
- __get_user_x(__r2, __p, __e, __l, 1); \
+ if (sizeof((x)) >= 8) \
+ __get_user_x_64t(__r2, __p, __e, __l, 1); \
+ else \
+ __get_user_x(__r2, __p, __e, __l, 1); \
break; \
case 2: \
- __get_user_x(__r2, __p, __e, __l, 2); \
+ if (sizeof((x)) >= 8) \
+ __get_user_x_64t(__r2, __p, __e, __l, 2); \
+ else \
+ __get_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
- __get_user_x(__r2, __p, __e, __l, 4); \
+ if (sizeof((x)) >= 8) \
+ __get_user_x_64t(__r2, __p, __e, __l, 4); \
+ else \
+ __get_user_x(__r2, __p, __e, __l, 4); \
+ break; \
+ case 8: \
+ if (sizeof((x)) < 8) \
+ __get_user_x_32t(__r2, __p, __e, __l, 4); \
+ else \
+ __get_user_x(__r2, __p, __e, __l, 8); \
break; \
default: __e = __get_user_bad(); break; \
} \
+ __tmp_e = __e; \
+ uaccess_restore(__ua_flags); \
x = (typeof(*(p))) __r2; \
- __e; \
+ __tmp_e; \
})
-#define get_user(x,p) \
+#define get_user(x, p) \
({ \
might_fault(); \
- __get_user_check(x,p); \
+ __get_user_check(x, p); \
})
extern int __put_user_1(void *, unsigned int);
@@ -152,71 +231,41 @@ extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
-#define __put_user_x(__r2,__p,__e,__l,__s) \
- __asm__ __volatile__ ( \
- __asmeq("%0", "r0") __asmeq("%2", "r2") \
- __asmeq("%3", "r1") \
- "bl __put_user_" #__s \
- : "=&r" (__e) \
- : "0" (__p), "r" (__r2), "r" (__l) \
- : "ip", "lr", "cc")
-
-#define __put_user_check(x,p) \
+#define __put_user_check(__pu_val, __ptr, __err, __s) \
({ \
- unsigned long __limit = current_thread_info()->addr_limit - 1; \
- register const typeof(*(p)) __r2 asm("r2") = (x); \
- register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ unsigned long __limit = TASK_SIZE - 1; \
+ register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
+ register const void __user *__p asm("r0") = __ptr; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
- switch (sizeof(*(__p))) { \
- case 1: \
- __put_user_x(__r2, __p, __e, __l, 1); \
- break; \
- case 2: \
- __put_user_x(__r2, __p, __e, __l, 2); \
- break; \
- case 4: \
- __put_user_x(__r2, __p, __e, __l, 4); \
- break; \
- case 8: \
- __put_user_x(__r2, __p, __e, __l, 8); \
- break; \
- default: __e = __put_user_bad(); break; \
- } \
- __e; \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%2", "r2") \
+ __asmeq("%3", "r1") \
+ "bl __put_user_" #__s \
+ : "=&r" (__e) \
+ : "0" (__p), "r" (__r2), "r" (__l) \
+ : "ip", "lr", "cc"); \
+ __err = __e; \
})
-#define put_user(x,p) \
- ({ \
- might_fault(); \
- __put_user_check(x,p); \
- })
-
#else /* CONFIG_MMU */
-/*
- * uClinux has only one addr space, so has simplified address limits.
- */
-#define USER_DS KERNEL_DS
-
-#define segment_eq(a,b) (1)
-#define __addr_ok(addr) ((void)(addr),1)
-#define __range_ok(addr,size) ((void)(addr),0)
-#define get_fs() (KERNEL_DS)
-
-static inline void set_fs(mm_segment_t fs)
-{
-}
-
-#define get_user(x,p) __get_user(x,p)
-#define put_user(x,p) __put_user(x,p)
+#define get_user(x, p) __get_user(x, p)
+#define __put_user_check __put_user_nocheck
#endif /* CONFIG_MMU */
-#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
+#include <asm-generic/access_ok.h>
-#define user_addr_max() \
- (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1, it is not worth fixing the non-
+ * verifying accessors, because we need to add verification of the
+ * address space there. Force these to use the standard get_user()
+ * version instead.
+ */
+#define __get_user(x, ptr) get_user(x, ptr)
+#else
/*
* The "__xxx" versions of the user access functions do not verify the
@@ -227,39 +276,45 @@ static inline void set_fs(mm_segment_t fs)
* error occurs, and leave it unchanged on success. Note that these
* versions are void (ie, don't return a value as such).
*/
-#define __get_user(x,ptr) \
+#define __get_user(x, ptr) \
({ \
long __gu_err = 0; \
- __get_user_err((x),(ptr),__gu_err); \
+ __get_user_err((x), (ptr), __gu_err, TUSER()); \
__gu_err; \
})
-#define __get_user_error(x,ptr,err) \
-({ \
- __get_user_err((x),(ptr),err); \
- (void) 0; \
-})
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
-#define __get_user_err(x,ptr,err) \
+#define __get_user_err(x, ptr, err, __t) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
- unsigned long __gu_val; \
+ __long_type(x) __gu_val; \
+ unsigned int __ua_flags; \
__chk_user_ptr(ptr); \
might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
- case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
- case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
- case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \
+ case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
+ case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
+ case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
+ case 8: __get_user_asm_dword(__gu_val, __gu_addr, err, __t); break; \
default: (__gu_val) = __get_user_bad(); \
} \
+ uaccess_restore(__ua_flags); \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
+#endif
-#define __get_user_asm_byte(x,addr,err) \
+#define __get_user_asm(x, addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(ldrb) " %1,[%2],#0\n" \
+ "1: " instr " %1, [%2], #0\n" \
"2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
@@ -273,75 +328,112 @@ do { \
: "r" (addr), "i" (-EFAULT) \
: "cc")
+#define __get_user_asm_byte(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldrb" __t)
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __get_user_asm_half(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldrh" __t)
+
+#else
+
#ifndef __ARMEB__
-#define __get_user_asm_half(x,__gu_addr,err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
unsigned long __b1, __b2; \
- __get_user_asm_byte(__b1, __gu_addr, err); \
- __get_user_asm_byte(__b2, __gu_addr + 1, err); \
+ __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+ __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
(x) = __b1 | (__b2 << 8); \
})
#else
-#define __get_user_asm_half(x,__gu_addr,err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
unsigned long __b1, __b2; \
- __get_user_asm_byte(__b1, __gu_addr, err); \
- __get_user_asm_byte(__b2, __gu_addr + 1, err); \
+ __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+ __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
(x) = (__b1 << 8) | __b2; \
})
#endif
-#define __get_user_asm_word(x,addr,err) \
- __asm__ __volatile__( \
- "1: " TUSER(ldr) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %0, %3\n" \
- " mov %1, #0\n" \
- " b 2b\n" \
- " .popsection\n" \
- " .pushsection __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 3b\n" \
- " .popsection" \
- : "+r" (err), "=&r" (x) \
- : "r" (addr), "i" (-EFAULT) \
- : "cc")
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#define __get_user_asm_word(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldr" __t)
-#define __put_user(x,ptr) \
+#ifdef __ARMEB__
+#define __WORD0_OFFS 4
+#define __WORD1_OFFS 0
+#else
+#define __WORD0_OFFS 0
+#define __WORD1_OFFS 4
+#endif
+
+#define __get_user_asm_dword(x, addr, err, __t) \
+ ({ \
+ unsigned long __w0, __w1; \
+ __get_user_asm(__w0, addr + __WORD0_OFFS, err, "ldr" __t); \
+ __get_user_asm(__w1, addr + __WORD1_OFFS, err, "ldr" __t); \
+ (x) = ((u64)__w1 << 32) | (u64) __w0; \
+})
+
+#define __put_user_switch(x, ptr, __err, __fn) \
+ do { \
+ const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ unsigned int __ua_flags; \
+ might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
+ switch (sizeof(*(ptr))) { \
+ case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
+ case 2: __fn(__pu_val, __pu_ptr, __err, 2); break; \
+ case 4: __fn(__pu_val, __pu_ptr, __err, 4); break; \
+ case 8: __fn(__pu_val, __pu_ptr, __err, 8); break; \
+ default: __err = __put_user_bad(); break; \
+ } \
+ uaccess_restore(__ua_flags); \
+ } while (0)
+
+#define put_user(x, ptr) \
({ \
- long __pu_err = 0; \
- __put_user_err((x),(ptr),__pu_err); \
+ int __pu_err = 0; \
+ __put_user_switch((x), (ptr), __pu_err, __put_user_check); \
__pu_err; \
})
-#define __put_user_error(x,ptr,err) \
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1.1, all accessors need to include
+ * verification of the address space.
+ */
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#else
+#define __put_user(x, ptr) \
({ \
- __put_user_err((x),(ptr),err); \
- (void) 0; \
+ long __pu_err = 0; \
+ __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
+ __pu_err; \
})
-#define __put_user_err(x,ptr,err) \
-do { \
- unsigned long __pu_addr = (unsigned long)(ptr); \
- __typeof__(*(ptr)) __pu_val = (x); \
- __chk_user_ptr(ptr); \
- might_fault(); \
- switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
- case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
- case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \
- case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \
- default: __put_user_bad(); \
- } \
-} while (0)
+#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
+ do { \
+ unsigned long __pu_addr = (unsigned long)__pu_ptr; \
+ __put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
+ } while (0)
+
+#define __put_user_nocheck_1 __put_user_asm_byte
+#define __put_user_nocheck_2 __put_user_asm_half
+#define __put_user_nocheck_4 __put_user_asm_word
+#define __put_user_nocheck_8 __put_user_asm_dword
+
+#endif /* !CONFIG_CPU_SPECTRE */
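
To make the distinction concrete: outside the Spectre-mitigated configuration, __put_user() skips the address-space check and is only safe after an explicit access_ok() on the whole range. A minimal sketch with illustrative names ("demo_fill" is hypothetical):

	static int demo_fill(u32 __user *up, u32 a, u32 b)
	{
		/* one range check, then the unchecked fast path */
		if (!access_ok(up, 2 * sizeof(u32)))
			return -EFAULT;
		if (__put_user(a, up) || __put_user(b, up + 1))
			return -EFAULT;
		return 0;
	}
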
-#define __put_user_asm_byte(x,__pu_addr,err) \
+#define __put_user_asm(x, __pu_addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(strb) " %1,[%2],#0\n" \
+ "1: " instr " %1, [%2], #0\n" \
"2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
@@ -354,38 +446,36 @@ do { \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
+#define __put_user_asm_byte(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "strb" __t)
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "strh" __t)
+
+#else
+
#ifndef __ARMEB__
-#define __put_user_asm_half(x,__pu_addr,err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
- unsigned long __temp = (unsigned long)(x); \
- __put_user_asm_byte(__temp, __pu_addr, err); \
- __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
+ unsigned long __temp = (__force unsigned long)(x); \
+ __put_user_asm_byte(__temp, __pu_addr, err, __t); \
+ __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
})
#else
-#define __put_user_asm_half(x,__pu_addr,err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
- unsigned long __temp = (unsigned long)(x); \
- __put_user_asm_byte(__temp >> 8, __pu_addr, err); \
- __put_user_asm_byte(__temp, __pu_addr + 1, err); \
+ unsigned long __temp = (__force unsigned long)(x); \
+ __put_user_asm_byte(__temp >> 8, __pu_addr, err, __t); \
+ __put_user_asm_byte(__temp, __pu_addr + 1, err, __t); \
})
#endif
-#define __put_user_asm_word(x,__pu_addr,err) \
- __asm__ __volatile__( \
- "1: " TUSER(str) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %0, %3\n" \
- " b 2b\n" \
- " .popsection\n" \
- " .pushsection __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 3b\n" \
- " .popsection" \
- : "+r" (err) \
- : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
- : "cc")
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#define __put_user_asm_word(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "str" __t)
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
@@ -395,14 +485,14 @@ do { \
#define __reg_oper1 "%R2"
#endif
-#define __put_user_asm_dword(x,__pu_addr,err) \
+#define __put_user_asm_dword(x, __pu_addr, err, __t) \
__asm__ __volatile__( \
- ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
- ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
- THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \
- THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
+ ARM( "1: str" __t " " __reg_oper1 ", [%1], #4\n" ) \
+ ARM( "2: str" __t " " __reg_oper0 ", [%1]\n" ) \
+ THUMB( "1: str" __t " " __reg_oper1 ", [%1]\n" ) \
+ THUMB( "2: str" __t " " __reg_oper0 ", [%1, #4]\n" ) \
"3:\n" \
- " .pushsection .fixup,\"ax\"\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, %3\n" \
" b 3b\n" \
@@ -416,48 +506,129 @@ do { \
: "r" (x), "i" (-EFAULT) \
: "cc")
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ const type *__pk_ptr = (src); \
+ unsigned long __src = (unsigned long)(__pk_ptr); \
+ type __val; \
+ int __err = 0; \
+ switch (sizeof(type)) { \
+ case 1: __get_user_asm_byte(__val, __src, __err, ""); break; \
+ case 2: __get_user_asm_half(__val, __src, __err, ""); break; \
+ case 4: __get_user_asm_word(__val, __src, __err, ""); break; \
+ case 8: { \
+		u32 *__v32 = (u32 *)&__val;				\
+ __get_user_asm_word(__v32[0], __src, __err, ""); \
+ if (__err) \
+ break; \
+		__get_user_asm_word(__v32[1], __src + 4, __err, "");	\
+ break; \
+ } \
+ default: __err = __get_user_bad(); break; \
+ } \
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) \
+ put_unaligned(__val, (type *)(dst)); \
+ else \
+ *(type *)(dst) = __val; /* aligned by caller */ \
+ if (__err) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ const type *__pk_ptr = (dst); \
+ unsigned long __dst = (unsigned long)__pk_ptr; \
+ int __err = 0; \
+ type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
+ ? get_unaligned((type *)(src)) \
+ : *(type *)(src); /* aligned by caller */ \
+ switch (sizeof(type)) { \
+ case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \
+ case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \
+ case 4: __put_user_asm_word(__val, __dst, __err, ""); break; \
+ case 8: __put_user_asm_dword(__val, __dst, __err, ""); break; \
+ default: __err = __put_user_bad(); break; \
+ } \
+ if (__err) \
+ goto err_label; \
+} while (0)
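
These two helpers back the generic get_kernel_nofault()/copy_from_kernel_nofault() machinery from <linux/uaccess.h>, which probe kernel addresses without risking an oops. A minimal usage sketch ("demo_peek" is an illustrative name):

	static unsigned long demo_peek(unsigned long *addr)
	{
		unsigned long val;

		/* returns non-zero instead of faulting on a bad pointer */
		if (get_kernel_nofault(val, addr))
			return 0;
		return val;
	}
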
#ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
-#else
-#define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
-#define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
-#define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0)
-#endif
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
-static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+static inline unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else /* security hole - plug it */
- memset(to, 0, n);
+ unsigned int __ua_flags;
+
+ __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_from_user(to, from, n);
+ uaccess_restore(__ua_flags);
return n;
}
-static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
- n = __copy_to_user(to, from, n);
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
+ unsigned int __ua_flags;
+ __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_to_user(to, from, n);
+ uaccess_restore(__ua_flags);
return n;
+#else
+ return arm_copy_to_user(to, from, n);
+#endif
}
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_clear_user(addr, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+#else
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ memcpy(to, (const void __force *)from, n);
+ return 0;
+}
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ memcpy((void __force *)to, from, n);
+ return 0;
+}
+#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
+#endif
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
+ if (access_ok(to, n))
n = __clear_user(to, n);
return n;
}
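
clear_user() returns the number of bytes that could not be cleared, so any non-zero result indicates a fault. A minimal sketch of the common pattern of zero-padding the tail of a user buffer ("demo_pad" is illustrative):

	static int demo_pad(void __user *buf, size_t copied, size_t len)
	{
		if (copied < len && clear_user(buf + copied, len - copied))
			return -EFAULT;
		return 0;
	}
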
+/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);
-extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* _ASMARM_UACCESS_H */
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index 14749aec94bf..4048c92d9c2b 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASMARM_UCONTEXT_H
#define _ASMARM_UCONTEXT_H
#include <asm/fpstate.h>
+#include <asm/user.h>
/*
* struct sigcontext only has room for the basic registers, but struct
@@ -35,16 +37,11 @@ struct ucontext {
* bytes, to prevent unpredictable padding in the signal frame.
*/
-#ifdef CONFIG_CRUNCH
-#define CRUNCH_MAGIC 0x5065cf03
-#define CRUNCH_STORAGE_SIZE (CRUNCH_SIZE + 8)
-
-struct crunch_sigframe {
- unsigned long magic;
- unsigned long size;
- struct crunch_state storage;
-} __attribute__((__aligned__(8)));
-#endif
+/*
+ * Dummy padding block: if this magic is encountered, the block should
+ * be skipped using the corresponding size field.
+ */
+#define DUMMY_MAGIC 0xb0d9ed01
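
Since every block in the auxiliary signal frame begins with a magic word followed by its total size, a parser can step over DUMMY_MAGIC blocks generically. A minimal sketch under that assumption ("block_hdr" and "demo_skip_dummy" are illustrative, not part of this patch):

	struct block_hdr { unsigned long magic, size; };

	static struct block_hdr *demo_skip_dummy(struct block_hdr *h)
	{
		/* skip any padding blocks using their size field */
		while (h->magic == DUMMY_MAGIC)
			h = (void *)h + h->size;
		return h;
	}
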
#ifdef CONFIG_IWMMXT
/* iwmmxt_area is 0x98 bytes long, preceded by 8 bytes of signature */
@@ -84,9 +81,6 @@ struct vfp_sigframe
* one of these.
*/
struct aux_sigframe {
-#ifdef CONFIG_CRUNCH
- struct crunch_sigframe crunch;
-#endif
#ifdef CONFIG_IWMMXT
struct iwmmxt_sigframe iwmmxt;
#endif
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index f5989f46b4d2..ce9689118dbb 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -1,35 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/asm-arm/unified.h - Unified Assembler Syntax helper macros
*
* Copyright (C) 2008 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_UNIFIED_H
#define __ASM_UNIFIED_H
-#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED)
+#if defined(__ASSEMBLY__)
.syntax unified
+#else
+__asm__(".syntax unified");
#endif
-#ifdef CONFIG_THUMB2_KERNEL
-
-#if __GNUC__ < 4
-#error Thumb-2 kernel requires gcc >= 4
+#ifdef CONFIG_CPU_V7M
+#define AR_CLASS(x...)
+#define M_CLASS(x...) x
+#else
+#define AR_CLASS(x...) x
+#define M_CLASS(x...)
#endif
+#ifdef CONFIG_THUMB2_KERNEL
+
/* The CPSR bit describing the instruction set (Thumb) */
#define PSR_ISETSTATE PSR_T_BIT
@@ -37,7 +31,8 @@
#define THUMB(x...) x
#ifdef __ASSEMBLY__
#define W(instr) instr.w
-#define BSYM(sym) sym + 1
+#else
+#define WASM(instr) #instr ".w"
#endif
#else /* !CONFIG_THUMB2_KERNEL */
@@ -49,82 +44,10 @@
#define THUMB(x...)
#ifdef __ASSEMBLY__
#define W(instr) instr
-#define BSYM(sym) sym
+#else
+#define WASM(instr) #instr
#endif
#endif /* CONFIG_THUMB2_KERNEL */
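
WASM() is the C-side counterpart of W(): it stringifies an instruction and appends ".w" on Thumb-2 kernels so inline assembly emits the wide encoding. A minimal sketch, loosely modelled on the SEV/WFE helpers in asm/spinlock.h ("DEMO_SEV" and "demo_sev" are illustrative names):

	#define DEMO_SEV	WASM(sev)	/* "sev.w" on Thumb-2, "sev" otherwise */

	static inline void demo_sev(void)
	{
		asm volatile(DEMO_SEV : : : "memory");
	}
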
-#ifndef CONFIG_ARM_ASM_UNIFIED
-
-/*
- * If the unified assembly syntax isn't used (in ARM mode), these
- * macros expand to an empty string
- */
-#ifdef __ASSEMBLY__
- .macro it, cond
- .endm
- .macro itt, cond
- .endm
- .macro ite, cond
- .endm
- .macro ittt, cond
- .endm
- .macro itte, cond
- .endm
- .macro itet, cond
- .endm
- .macro itee, cond
- .endm
- .macro itttt, cond
- .endm
- .macro ittte, cond
- .endm
- .macro ittet, cond
- .endm
- .macro ittee, cond
- .endm
- .macro itett, cond
- .endm
- .macro itete, cond
- .endm
- .macro iteet, cond
- .endm
- .macro iteee, cond
- .endm
-#else /* !__ASSEMBLY__ */
-__asm__(
-" .macro it, cond\n"
-" .endm\n"
-" .macro itt, cond\n"
-" .endm\n"
-" .macro ite, cond\n"
-" .endm\n"
-" .macro ittt, cond\n"
-" .endm\n"
-" .macro itte, cond\n"
-" .endm\n"
-" .macro itet, cond\n"
-" .endm\n"
-" .macro itee, cond\n"
-" .endm\n"
-" .macro itttt, cond\n"
-" .endm\n"
-" .macro ittte, cond\n"
-" .endm\n"
-" .macro ittet, cond\n"
-" .endm\n"
-" .macro ittee, cond\n"
-" .endm\n"
-" .macro itett, cond\n"
-" .endm\n"
-" .macro itete, cond\n"
-" .endm\n"
-" .macro iteet, cond\n"
-" .endm\n"
-" .macro iteee, cond\n"
-" .endm\n");
-#endif /* __ASSEMBLY__ */
-
-#endif /* CONFIG_ARM_ASM_UNIFIED */
-
#endif /* !__ASM_UNIFIED_H */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 141baa3f9a72..9fb00973c608 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/unistd.h
*
* Copyright (C) 2001-2005 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Please forward _all_ changes to this file to rmk@arm.linux.org.uk,
* no matter what the change is. Thanks!
*/
@@ -14,27 +11,25 @@
#define __ASM_ARM_UNISTD_H
#include <uapi/asm/unistd.h>
+#include <asm/unistd-nr.h>
-#define __NR_syscalls (380)
-#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
-
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_OLD_MMAP
#define __ARCH_WANT_SYS_OLD_SELECT
+#define __ARCH_WANT_SYS_UTIME32
#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
-#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_TIME32
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_SOCKETCALL
@@ -47,7 +42,24 @@
* Unimplemented (or alternatively implemented) syscalls
*/
#define __IGNORE_fadvise64_64
-#define __IGNORE_migrate_pages
-#define __IGNORE_kcmp
+
+#ifdef __ARM_EABI__
+/*
+ * The following syscalls are obsolete and no longer available for EABI:
+ * __NR_time
+ * __NR_umount
+ * __NR_stime
+ * __NR_alarm
+ * __NR_utime
+ * __NR_getrlimit
+ * __NR_select
+ * __NR_readdir
+ * __NR_mmap
+ * __NR_socketcall
+ * __NR_syscall
+ * __NR_ipc
+ */
+#define __IGNORE_getrlimit
+#endif
#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h
index d1c3f3a71c94..d60b09a5acfc 100644
--- a/arch/arm/include/asm/unwind.h
+++ b/arch/arm/include/asm/unwind.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/unwind.h
*
* Copyright (C) 2008 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_UNWIND_H
@@ -36,6 +24,7 @@ struct unwind_idx {
struct unwind_table {
struct list_head list;
+ struct list_head mod_list;
const struct unwind_idx *start;
const struct unwind_idx *origin;
const struct unwind_idx *stop;
@@ -48,7 +37,12 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
unsigned long text_addr,
unsigned long text_size);
extern void unwind_table_del(struct unwind_table *tab);
-extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
+extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
+
+void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr1(void);
+void __aeabi_unwind_cpp_pr2(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/uprobes.h b/arch/arm/include/asm/uprobes.h
new file mode 100644
index 000000000000..6a61b2874926
--- /dev/null
+++ b/arch/arm/include/asm/uprobes.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Rabin Vincent <rabin at rab.in>
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <asm/probes.h>
+#include <asm/opcodes.h>
+
+typedef u32 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES 4
+#define UPROBE_XOL_SLOT_BYTES 64
+
+#define UPROBE_SWBP_ARM_INSN 0xe7f001f9
+#define UPROBE_SS_ARM_INSN 0xe7f001fa
+#define UPROBE_SWBP_INSN __opcode_to_mem_arm(UPROBE_SWBP_ARM_INSN)
+#define UPROBE_SWBP_INSN_SIZE 4
+
+struct arch_uprobe_task {
+ u32 backup;
+ unsigned long saved_trap_no;
+};
+
+struct arch_uprobe {
+ u8 insn[MAX_UINSN_BYTES];
+ unsigned long ixol[2];
+ uprobe_opcode_t bpinsn;
+ bool simulate;
+ u32 pcreg;
+ void (*prehandler)(struct arch_uprobe *auprobe,
+ struct arch_uprobe_task *autask,
+ struct pt_regs *regs);
+ void (*posthandler)(struct arch_uprobe *auprobe,
+ struct arch_uprobe_task *autask,
+ struct pt_regs *regs);
+ struct arch_probes_insn asi;
+};
+
+#endif
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index 35917b3a97f9..167d44b550f4 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARM_USER_H
#define _ARM_USER_H
@@ -76,10 +77,6 @@ struct user{
struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
/* the FP registers. */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
/*
* User specific VFP registers. If only VFPv2 is present, registers 16 to 31
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index fa88d09fa3d9..4512f7e1918f 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common defines for v7m cpus
*/
@@ -12,14 +13,22 @@
#define V7M_SCB_ICSR_PENDSVSET (1 << 28)
#define V7M_SCB_ICSR_PENDSVCLR (1 << 27)
#define V7M_SCB_ICSR_RETTOBASE (1 << 11)
+#define V7M_SCB_ICSR_VECTACTIVE 0x000001ff
#define V7M_SCB_VTOR 0x08
+#define V7M_SCB_AIRCR 0x0c
+#define V7M_SCB_AIRCR_VECTKEY (0x05fa << 16)
+#define V7M_SCB_AIRCR_SYSRESETREQ (1 << 2)
+
#define V7M_SCB_SCR 0x10
#define V7M_SCB_SCR_SLEEPDEEP (1 << 2)
#define V7M_SCB_CCR 0x14
#define V7M_SCB_CCR_STKALIGN (1 << 9)
+#define V7M_SCB_CCR_DC (1 << 16)
+#define V7M_SCB_CCR_IC (1 << 17)
+#define V7M_SCB_CCR_BP (1 << 18)
#define V7M_SCB_SHPR2 0x1c
#define V7M_SCB_SHPR3 0x20
@@ -30,7 +39,7 @@
#define V7M_SCB_SHCSR_MEMFAULTENA (1 << 16)
#define V7M_xPSR_FRAMEPTRALIGN 0x00000200
-#define V7M_xPSR_EXCEPTIONNO 0x000001ff
+#define V7M_xPSR_EXCEPTIONNO V7M_SCB_ICSR_VECTACTIVE
/*
* When branching to an address that has bits [31:28] == 0xf an exception return
@@ -41,4 +50,49 @@
* (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
*/
#define EXC_RET_STACK_MASK 0x00000004
-#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
+#define EXC_RET_THREADMODE_PROCESSSTACK (3 << 2)
+
+/* Cache related definitions */
+
+#define V7M_SCB_CLIDR 0x78 /* Cache Level ID register */
+#define V7M_SCB_CTR 0x7c /* Cache Type register */
+#define V7M_SCB_CCSIDR 0x80 /* Cache size ID register */
+#define V7M_SCB_CSSELR 0x84 /* Cache size selection register */
+
+/* Memory-mapped MPU registers for M-class */
+#define MPU_TYPE 0x90
+#define MPU_CTRL 0x94
+#define MPU_CTRL_ENABLE 1
+#define MPU_CTRL_PRIVDEFENA (1 << 2)
+
+#define PMSAv7_RNR 0x98
+#define PMSAv7_RBAR 0x9c
+#define PMSAv7_RASR 0xa0
+
+#define PMSAv8_RNR 0x98
+#define PMSAv8_RBAR 0x9c
+#define PMSAv8_RLAR 0xa0
+#define PMSAv8_RBAR_A(n) (PMSAv8_RBAR + 8*(n))
+#define PMSAv8_RLAR_A(n) (PMSAv8_RLAR + 8*(n))
+#define PMSAv8_MAIR0 0xc0
+#define PMSAv8_MAIR1 0xc4
+
+/* Cache operations */
+#define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */
+#define V7M_SCB_ICIMVAU 0x258 /* I-cache invalidate by MVA to PoU */
+#define V7M_SCB_DCIMVAC 0x25c /* D-cache invalidate by MVA to PoC */
+#define V7M_SCB_DCISW 0x260 /* D-cache invalidate by set-way */
+#define V7M_SCB_DCCMVAU 0x264 /* D-cache clean by MVA to PoU */
+#define V7M_SCB_DCCMVAC 0x268 /* D-cache clean by MVA to PoC */
+#define V7M_SCB_DCCSW 0x26c /* D-cache clean by set-way */
+#define V7M_SCB_DCCIMVAC 0x270 /* D-cache clean and invalidate by MVA to PoC */
+#define V7M_SCB_DCCISW 0x274 /* D-cache clean and invalidate by set-way */
+#define V7M_SCB_BPIALL		0x278		/* Branch predictor invalidate all */
+
+#ifndef __ASSEMBLY__
+
+enum reboot_mode;
+
+void armv7m_restart(enum reboot_mode mode, const char *cmd);
+
+#endif /* __ASSEMBLY__ */
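
The cache maintenance operations above are memory-mapped on v7-M rather than CP15-based. A minimal sketch of invalidating the whole I-cache, assuming "scb_base" is the mapped System Control Block base address (the name and wrapper function are illustrative):

	static void demo_icache_inval_all(void __iomem *scb_base)
	{
		/* any value written to ICIALLU invalidates the entire I-cache */
		writel(0, scb_base + V7M_SCB_ICIALLU);
		isb();
	}
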
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
new file mode 100644
index 000000000000..88364a6727ff
--- /dev/null
+++ b/arch/arm/include/asm/vdso.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_H
+#define __ASM_VDSO_H
+
+#ifdef __KERNEL__
+
+#define __VDSO_PAGES 4
+
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
+
+#ifdef CONFIG_VDSO
+
+void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
+
+extern unsigned int vdso_total_pages;
+
+#else /* CONFIG_VDSO */
+
+static inline void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
+{
+}
+
+#define vdso_total_pages 0
+
+#endif /* CONFIG_VDSO */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_VDSO_H */
diff --git a/arch/arm/include/asm/vdso/clocksource.h b/arch/arm/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..50c0b19fb755
--- /dev/null
+++ b/arch/arm/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif /* __ASM_VDSOCLOCKSOURCE_H */
diff --git a/arch/arm/include/asm/vdso/cp15.h b/arch/arm/include/asm/vdso/cp15.h
new file mode 100644
index 000000000000..bed16fa1865e
--- /dev/null
+++ b/arch/arm/include/asm/vdso/cp15.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_CP15_H
+#define __ASM_VDSO_CP15_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_CPU_CP15
+
+#include <linux/stringify.h>
+
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
+ "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm) \
+ "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({ \
+ t __val; \
+ asm volatile(r " " c : "=r" (__val)); \
+ __val; \
+})
+#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+
+#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
+#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+
+#define CNTVCT __ACCESS_CP15_64(1, c14)
+
+#endif /* CONFIG_CPU_CP15 */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_CP15_H */
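
The accessor pair above hides the mrc/mrrc distinction behind the operand descriptors, so 64-bit registers read naturally. A minimal sketch, valid only under CONFIG_CPU_CP15 ("demo_read_cntvct" is an illustrative name):

	static inline u64 demo_read_cntvct(void)
	{
		isb();				/* order the counter read against earlier code */
		return read_sysreg(CNTVCT);	/* expands to the mrrc form, yields a u64 */
	}
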
diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..1e9f81639c88
--- /dev/null
+++ b/arch/arm/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 ARM Limited
+ */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+#include <asm/errno.h>
+#include <asm/unistd.h>
+#include <asm/vdso/cp15.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline int gettimeofday_fallback(
+ struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct timezone *tz asm("r1") = _tz;
+ register struct __kernel_old_timeval *tv asm("r0") = _tv;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_gettimeofday;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (tv), "r" (tz), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline long clock_gettime_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_gettime64;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline long clock_gettime32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_gettime;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline int clock_getres_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_getres_time64;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline int clock_getres32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_getres;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static inline bool arm_vdso_hres_capable(void)
+{
+ return IS_ENABLED(CONFIG_ARM_ARCH_TIMER);
+}
+#define __arch_vdso_hres_capable arm_vdso_hres_capable
+
+static __always_inline u64 __arch_get_hw_counter(int clock_mode,
+ const struct vdso_time_data *vd)
+{
+#ifdef CONFIG_ARM_ARCH_TIMER
+ u64 cycle_now;
+
+ /*
+	 * The core code has already checked the mode, so getting here
+	 * means we raced against a concurrent update. Return something;
+	 * the core will do another round, see the mode change and fall
+	 * back to the syscall.
+ */
+ if (clock_mode == VDSO_CLOCKMODE_NONE)
+ return 0;
+
+ isb();
+ cycle_now = read_sysreg(CNTVCT);
+
+ return cycle_now;
+#else
+ /* Make GCC happy. This is compiled out anyway */
+ return 0;
+#endif
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/arm/include/asm/vdso/processor.h b/arch/arm/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..45efb3ff511c
--- /dev/null
+++ b/arch/arm/include/asm/vdso/processor.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
+#define cpu_relax() \
+ do { \
+ smp_mb(); \
+ __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
+ } while (0)
+#else
+#define cpu_relax() barrier()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
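
For context, cpu_relax() is used in busy-wait loops; on ARMv6 (or with erratum 754327) the definition above degrades to a barrier plus nops so the polled location is eventually re-read. A minimal sketch ("demo_wait_for_flag" is illustrative):

	static inline void demo_wait_for_flag(volatile int *flag)
	{
		while (!*flag)
			cpu_relax();
	}
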
diff --git a/arch/arm/include/asm/vdso/vsyscall.h b/arch/arm/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..ff1c729af05f
--- /dev/null
+++ b/arch/arm/include/asm/vdso/vsyscall.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <vdso/datapage.h>
+#include <asm/cacheflush.h>
+
+static __always_inline
+void __arch_sync_vdso_time_data(struct vdso_time_data *vdata)
+{
+ flush_dcache_page(virt_to_page(vdata));
+}
+#define __arch_sync_vdso_time_data __arch_sync_vdso_time_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/arm/include/asm/vermagic.h b/arch/arm/include/asm/vermagic.h
new file mode 100644
index 000000000000..62ce94e26a63
--- /dev/null
+++ b/arch/arm/include/asm/vermagic.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#include <linux/stringify.h>
+
+/*
+ * Add the ARM architecture version to the version magic string
+ */
+#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+
+/* Add __virt_to_phys patching state as well */
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+#else
+#define MODULE_ARCH_VERMAGIC_P2V ""
+#endif
+
+/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
+#ifdef CONFIG_THUMB2_KERNEL
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
+#else
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+ MODULE_ARCH_VERMAGIC_ARMVSN \
+ MODULE_ARCH_VERMAGIC_ARMTHUMB \
+ MODULE_ARCH_VERMAGIC_P2V
+
+#endif /* _ASM_VERMAGIC_H */
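
As a worked example: with __LINUX_ARM_ARCH__ == 7, CONFIG_THUMB2_KERNEL=y and CONFIG_ARM_PATCH_PHYS_VIRT=y, the three fragments concatenate as

	"ARMv7 " "thumb2 " "p2v8 "	/* i.e. "ARMv7 thumb2 p2v8 " */

so modules built for a different ISA or phys/virt patching state fail the vermagic check at load time.
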
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index f4ab34fd4f72..85ccc422d4d0 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/vfp.h
*
@@ -5,13 +6,8 @@
* First, the standard VFP set.
*/
-#define FPSID cr0
-#define FPSCR cr1
-#define MVFR1 cr6
-#define MVFR0 cr7
-#define FPEXC cr8
-#define FPINST cr9
-#define FPINST2 cr10
+#ifndef __ASM_VFP_H
+#define __ASM_VFP_H
/* FPSID bits */
#define FPSID_IMPLEMENTER_BIT (24)
@@ -22,6 +18,7 @@
#define FPSID_NODOUBLE (1<<20)
#define FPSID_ARCH_BIT (16)
#define FPSID_ARCH_MASK (0xF << FPSID_ARCH_BIT)
+#define FPSID_CPUID_ARCH_MASK (0x7F << FPSID_ARCH_BIT)
#define FPSID_PART_BIT (8)
#define FPSID_PART_MASK (0xFF << FPSID_PART_BIT)
#define FPSID_VARIANT_BIT (4)
@@ -75,6 +72,16 @@
/* MVFR0 bits */
#define MVFR0_A_SIMD_BIT (0)
#define MVFR0_A_SIMD_MASK (0xf << MVFR0_A_SIMD_BIT)
+#define MVFR0_SP_BIT (4)
+#define MVFR0_SP_MASK (0xf << MVFR0_SP_BIT)
+#define MVFR0_DP_BIT (8)
+#define MVFR0_DP_MASK (0xf << MVFR0_DP_BIT)
+
+/* MVFR1 bits */
+#define MVFR1_ASIMDHP_BIT (20)
+#define MVFR1_ASIMDHP_MASK (0xf << MVFR1_ASIMDHP_BIT)
+#define MVFR1_FPHP_BIT (24)
+#define MVFR1_FPHP_MASK (0xf << MVFR1_FPHP_BIT)
/* Bit patterns for decoding the packaged operation descriptors */
#define VFPOPDESC_LENGTH_BIT (9)
@@ -82,3 +89,9 @@
#define VFPOPDESC_UNUSED_BIT (24)
#define VFPOPDESC_UNUSED_MASK (0xFF << VFPOPDESC_UNUSED_BIT)
#define VFPOPDESC_OPDESC_MASK (~(VFPOPDESC_LENGTH_MASK | VFPOPDESC_UNUSED_MASK))
+
+#ifndef __ASSEMBLY__
+void vfp_disable(void);
+#endif
+
+#endif /* __ASM_VFP_H */
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index 301c1db3e99b..e2e1d5a3727a 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/arm/include/asm/vfpmacros.h
*
@@ -7,34 +8,35 @@
#include <asm/vfp.h>
-@ Macros to allow building with old toolkits (with no VFP support)
.macro VFPFMRX, rd, sysreg, cond
- MRC\cond p10, 7, \rd, \sysreg, cr0, 0 @ FMRX \rd, \sysreg
+ vmrs\cond \rd, \sysreg
.endm
.macro VFPFMXR, sysreg, rd, cond
- MCR\cond p10, 7, \rd, \sysreg, cr0, 0 @ FMXR \sysreg, \rd
+ vmsr\cond \sysreg, \rd
.endm
@ read all the working registers back into the VFP
.macro VFPFLDMIA, base, tmp
+ .fpu vfpv2
#if __LINUX_ARM_ARCH__ < 6
- LDC p11, cr0, [\base],#33*4 @ FLDMIAX \base!, {d0-d15}
+ fldmiax \base!, {d0-d15}
#else
- LDC p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d0-d15}
+ vldmia \base!, {d0-d15}
#endif
#ifdef CONFIG_VFPv3
+ .fpu vfpv3
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32
- ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ vldmiane \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers?
- ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ vldmiaeq \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#endif
#endif
@@ -43,22 +45,23 @@
@ write all the working registers out of the VFP
.macro VFPFSTMIA, base, tmp
#if __LINUX_ARM_ARCH__ < 6
- STC p11, cr0, [\base],#33*4 @ FSTMIAX \base!, {d0-d15}
+ fstmiax \base!, {d0-d15}
#else
- STC p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d0-d15}
+ vstmia \base!, {d0-d15}
#endif
#ifdef CONFIG_VFPv3
+ .fpu vfpv3
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32
- stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ vstmiane \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers?
- stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ vstmiaeq \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#endif
#endif
diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h
index 91f40217bfa5..6c430ec371df 100644
--- a/arch/arm/include/asm/vga.h
+++ b/arch/arm/include/asm/vga.h
@@ -1,9 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_VGA_H
#define ASMARM_VGA_H
#include <linux/io.h>
extern unsigned long vga_base;
+extern struct screen_info vgacon_screen_info;
#define VGA_MAP_MEM(x,s) (vga_base + (x))
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 50af92bac737..dd9697b2bde8 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -1,19 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2012 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef VIRT_H
@@ -29,6 +16,7 @@
#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
#ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
#ifdef CONFIG_ARM_VIRT_EXT
/*
@@ -41,10 +29,19 @@
*/
extern int __boot_cpu_mode;
-void __hyp_set_vectors(unsigned long phys_vector_base);
-unsigned long __hyp_get_vectors(void);
+static inline void sync_boot_mode(void)
+{
+ /*
+ * As secondaries write to __boot_cpu_mode with caches disabled, we
+ * must flush the corresponding cache entries to ensure the visibility
+ * of their writes.
+ */
+ sync_cache_r(&__boot_cpu_mode);
+}
+
#else
#define __boot_cpu_mode (SVC_MODE)
+#define sync_boot_mode()
#endif
#ifndef ZIMAGE
@@ -62,8 +59,23 @@ static inline bool is_hyp_mode_mismatched(void)
{
return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
}
+
+static inline bool is_kernel_in_hyp_mode(void)
+{
+ return false;
+}
+
#endif
+#else
+
+/* Only assembly code should need these */
+
+#define HVC_SET_VECTORS 0
+#define HVC_SOFT_RESTART 1
+
#endif /* __ASSEMBLY__ */
+#define HVC_STUB_ERR 0xbadca11
+
#endif /* ! VIRT_H */
diff --git a/arch/arm/include/asm/vmalloc.h b/arch/arm/include/asm/vmalloc.h
new file mode 100644
index 000000000000..a9b3718b8600
--- /dev/null
+++ b/arch/arm/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_ARM_VMALLOC_H
+#define _ASM_ARM_VMALLOC_H
+
+#endif /* _ASM_ARM_VMALLOC_H */
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
new file mode 100644
index 000000000000..0341973e30e1
--- /dev/null
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm-generic/vmlinux.lds.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define ARM_CPU_DISCARD(x)
+#define ARM_CPU_KEEP(x) x
+#else
+#define ARM_CPU_DISCARD(x) x
+#define ARM_CPU_KEEP(x)
+#endif
+
+#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
+#define ARM_EXIT_KEEP(x) x
+#define ARM_EXIT_DISCARD(x)
+#else
+#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x) x
+#endif
+
+#ifdef CONFIG_MMU
+#define ARM_MMU_KEEP(x) KEEP(x)
+#define ARM_MMU_DISCARD(x)
+#else
+#define ARM_MMU_KEEP(x)
+#define ARM_MMU_DISCARD(x) x
+#endif
+
+/*
+ * ld.lld does not support NOCROSSREFS:
+ * https://github.com/ClangBuiltLinux/linux/issues/1609
+ */
+#ifdef CONFIG_LD_IS_LLD
+#define NOCROSSREFS
+#endif
+
+#ifdef CONFIG_LD_CAN_USE_KEEP_IN_OVERLAY
+#define OVERLAY_KEEP(x) KEEP(x)
+#else
+#define OVERLAY_KEEP(x) x
+#endif
+
+/* Set start/end symbol names to the LMA for the section */
+#define ARM_LMA(sym, section) \
+ sym##_start = LOADADDR(section); \
+ sym##_end = LOADADDR(section) + SIZEOF(section)
+
+#define PROC_INFO \
+ . = ALIGN(4); \
+ __proc_info_begin = .; \
+ KEEP(*(.proc.info.init)) \
+ __proc_info_end = .;
+
+#define IDMAP_TEXT \
+ ALIGN_FUNCTION(); \
+ __idmap_text_start = .; \
+ *(.idmap.text) \
+	__idmap_text_end = .;
+
+#define ARM_DISCARD \
+ *(.ARM.exidx.exit.text) \
+ *(.ARM.extab.exit.text) \
+ *(.ARM.exidx.text.exit) \
+ *(.ARM.extab.text.exit) \
+ ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
+ ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
+ ARM_EXIT_DISCARD(EXIT_TEXT) \
+ ARM_EXIT_DISCARD(EXIT_DATA) \
+ EXIT_CALL \
+ ARM_MMU_DISCARD(*(.text.fixup)) \
+ ARM_MMU_DISCARD(*(__ex_table)) \
+ COMMON_DISCARDS
+
+/*
+ * Sections that should stay zero sized, which is safer to explicitly
+ * check instead of blindly discarding.
+ */
+#define ARM_ASSERTS \
+ .plt : { \
+		*(.iplt) *(.rel.iplt) *(.igot.plt)			\
+ } \
+ ASSERT(SIZEOF(.plt) == 0, \
+ "Unexpected run-time procedure linkages detected!")
+
+#define ARM_DETAILS \
+ ELF_DETAILS \
+ .ARM.attributes 0 : { *(.ARM.attributes) }
+
+#define ARM_STUBS_TEXT \
+ *(.gnu.warning) \
+ *(.glue_7) \
+ *(.glue_7t) \
+ *(.vfp11_veneer) \
+ *(.v4_bx)
+
+#define ARM_TEXT \
+ IDMAP_TEXT \
+ __entry_text_start = .; \
+ *(.entry.text) \
+ __entry_text_end = .; \
+ IRQENTRY_TEXT \
+ SOFTIRQENTRY_TEXT \
+ TEXT_TEXT \
+ SCHED_TEXT \
+ LOCK_TEXT \
+ KPROBES_TEXT \
+ ARM_STUBS_TEXT \
+ . = ALIGN(4); \
+ *(.got) /* Global offset table */ \
+ ARM_CPU_KEEP(PROC_INFO)
+
+/* Stack unwinding tables */
+#define ARM_UNWIND_SECTIONS \
+ . = ALIGN(8); \
+ .ARM.unwind_idx : { \
+ __start_unwind_idx = .; \
+ *(.ARM.exidx*) \
+ __stop_unwind_idx = .; \
+ } \
+ .ARM.unwind_tab : { \
+ __start_unwind_tab = .; \
+ *(.ARM.extab*) \
+ __stop_unwind_tab = .; \
+ }
+
+/*
+ * The vectors and stubs are relocatable code, and the
+ * only thing that matters is their relative offsets
+ */
+#define ARM_VECTORS \
+ __vectors_lma = .; \
+ OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \
+ .vectors { \
+ OVERLAY_KEEP(*(.vectors)) \
+ } \
+ .vectors.bhb.loop8 { \
+ OVERLAY_KEEP(*(.vectors.bhb.loop8)) \
+ } \
+ .vectors.bhb.bpiall { \
+ OVERLAY_KEEP(*(.vectors.bhb.bpiall)) \
+ } \
+ } \
+ ARM_LMA(__vectors, .vectors); \
+ ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8); \
+ ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall); \
+ . = __vectors_lma + SIZEOF(.vectors) + \
+ SIZEOF(.vectors.bhb.loop8) + \
+ SIZEOF(.vectors.bhb.bpiall); \
+ \
+ __stubs_lma = .; \
+ .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) { \
+ *(.stubs) \
+ } \
+ ARM_LMA(__stubs, .stubs); \
+ . = __stubs_lma + SIZEOF(.stubs); \
+ \
+ PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
+
+#define ARM_TCM \
+ __itcm_start = ALIGN(4); \
+ .text_itcm ITCM_OFFSET : AT(__itcm_start - LOAD_OFFSET) { \
+ __sitcm_text = .; \
+ *(.tcm.text) \
+ *(.tcm.rodata) \
+ . = ALIGN(4); \
+ __eitcm_text = .; \
+ } \
+ . = __itcm_start + SIZEOF(.text_itcm); \
+ \
+ __dtcm_start = .; \
+ .data_dtcm DTCM_OFFSET : AT(__dtcm_start - LOAD_OFFSET) { \
+ __sdtcm_data = .; \
+ *(.tcm.data) \
+ . = ALIGN(4); \
+ __edtcm_data = .; \
+ } \
+ . = __dtcm_start + SIZEOF(.data_dtcm);
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
index 4d52f92967a6..f9a3897b06e7 100644
--- a/arch/arm/include/asm/word-at-a-time.h
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_WORD_AT_A_TIME_H
#define __ASM_ARM_WORD_AT_A_TIME_H
@@ -7,7 +8,8 @@
* Little-endian word-at-a-time zero byte handling.
* Heavily based on the x86 algorithm.
*/
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
struct word_at_a_time {
const unsigned long one_bits, high_bits;
@@ -48,10 +50,14 @@ static inline unsigned long find_zero(unsigned long mask)
return ret;
}
-#ifdef CONFIG_DCACHE_WORD_ACCESS
-
#define zero_bytemask(mask) (mask)
+#else /* __ARMEB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
/*
* Load an unaligned word from kernel space.
*
@@ -67,13 +73,17 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
asm(
"1: ldr %0, [%2]\n"
"2:\n"
- " .pushsection .fixup,\"ax\"\n"
+ " .pushsection .text.fixup,\"ax\"\n"
" .align 2\n"
"3: and %1, %2, #0x3\n"
" bic %2, %2, #0x3\n"
" ldr %0, [%2]\n"
" lsl %1, %1, #0x3\n"
+#ifndef __ARMEB__
" lsr %0, %0, %1\n"
+#else
+ " lsl %0, %0, %1\n"
+#endif
" b 2b\n"
" .popsection\n"
" .pushsection __ex_table,\"a\"\n"
@@ -86,11 +96,5 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
return ret;
}
-
#endif /* DCACHE_WORD_ACCESS */
-
-#else /* __ARMEB__ */
-#include <asm-generic/word-at-a-time.h>
-#endif
-
#endif /* __ASM_ARM_WORD_AT_A_TIME_H */
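
For context, load_unaligned_zeropad() is used by word-at-a-time string code such as the dcache name hashing: the fixup above redoes the access as an aligned load and shifts, so reading past the end of a string cannot fault. A minimal sketch ("demo_first_word" is an illustrative name):

	static inline unsigned long demo_first_word(const char *name)
	{
		/* safe even if 'name' ends just before an unmapped page */
		return load_unaligned_zeropad(name);
	}
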
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 8b1f37bfeeec..c83086f745cf 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_XEN_EVENTS_H
#define _ASM_ARM_XEN_EVENTS_H
@@ -16,8 +17,14 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->ARM_cpsr);
}
-#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\
atomic64_t, \
counter), (val))
+/* Rebinding event channels is supported by default */
+static inline bool xen_support_evtchn_rebind(void)
+{
+ return true;
+}
+
#endif /* _ASM_ARM_XEN_EVENTS_H */
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 7704e28c3483..3522cbaed316 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -1,71 +1 @@
-/******************************************************************************
- * hypercall.h
- *
- * Linux-specific hypervisor handling.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _ASM_ARM_XEN_HYPERCALL_H
-#define _ASM_ARM_XEN_HYPERCALL_H
-
-#include <xen/interface/xen.h>
-
-long privcmd_call(unsigned call, unsigned long a1,
- unsigned long a2, unsigned long a3,
- unsigned long a4, unsigned long a5);
-int HYPERVISOR_xen_version(int cmd, void *arg);
-int HYPERVISOR_console_io(int cmd, int count, char *str);
-int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
-int HYPERVISOR_sched_op(int cmd, void *arg);
-int HYPERVISOR_event_channel_op(int cmd, void *arg);
-unsigned long HYPERVISOR_hvm_op(int op, void *arg);
-int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
-int HYPERVISOR_physdev_op(int cmd, void *arg);
-int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
-int HYPERVISOR_tmem_op(void *arg);
-
-static inline void
-MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
- unsigned int new_val, unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
- int count, int *success_count, domid_t domid)
-{
- BUG();
-}
-
-static inline int
-HYPERVISOR_multicall(void *call_list, int nr_calls)
-{
- BUG();
-}
-#endif /* _ASM_ARM_XEN_HYPERCALL_H */
+#include <xen/arm/hypercall.h>
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index d7ab99a0c9eb..d6e7709d0688 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -1,19 +1 @@
-#ifndef _ASM_ARM_XEN_HYPERVISOR_H
-#define _ASM_ARM_XEN_HYPERVISOR_H
-
-extern struct shared_info *HYPERVISOR_shared_info;
-extern struct start_info *xen_start_info;
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE,
- PARAVIRT_LAZY_MMU,
- PARAVIRT_LAZY_CPU,
-};
-
-static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
- return PARAVIRT_LAZY_NONE;
-}
-
-#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
+#include <xen/arm/hypervisor.h>
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 1151188bcd83..88c0d75da190 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -1,80 +1 @@
-/******************************************************************************
- * Guest OS interface to ARM Xen.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- */
-
-#ifndef _ASM_ARM_XEN_INTERFACE_H
-#define _ASM_ARM_XEN_INTERFACE_H
-
-#include <linux/types.h>
-
-#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
-
-#define __DEFINE_GUEST_HANDLE(name, type) \
- typedef struct { union { type *p; uint64_aligned_t q; }; } \
- __guest_handle_ ## name
-
-#define DEFINE_GUEST_HANDLE_STRUCT(name) \
- __DEFINE_GUEST_HANDLE(name, struct name)
-#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
-#define GUEST_HANDLE(name) __guest_handle_ ## name
-
-#define set_xen_guest_handle(hnd, val) \
- do { \
- if (sizeof(hnd) == 8) \
- *(uint64_t *)&(hnd) = 0; \
- (hnd).p = val; \
- } while (0)
-
-#ifndef __ASSEMBLY__
-/* Explicitly size integers that represent pfns in the interface with
- * Xen so that we can have one ABI that works for 32 and 64 bit guests.
- * Note that this means that the xen_pfn_t type may be capable of
- * representing pfn's which the guest cannot represent in its own pfn
- * type. However since pfn space is controlled by the guest this is
- * fine since it simply wouldn't be able to create any sure pfns in
- * the first place.
- */
-typedef uint64_t xen_pfn_t;
-#define PRI_xen_pfn "llx"
-typedef uint64_t xen_ulong_t;
-#define PRI_xen_ulong "llx"
-/* Guest handles for primitive C types. */
-__DEFINE_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_GUEST_HANDLE(uint, unsigned int);
-DEFINE_GUEST_HANDLE(char);
-DEFINE_GUEST_HANDLE(int);
-DEFINE_GUEST_HANDLE(void);
-DEFINE_GUEST_HANDLE(uint64_t);
-DEFINE_GUEST_HANDLE(uint32_t);
-DEFINE_GUEST_HANDLE(xen_pfn_t);
-DEFINE_GUEST_HANDLE(xen_ulong_t);
-
-/* Maximum number of virtual CPUs in multi-processor guests. */
-#define MAX_VIRT_CPUS 1
-
-struct arch_vcpu_info { };
-struct arch_shared_info { };
-
-/* TODO: Move pvclock definitions some place arch independent */
-struct pvclock_vcpu_time_info {
- u32 version;
- u32 pad0;
- u64 tsc_timestamp;
- u64 system_time;
- u32 tsc_to_system_mul;
- s8 tsc_shift;
- u8 flags;
- u8 pad[2];
-} __attribute__((__packed__)); /* 32 bytes */
-
-/* It is OK to have a 12 bytes struct with no padding because it is packed */
-struct pvclock_wall_clock {
- u32 version;
- u32 sec;
- u32 nsec;
-} __attribute__((__packed__));
-#endif
-
-#endif /* _ASM_ARM_XEN_INTERFACE_H */
+#include <xen/arm/interface.h>
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 359a7b50b158..dc7f6e91aafa 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1,92 +1,6 @@
-#ifndef _ASM_ARM_XEN_PAGE_H
-#define _ASM_ARM_XEN_PAGE_H
+#include <xen/arm/page.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#include <linux/pfn.h>
-#include <linux/types.h>
-
-#include <xen/interface/grant_table.h>
-
-#define pfn_to_mfn(pfn) (pfn)
-#define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_pfn(mfn) (mfn)
-#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
-
-#define pte_mfn pte_pfn
-#define mfn_pte pfn_pte
-
-/* Xen machine address */
-typedef struct xmaddr {
- phys_addr_t maddr;
-} xmaddr_t;
-
-/* Xen pseudo-physical address */
-typedef struct xpaddr {
- phys_addr_t paddr;
-} xpaddr_t;
-
-#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
-#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
-
-#define INVALID_P2M_ENTRY (~0UL)
-
-static inline xmaddr_t phys_to_machine(xpaddr_t phys)
-{
- unsigned offset = phys.paddr & ~PAGE_MASK;
- return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
-}
-
-static inline xpaddr_t machine_to_phys(xmaddr_t machine)
-{
- unsigned offset = machine.maddr & ~PAGE_MASK;
- return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
-}
-/* VIRT <-> MACHINE conversion */
-#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_pfn(v) (PFN_DOWN(__pa(v)))
-#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
-#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
-
-static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
-{
- /* TODO: assuming it is mapped in the kernel 1:1 */
- return virt_to_machine(vaddr);
-}
-
-/* TODO: this shouldn't be here but it is because the frontend drivers
- * are using it (its rolled in headers) even though we won't hit the code path.
- * So for right now just punt with this.
- */
-static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
-{
- BUG();
- return NULL;
-}
-
-static inline int m2p_add_override(unsigned long mfn, struct page *page,
- struct gnttab_map_grant_ref *kmap_op)
-{
- return 0;
-}
-
-static inline int m2p_remove_override(struct page *page, bool clear_pte)
+static inline bool xen_kernel_unmapped_at_usr(void)
{
- return 0;
+ return false;
}
-
-static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
- return true;
-}
-
-static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- return __set_phys_to_machine(pfn, mfn);
-}
-
-#define xen_remap(cookie, size) ioremap_cached((cookie), (size));
-
-#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/include/asm/xen/swiotlb-xen.h b/arch/arm/include/asm/xen/swiotlb-xen.h
new file mode 100644
index 000000000000..455ade5d5320
--- /dev/null
+++ b/arch/arm/include/asm/xen/swiotlb-xen.h
@@ -0,0 +1 @@
+#include <xen/arm/swiotlb-xen.h>
diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h
new file mode 100644
index 000000000000..7ebb7eb0bd93
--- /dev/null
+++ b/arch/arm/include/asm/xen/xen-ops.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <xen/arm/xen-ops.h>
diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h
index 7604673dc427..934b549905f5 100644
--- a/arch/arm/include/asm/xor.h
+++ b/arch/arm/include/asm/xor.h
@@ -1,13 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/xor.h
*
* Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#include <linux/hardirq.h>
#include <asm-generic/xor.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
#define __XOR(a1, a2) a1 ^= a2
@@ -44,13 +44,14 @@
: "0" (dst), "r" (a1), "r" (a2), "r" (a3), "r" (a4))
static void
-xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_arm4regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
unsigned int lines = bytes / sizeof(unsigned long) / 4;
register unsigned int a1 __asm__("r4");
register unsigned int a2 __asm__("r5");
register unsigned int a3 __asm__("r6");
- register unsigned int a4 __asm__("r7");
+ register unsigned int a4 __asm__("r10");
register unsigned int b1 __asm__("r8");
register unsigned int b2 __asm__("r9");
register unsigned int b3 __asm__("ip");
@@ -64,14 +65,15 @@ xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_arm4regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
unsigned int lines = bytes / sizeof(unsigned long) / 4;
register unsigned int a1 __asm__("r4");
register unsigned int a2 __asm__("r5");
register unsigned int a3 __asm__("r6");
- register unsigned int a4 __asm__("r7");
+ register unsigned int a4 __asm__("r10");
register unsigned int b1 __asm__("r8");
register unsigned int b2 __asm__("r9");
register unsigned int b3 __asm__("ip");
@@ -86,8 +88,10 @@ xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_arm4regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
unsigned int lines = bytes / sizeof(unsigned long) / 2;
register unsigned int a1 __asm__("r8");
@@ -105,8 +109,11 @@ xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_arm4regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_arm4regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
unsigned int lines = bytes / sizeof(unsigned long) / 2;
register unsigned int a1 __asm__("r8");
@@ -138,4 +145,81 @@ static struct xor_block_template xor_block_arm4regs = {
xor_speed(&xor_block_arm4regs); \
xor_speed(&xor_block_8regs); \
xor_speed(&xor_block_32regs); \
+ NEON_TEMPLATES; \
} while (0)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+extern struct xor_block_template const xor_block_neon_inner;
+
+static void
+xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_2(bytes, p1, p2);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_2(bytes, p1, p2);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_3(bytes, p1, p2, p3);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_3(bytes, p1, p2, p3);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_4(bytes, p1, p2, p3, p4);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_4(bytes, p1, p2, p3, p4);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_5(bytes, p1, p2, p3, p4, p5);
+ kernel_neon_end();
+ }
+}
+
+static struct xor_block_template xor_block_neon = {
+ .name = "neon",
+ .do_2 = xor_neon_2,
+ .do_3 = xor_neon_3,
+ .do_4 = xor_neon_4,
+ .do_5 = xor_neon_5
+};
+
+#define NEON_TEMPLATES \
+ do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0)
+#else
+#define NEON_TEMPLATES
+#endif
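
The in_interrupt() checks above exist because kernel_neon_begin() cannot be used from interrupt context, so the scalar 4-regs routines serve as the fallback. Callers never pick a template directly; they go through the generic helper from crypto/xor.c, which benchmarks the candidates (including "neon" when available) via xor_speed() at boot. A hypothetical sketch of the consuming side ("demo_xor" is illustrative, assuming <linux/raid/xor.h>):

	static void demo_xor(void *dst, void *src, unsigned int bytes)
	{
		void *srcs[1] = { src };

		/* XOR one source buffer into dst using the fastest template */
		xor_blocks(1, bytes, dst, srcs);
	}
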