Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r-- arch/arm/include/asm/Kbuild | 14
-rw-r--r-- arch/arm/include/asm/arch_gicv3.h | 138
-rw-r--r-- arch/arm/include/asm/arch_timer.h | 37
-rw-r--r-- arch/arm/include/asm/archrandom.h | 12
-rw-r--r-- arch/arm/include/asm/arm_pmuv3.h | 252
-rw-r--r-- arch/arm/include/asm/assembler.h | 403
-rw-r--r-- arch/arm/include/asm/atomic.h | 109
-rw-r--r-- arch/arm/include/asm/bitops.h | 19
-rw-r--r-- arch/arm/include/asm/bug.h | 4
-rw-r--r-- arch/arm/include/asm/bugs.h | 4
-rw-r--r-- arch/arm/include/asm/cache.h | 2
-rw-r--r-- arch/arm/include/asm/cacheflush.h | 54
-rw-r--r-- arch/arm/include/asm/checksum.h | 16
-rw-r--r-- arch/arm/include/asm/clocksource.h | 7
-rw-r--r-- arch/arm/include/asm/cmpxchg.h | 27
-rw-r--r-- arch/arm/include/asm/cp15.h | 20
-rw-r--r-- arch/arm/include/asm/cpuidle.h | 10
-rw-r--r-- arch/arm/include/asm/cputype.h | 4
-rw-r--r-- arch/arm/include/asm/current.h | 71
-rw-r--r-- arch/arm/include/asm/delay.h | 2
-rw-r--r-- arch/arm/include/asm/device.h | 7
-rw-r--r-- arch/arm/include/asm/div64.h | 32
-rw-r--r-- arch/arm/include/asm/dma-contiguous.h | 15
-rw-r--r-- arch/arm/include/asm/dma-direct.h | 17
-rw-r--r-- arch/arm/include/asm/dma-iommu.h | 5
-rw-r--r-- arch/arm/include/asm/dma-mapping.h | 199
-rw-r--r-- arch/arm/include/asm/dma.h | 13
-rw-r--r-- arch/arm/include/asm/domain.h | 15
-rw-r--r-- arch/arm/include/asm/efi.h | 61
-rw-r--r-- arch/arm/include/asm/elf.h | 12
-rw-r--r-- arch/arm/include/asm/entry-macro-multi.S | 40
-rw-r--r-- arch/arm/include/asm/exception.h | 4
-rw-r--r-- arch/arm/include/asm/fb.h | 15
-rw-r--r-- arch/arm/include/asm/fixmap.h | 8
-rw-r--r-- arch/arm/include/asm/floppy.h | 90
-rw-r--r-- arch/arm/include/asm/fpstate.h | 11
-rw-r--r-- arch/arm/include/asm/ftrace.h | 11
-rw-r--r-- arch/arm/include/asm/futex.h | 14
-rw-r--r-- arch/arm/include/asm/gpio.h | 26
-rw-r--r-- arch/arm/include/asm/hardirq.h | 28
-rw-r--r-- arch/arm/include/asm/hardware/cache-aurora-l2.h | 5
-rw-r--r-- arch/arm/include/asm/hardware/cache-feroceon-l2.h | 6
-rw-r--r-- arch/arm/include/asm/hardware/cache-l2x0.h | 2
-rw-r--r-- arch/arm/include/asm/hardware/cache-tauros2.h | 5
-rw-r--r-- arch/arm/include/asm/hardware/dec21285.h | 20
-rw-r--r-- arch/arm/include/asm/hardware/entry-macro-iomd.S | 131
-rw-r--r-- arch/arm/include/asm/hardware/it8152.h | 116
-rw-r--r-- arch/arm/include/asm/hardware/locomo.h | 6
-rw-r--r-- arch/arm/include/asm/hardware/sa1111.h | 4
-rw-r--r-- arch/arm/include/asm/highmem.h | 43
-rw-r--r-- arch/arm/include/asm/hugetlb.h | 8
-rw-r--r-- arch/arm/include/asm/hypervisor.h | 3
-rw-r--r-- arch/arm/include/asm/ide.h | 24
-rw-r--r-- arch/arm/include/asm/idmap.h | 4
-rw-r--r-- arch/arm/include/asm/insn.h | 25
-rw-r--r-- arch/arm/include/asm/io.h | 62
-rw-r--r-- arch/arm/include/asm/irq.h | 6
-rw-r--r-- arch/arm/include/asm/kasan.h | 33
-rw-r--r-- arch/arm/include/asm/kasan_def.h | 81
-rw-r--r-- arch/arm/include/asm/kexec-internal.h | 12
-rw-r--r-- arch/arm/include/asm/kexec.h | 7
-rw-r--r-- arch/arm/include/asm/kfence.h | 53
-rw-r--r-- arch/arm/include/asm/kmap_types.h | 10
-rw-r--r-- arch/arm/include/asm/kprobes.h | 24
-rw-r--r-- arch/arm/include/asm/kvm_arm.h | 239
-rw-r--r-- arch/arm/include/asm/kvm_asm.h | 77
-rw-r--r-- arch/arm/include/asm/kvm_coproc.h | 36
-rw-r--r-- arch/arm/include/asm/kvm_emulate.h | 351
-rw-r--r-- arch/arm/include/asm/kvm_host.h | 457
-rw-r--r-- arch/arm/include/asm/kvm_hyp.h | 126
-rw-r--r-- arch/arm/include/asm/kvm_mmio.h | 26
-rw-r--r-- arch/arm/include/asm/kvm_mmu.h | 435
-rw-r--r-- arch/arm/include/asm/kvm_ras.h | 14
-rw-r--r-- arch/arm/include/asm/mach/arch.h | 7
-rw-r--r-- arch/arm/include/asm/mach/dma.h | 5
-rw-r--r-- arch/arm/include/asm/mach/map.h | 1
-rw-r--r-- arch/arm/include/asm/mach/pci.h | 7
-rw-r--r-- arch/arm/include/asm/mach/time.h | 2
-rw-r--r-- arch/arm/include/asm/memory.h | 128
-rw-r--r-- arch/arm/include/asm/mmu.h | 2
-rw-r--r-- arch/arm/include/asm/mmu_context.h | 48
-rw-r--r-- arch/arm/include/asm/module.h | 52
-rw-r--r-- arch/arm/include/asm/module.lds.h | 7
-rw-r--r-- arch/arm/include/asm/nwflash.h | 1
-rw-r--r-- arch/arm/include/asm/opcodes.h | 9
-rw-r--r-- arch/arm/include/asm/page.h | 35
-rw-r--r-- arch/arm/include/asm/paravirt.h | 14
-rw-r--r-- arch/arm/include/asm/paravirt_api_clock.h | 1
-rw-r--r-- arch/arm/include/asm/pci.h | 5
-rw-r--r-- arch/arm/include/asm/percpu.h | 37
-rw-r--r-- arch/arm/include/asm/perf_event.h | 2
-rw-r--r-- arch/arm/include/asm/pgalloc.h | 21
-rw-r--r-- arch/arm/include/asm/pgtable-2level.h | 38
-rw-r--r-- arch/arm/include/asm/pgtable-3level.h | 41
-rw-r--r-- arch/arm/include/asm/pgtable-nommu.h | 17
-rw-r--r-- arch/arm/include/asm/pgtable.h | 127
-rw-r--r-- arch/arm/include/asm/proc-fns.h | 2
-rw-r--r-- arch/arm/include/asm/processor.h | 19
-rw-r--r-- arch/arm/include/asm/prom.h | 4
-rw-r--r-- arch/arm/include/asm/ptdump.h | 1
-rw-r--r-- arch/arm/include/asm/ptrace.h | 34
-rw-r--r-- arch/arm/include/asm/seccomp.h | 11
-rw-r--r-- arch/arm/include/asm/sections.h | 6
-rw-r--r-- arch/arm/include/asm/semihost.h | 30
-rw-r--r-- arch/arm/include/asm/set_memory.h | 9
-rw-r--r-- arch/arm/include/asm/setup.h | 18
-rw-r--r-- arch/arm/include/asm/signal.h | 7
-rw-r--r-- arch/arm/include/asm/simd.h | 8
-rw-r--r-- arch/arm/include/asm/smp.h | 17
-rw-r--r-- arch/arm/include/asm/sparsemem.h | 2
-rw-r--r-- arch/arm/include/asm/spectre.h | 42
-rw-r--r-- arch/arm/include/asm/spinlock.h | 2
-rw-r--r-- arch/arm/include/asm/spinlock_types.h | 2
-rw-r--r-- arch/arm/include/asm/stackprotector.h | 11
-rw-r--r-- arch/arm/include/asm/stacktrace.h | 24
-rw-r--r-- arch/arm/include/asm/stage2_pgtable.h | 75
-rw-r--r-- arch/arm/include/asm/string.h | 26
-rw-r--r-- arch/arm/include/asm/suspend.h | 1
-rw-r--r-- arch/arm/include/asm/switch_to.h | 5
-rw-r--r-- arch/arm/include/asm/sync_bitops.h | 29
-rw-r--r-- arch/arm/include/asm/syscall.h | 29
-rw-r--r-- arch/arm/include/asm/syscalls.h | 51
-rw-r--r-- arch/arm/include/asm/system_misc.h | 1
-rw-r--r-- arch/arm/include/asm/tcm.h | 19
-rw-r--r-- arch/arm/include/asm/thread_info.h | 88
-rw-r--r-- arch/arm/include/asm/timex.h | 1
-rw-r--r-- arch/arm/include/asm/tlb.h | 16
-rw-r--r-- arch/arm/include/asm/tlbflush.h | 27
-rw-r--r-- arch/arm/include/asm/tls.h | 35
-rw-r--r-- arch/arm/include/asm/topology.h | 10
-rw-r--r-- arch/arm/include/asm/traps.h | 15
-rw-r--r-- arch/arm/include/asm/uaccess-asm.h | 111
-rw-r--r-- arch/arm/include/asm/uaccess.h | 208
-rw-r--r-- arch/arm/include/asm/ucontext.h | 14
-rw-r--r-- arch/arm/include/asm/unaligned.h | 27
-rw-r--r-- arch/arm/include/asm/unified.h | 4
-rw-r--r-- arch/arm/include/asm/unwind.h | 8
-rw-r--r-- arch/arm/include/asm/user.h | 4
-rw-r--r-- arch/arm/include/asm/v7m.h | 3
-rw-r--r-- arch/arm/include/asm/vdso/clocksource.h | 8
-rw-r--r-- arch/arm/include/asm/vdso/cp15.h | 38
-rw-r--r-- arch/arm/include/asm/vdso/gettimeofday.h | 60
-rw-r--r-- arch/arm/include/asm/vdso/processor.h | 22
-rw-r--r-- arch/arm/include/asm/vdso/vsyscall.h | 35
-rw-r--r-- arch/arm/include/asm/vermagic.h | 31
-rw-r--r-- arch/arm/include/asm/vfp.h | 8
-rw-r--r-- arch/arm/include/asm/vfpmacros.h | 31
-rw-r--r-- arch/arm/include/asm/vga.h | 1
-rw-r--r-- arch/arm/include/asm/virt.h | 17
-rw-r--r-- arch/arm/include/asm/vmalloc.h | 4
-rw-r--r-- arch/arm/include/asm/vmlinux.lds.h | 171
-rw-r--r-- arch/arm/include/asm/xen/page.h | 5
-rw-r--r-- arch/arm/include/asm/xen/swiotlb-xen.h | 1
-rw-r--r-- arch/arm/include/asm/xen/xen-ops.h (renamed from arch/arm/include/asm/xen/page-coherent.h) | 2
-rw-r--r-- arch/arm/include/asm/xor.h | 46
155 files changed, 2531 insertions(+), 3891 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index fa579b23b4df..03657ff8fbe3 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,22 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += compat.h
-generic-y += current.h
generic-y += early_ioremap.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += extable.h
generic-y += flat.h
-generic-y += irq_regs.h
-generic-y += kdebug.h
-generic-y += local.h
-generic-y += local64.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
generic-y += parport.h
-generic-y += preempt.h
-generic-y += seccomp.h
-generic-y += serial.h
-generic-y += trace_clock.h
generated-y += mach-types.h
generated-y += unistd-nr.h
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index fa50bb04f580..311e83038bdb 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -10,6 +10,7 @@
#ifndef __ASSEMBLY__
#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
@@ -37,71 +38,6 @@
#define ICC_AP1R2 __ICC_AP1Rx(2)
#define ICC_AP1R3 __ICC_AP1Rx(3)
-#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5)
-
-#define ICH_VSEIR __ACCESS_CP15(c12, 4, c9, 4)
-#define ICH_HCR __ACCESS_CP15(c12, 4, c11, 0)
-#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1)
-#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2)
-#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3)
-#define ICH_ELRSR __ACCESS_CP15(c12, 4, c11, 5)
-#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7)
-
-#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x)
-#define __LR8(x) __ACCESS_CP15(c12, 4, c13, x)
-
-#define ICH_LR0 __LR0(0)
-#define ICH_LR1 __LR0(1)
-#define ICH_LR2 __LR0(2)
-#define ICH_LR3 __LR0(3)
-#define ICH_LR4 __LR0(4)
-#define ICH_LR5 __LR0(5)
-#define ICH_LR6 __LR0(6)
-#define ICH_LR7 __LR0(7)
-#define ICH_LR8 __LR8(0)
-#define ICH_LR9 __LR8(1)
-#define ICH_LR10 __LR8(2)
-#define ICH_LR11 __LR8(3)
-#define ICH_LR12 __LR8(4)
-#define ICH_LR13 __LR8(5)
-#define ICH_LR14 __LR8(6)
-#define ICH_LR15 __LR8(7)
-
-/* LR top half */
-#define __LRC0(x) __ACCESS_CP15(c12, 4, c14, x)
-#define __LRC8(x) __ACCESS_CP15(c12, 4, c15, x)
-
-#define ICH_LRC0 __LRC0(0)
-#define ICH_LRC1 __LRC0(1)
-#define ICH_LRC2 __LRC0(2)
-#define ICH_LRC3 __LRC0(3)
-#define ICH_LRC4 __LRC0(4)
-#define ICH_LRC5 __LRC0(5)
-#define ICH_LRC6 __LRC0(6)
-#define ICH_LRC7 __LRC0(7)
-#define ICH_LRC8 __LRC8(0)
-#define ICH_LRC9 __LRC8(1)
-#define ICH_LRC10 __LRC8(2)
-#define ICH_LRC11 __LRC8(3)
-#define ICH_LRC12 __LRC8(4)
-#define ICH_LRC13 __LRC8(5)
-#define ICH_LRC14 __LRC8(6)
-#define ICH_LRC15 __LRC8(7)
-
-#define __ICH_AP0Rx(x) __ACCESS_CP15(c12, 4, c8, x)
-#define ICH_AP0R0 __ICH_AP0Rx(0)
-#define ICH_AP0R1 __ICH_AP0Rx(1)
-#define ICH_AP0R2 __ICH_AP0Rx(2)
-#define ICH_AP0R3 __ICH_AP0Rx(3)
-
-#define __ICH_AP1Rx(x) __ACCESS_CP15(c12, 4, c9, x)
-#define ICH_AP1R0 __ICH_AP1Rx(0)
-#define ICH_AP1R1 __ICH_AP1Rx(1)
-#define ICH_AP1R2 __ICH_AP1Rx(2)
-#define ICH_AP1R3 __ICH_AP1Rx(3)
-
-/* A32-to-A64 mappings used by VGIC save/restore */
-
#define CPUIF_MAP(a32, a64) \
static inline void write_ ## a64(u32 val) \
{ \
@@ -112,21 +48,7 @@ static inline u32 read_ ## a64(void) \
return read_sysreg(a32); \
} \
-#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64) \
-static inline void write_ ## a64(u64 val) \
-{ \
- write_sysreg(lower_32_bits(val), a32lo);\
- write_sysreg(upper_32_bits(val), a32hi);\
-} \
-static inline u64 read_ ## a64(void) \
-{ \
- u64 val = read_sysreg(a32lo); \
- \
- val |= (u64)read_sysreg(a32hi) << 32; \
- \
- return val; \
-}
-
+CPUIF_MAP(ICC_EOIR1, ICC_EOIR1_EL1)
CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)
@@ -137,51 +59,11 @@ CPUIF_MAP(ICC_AP1R1, ICC_AP1R1_EL1)
CPUIF_MAP(ICC_AP1R2, ICC_AP1R2_EL1)
CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1)
-CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
-CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
-CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
-CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
-CPUIF_MAP(ICH_ELRSR, ICH_ELRSR_EL2)
-CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
-CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
-CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
-CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
-CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
-CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
-CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
-CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
-CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
-CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
-CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
-
-CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
-CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
-CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
-CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
-CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
-CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
-CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
-CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
-CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
-CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
-CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
-CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
-CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
-CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
-CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
-CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
-
#define read_gicreg(r) read_##r()
#define write_gicreg(v, r) write_##r(v)
/* Low-level accessors */
-static inline void gic_write_eoir(u32 irq)
-{
- write_sysreg(irq, ICC_EOIR1);
- isb();
-}
-
static inline void gic_write_dir(u32 val)
{
write_sysreg(val, ICC_DIR);
@@ -325,15 +207,16 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
#define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c)
/*
- * GITS_VPROPBASER - hi and lo bits may be accessed independently.
+ * GICR_VPROPBASER - hi and lo bits may be accessed independently.
*/
-#define gits_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
+#define gicr_read_vpropbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
/*
- * GITS_VPENDBASER - the Valid bit must be cleared before changing
+ * GICR_VPENDBASER - the Valid bit must be cleared before changing
* anything else.
*/
-static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
+static inline void gicr_write_vpendbaser(u64 val, void __iomem *addr)
{
u32 tmp;
@@ -350,7 +233,7 @@ static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
__gic_writeq_nonatomic(val, addr);
}
-#define gits_read_vpendbaser(c) __gic_readq_nonatomic(c)
+#define gicr_read_vpendbaser(c) __gic_readq_nonatomic(c)
static inline bool gic_prio_masking_enabled(void)
{
@@ -369,5 +252,10 @@ static inline void gic_arch_enable_irqs(void)
WARN_ON_ONCE(true);
}
+static inline bool gic_has_relaxed_pmr_sync(void)
+{
+ return false;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_ARCH_GICV3_H */
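
For reference, a sketch of what one CPUIF_MAP() invocation above expands to — e.g. CPUIF_MAP(ICC_PMR, ICC_PMR_EL1) generates a pair of accessors named after the AArch64 register but backed by the AArch32 CP15 encoding (illustrative expansion, not part of the patch):

static inline void write_ICC_PMR_EL1(u32 val)
{
	write_sysreg(val, ICC_PMR);	/* MCR to the ICC_PMR CP15 encoding */
}

static inline u32 read_ICC_PMR_EL1(void)
{
	return read_sysreg(ICC_PMR);	/* MRC from the ICC_PMR CP15 encoding */
}
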
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 99175812d903..bb129b6d2366 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -7,6 +7,7 @@
#include <asm/hwcap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>
#include <clocksource/arm_arch_timer.h>
@@ -24,29 +25,35 @@ int arch_timer_arch_init(void);
* the code. At least it does so with a recent GCC (4.6.3).
*/
static __always_inline
-void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+ asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" ((u32)val));
+ isb();
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+ case ARCH_TIMER_REG_CVAL:
+ asm volatile("mcrr p15, 2, %Q0, %R0, c14" : : "r" (val));
break;
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
+ asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" ((u32)val));
+ isb();
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
+ case ARCH_TIMER_REG_CVAL:
+ asm volatile("mcrr p15, 3, %Q0, %R0, c14" : : "r" (val));
break;
+ default:
+ BUILD_BUG();
}
+ } else {
+ BUILD_BUG();
}
-
- isb();
}
static __always_inline
@@ -59,19 +66,19 @@ u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
- break;
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
- break;
+ default:
+ BUILD_BUG();
}
+ } else {
+ BUILD_BUG();
}
return val;
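
A hedged sketch of how a caller programs the virtual timer with the CVAL interface introduced above; example_set_next_event() is hypothetical, and it assumes the CNTVCT accessor defined elsewhere in this header plus ARCH_TIMER_CTRL_ENABLE from <clocksource/arm_arch_timer.h>:

/* Hypothetical caller: arm the virtual timer 'delta' ticks from now
 * via the 64-bit comparator (CVAL) rather than the removed TVAL. */
static void example_set_next_event(u64 delta)
{
	u64 cval = __arch_counter_get_cntvct() + delta;

	arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
				  ARCH_TIMER_REG_CVAL, cval);
	arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
				  ARCH_TIMER_REG_CTRL,
				  ARCH_TIMER_CTRL_ENABLE);
}
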
diff --git a/arch/arm/include/asm/archrandom.h b/arch/arm/include/asm/archrandom.h
new file mode 100644
index 000000000000..cc4714eb1a75
--- /dev/null
+++ b/arch/arm/include/asm/archrandom.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+static inline bool __init smccc_probe_trng(void)
+{
+ return false;
+}
+
+#include <asm-generic/archrandom.h>
+
+#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
new file mode 100644
index 000000000000..a41b503b7dcd
--- /dev/null
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __ASM_PMUV3_H
+#define __ASM_PMUV3_H
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+
+#define PMCCNTR __ACCESS_CP15_64(0, c9)
+
+#define PMCR __ACCESS_CP15(c9, 0, c12, 0)
+#define PMCNTENSET __ACCESS_CP15(c9, 0, c12, 1)
+#define PMCNTENCLR __ACCESS_CP15(c9, 0, c12, 2)
+#define PMOVSR __ACCESS_CP15(c9, 0, c12, 3)
+#define PMSELR __ACCESS_CP15(c9, 0, c12, 5)
+#define PMCEID0 __ACCESS_CP15(c9, 0, c12, 6)
+#define PMCEID1 __ACCESS_CP15(c9, 0, c12, 7)
+#define PMXEVTYPER __ACCESS_CP15(c9, 0, c13, 1)
+#define PMXEVCNTR __ACCESS_CP15(c9, 0, c13, 2)
+#define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
+#define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
+#define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
+#define PMCEID2 __ACCESS_CP15(c9, 0, c14, 4)
+#define PMCEID3 __ACCESS_CP15(c9, 0, c14, 5)
+#define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
+#define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
+
+#define PMEVCNTR0 __ACCESS_CP15(c14, 0, c8, 0)
+#define PMEVCNTR1 __ACCESS_CP15(c14, 0, c8, 1)
+#define PMEVCNTR2 __ACCESS_CP15(c14, 0, c8, 2)
+#define PMEVCNTR3 __ACCESS_CP15(c14, 0, c8, 3)
+#define PMEVCNTR4 __ACCESS_CP15(c14, 0, c8, 4)
+#define PMEVCNTR5 __ACCESS_CP15(c14, 0, c8, 5)
+#define PMEVCNTR6 __ACCESS_CP15(c14, 0, c8, 6)
+#define PMEVCNTR7 __ACCESS_CP15(c14, 0, c8, 7)
+#define PMEVCNTR8 __ACCESS_CP15(c14, 0, c9, 0)
+#define PMEVCNTR9 __ACCESS_CP15(c14, 0, c9, 1)
+#define PMEVCNTR10 __ACCESS_CP15(c14, 0, c9, 2)
+#define PMEVCNTR11 __ACCESS_CP15(c14, 0, c9, 3)
+#define PMEVCNTR12 __ACCESS_CP15(c14, 0, c9, 4)
+#define PMEVCNTR13 __ACCESS_CP15(c14, 0, c9, 5)
+#define PMEVCNTR14 __ACCESS_CP15(c14, 0, c9, 6)
+#define PMEVCNTR15 __ACCESS_CP15(c14, 0, c9, 7)
+#define PMEVCNTR16 __ACCESS_CP15(c14, 0, c10, 0)
+#define PMEVCNTR17 __ACCESS_CP15(c14, 0, c10, 1)
+#define PMEVCNTR18 __ACCESS_CP15(c14, 0, c10, 2)
+#define PMEVCNTR19 __ACCESS_CP15(c14, 0, c10, 3)
+#define PMEVCNTR20 __ACCESS_CP15(c14, 0, c10, 4)
+#define PMEVCNTR21 __ACCESS_CP15(c14, 0, c10, 5)
+#define PMEVCNTR22 __ACCESS_CP15(c14, 0, c10, 6)
+#define PMEVCNTR23 __ACCESS_CP15(c14, 0, c10, 7)
+#define PMEVCNTR24 __ACCESS_CP15(c14, 0, c11, 0)
+#define PMEVCNTR25 __ACCESS_CP15(c14, 0, c11, 1)
+#define PMEVCNTR26 __ACCESS_CP15(c14, 0, c11, 2)
+#define PMEVCNTR27 __ACCESS_CP15(c14, 0, c11, 3)
+#define PMEVCNTR28 __ACCESS_CP15(c14, 0, c11, 4)
+#define PMEVCNTR29 __ACCESS_CP15(c14, 0, c11, 5)
+#define PMEVCNTR30 __ACCESS_CP15(c14, 0, c11, 6)
+
+#define PMEVTYPER0 __ACCESS_CP15(c14, 0, c12, 0)
+#define PMEVTYPER1 __ACCESS_CP15(c14, 0, c12, 1)
+#define PMEVTYPER2 __ACCESS_CP15(c14, 0, c12, 2)
+#define PMEVTYPER3 __ACCESS_CP15(c14, 0, c12, 3)
+#define PMEVTYPER4 __ACCESS_CP15(c14, 0, c12, 4)
+#define PMEVTYPER5 __ACCESS_CP15(c14, 0, c12, 5)
+#define PMEVTYPER6 __ACCESS_CP15(c14, 0, c12, 6)
+#define PMEVTYPER7 __ACCESS_CP15(c14, 0, c12, 7)
+#define PMEVTYPER8 __ACCESS_CP15(c14, 0, c13, 0)
+#define PMEVTYPER9 __ACCESS_CP15(c14, 0, c13, 1)
+#define PMEVTYPER10 __ACCESS_CP15(c14, 0, c13, 2)
+#define PMEVTYPER11 __ACCESS_CP15(c14, 0, c13, 3)
+#define PMEVTYPER12 __ACCESS_CP15(c14, 0, c13, 4)
+#define PMEVTYPER13 __ACCESS_CP15(c14, 0, c13, 5)
+#define PMEVTYPER14 __ACCESS_CP15(c14, 0, c13, 6)
+#define PMEVTYPER15 __ACCESS_CP15(c14, 0, c13, 7)
+#define PMEVTYPER16 __ACCESS_CP15(c14, 0, c14, 0)
+#define PMEVTYPER17 __ACCESS_CP15(c14, 0, c14, 1)
+#define PMEVTYPER18 __ACCESS_CP15(c14, 0, c14, 2)
+#define PMEVTYPER19 __ACCESS_CP15(c14, 0, c14, 3)
+#define PMEVTYPER20 __ACCESS_CP15(c14, 0, c14, 4)
+#define PMEVTYPER21 __ACCESS_CP15(c14, 0, c14, 5)
+#define PMEVTYPER22 __ACCESS_CP15(c14, 0, c14, 6)
+#define PMEVTYPER23 __ACCESS_CP15(c14, 0, c14, 7)
+#define PMEVTYPER24 __ACCESS_CP15(c14, 0, c15, 0)
+#define PMEVTYPER25 __ACCESS_CP15(c14, 0, c15, 1)
+#define PMEVTYPER26 __ACCESS_CP15(c14, 0, c15, 2)
+#define PMEVTYPER27 __ACCESS_CP15(c14, 0, c15, 3)
+#define PMEVTYPER28 __ACCESS_CP15(c14, 0, c15, 4)
+#define PMEVTYPER29 __ACCESS_CP15(c14, 0, c15, 5)
+#define PMEVTYPER30 __ACCESS_CP15(c14, 0, c15, 6)
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(PMEVCNTR##n)
+static inline unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, PMEVCNTR##n)
+static inline void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, PMEVTYPER##n)
+static inline void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
+static inline unsigned long read_pmmir(void)
+{
+ return read_sysreg(PMMIR);
+}
+
+static inline u32 read_pmuver(void)
+{
+ /* PMUVers is not a signed field */
+ u32 dfr0 = read_cpuid_ext(CPUID_EXT_DFR0);
+
+ return (dfr0 >> 24) & 0xf;
+}
+
+static inline void write_pmcr(u32 val)
+{
+ write_sysreg(val, PMCR);
+}
+
+static inline u32 read_pmcr(void)
+{
+ return read_sysreg(PMCR);
+}
+
+static inline void write_pmselr(u32 val)
+{
+ write_sysreg(val, PMSELR);
+}
+
+static inline void write_pmccntr(u64 val)
+{
+ write_sysreg(val, PMCCNTR);
+}
+
+static inline u64 read_pmccntr(void)
+{
+ return read_sysreg(PMCCNTR);
+}
+
+static inline void write_pmcntenset(u32 val)
+{
+ write_sysreg(val, PMCNTENSET);
+}
+
+static inline void write_pmcntenclr(u32 val)
+{
+ write_sysreg(val, PMCNTENCLR);
+}
+
+static inline void write_pmintenset(u32 val)
+{
+ write_sysreg(val, PMINTENSET);
+}
+
+static inline void write_pmintenclr(u32 val)
+{
+ write_sysreg(val, PMINTENCLR);
+}
+
+static inline void write_pmccfiltr(u32 val)
+{
+ write_sysreg(val, PMCCFILTR);
+}
+
+static inline void write_pmovsclr(u32 val)
+{
+ write_sysreg(val, PMOVSR);
+}
+
+static inline u32 read_pmovsclr(void)
+{
+ return read_sysreg(PMOVSR);
+}
+
+static inline void write_pmuserenr(u32 val)
+{
+ write_sysreg(val, PMUSERENR);
+}
+
+static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
+static inline void kvm_clr_pmu_events(u32 clr) {}
+static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
+{
+ return false;
+}
+
+static inline bool kvm_set_pmuserenr(u64 val)
+{
+ return false;
+}
+
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
+/* PMU Version in DFR Register */
+#define ARMV8_PMU_DFR_VER_NI 0
+#define ARMV8_PMU_DFR_VER_V3P1 0x4
+#define ARMV8_PMU_DFR_VER_V3P4 0x5
+#define ARMV8_PMU_DFR_VER_V3P5 0x6
+#define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
+
+static inline bool pmuv3_implemented(int pmuver)
+{
+ return !(pmuver == ARMV8_PMU_DFR_VER_IMP_DEF ||
+ pmuver == ARMV8_PMU_DFR_VER_NI);
+}
+
+static inline bool is_pmuv3p4(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P4;
+}
+
+static inline bool is_pmuv3p5(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
+}
+
+static inline u64 read_pmceid0(void)
+{
+ u64 val = read_sysreg(PMCEID0);
+
+ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
+ val |= (u64)read_sysreg(PMCEID2) << 32;
+
+ return val;
+}
+
+static inline u64 read_pmceid1(void)
+{
+ u64 val = read_sysreg(PMCEID1);
+
+ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
+ val |= (u64)read_sysreg(PMCEID3) << 32;
+
+ return val;
+}
+
+#endif
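
As a usage sketch, the accessors above can be combined to snapshot the PMU counters; example_pmu_snapshot() is a hypothetical helper, not part of the patch:

/* Hypothetical helper: read the 64-bit cycle counter and event
 * counter 0 through the CP15 accessors defined above. */
static void example_pmu_snapshot(u64 *cycles, unsigned long *ev0)
{
	*cycles = read_pmccntr();	/* MRRC read of PMCCNTR */
	*ev0 = read_pmevcntrn(0);	/* PMEVN_SWITCH dispatch to PMEVCNTR0 */
}
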
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 99929122dad7..aebe2c8f6a68 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -18,11 +18,11 @@
#endif
#include <asm/ptrace.h>
-#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
#define IOMEM(x) (x)
@@ -86,6 +86,10 @@
#define IMM12_MASK 0xfff
+/* the frame pointer used for stack unwinding */
+ARM( fpreg .req r11 )
+THUMB( fpreg .req r7 )
+
/*
* Enable and disable interrupts
*/
@@ -107,6 +111,16 @@
.endm
#endif
+#if __LINUX_ARM_ARCH__ < 7
+ .macro dsb, args
+ mcr p15, 0, r0, c7, c10, 4
+ .endm
+
+ .macro isb, args
+ mcr p15, 0, r0, c7, c5, 4
+ .endm
+#endif
+
.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
.if \save
@@ -203,10 +217,8 @@
* Get current thread_info.
*/
.macro get_thread_info, rd
- ARM( mov \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT )
- THUMB( mov \rd, sp )
- THUMB( lsr \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT )
- mov \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
+ /* thread_info is the first member of struct task_struct */
+ get_current \rd
.endm
/*
@@ -224,20 +236,12 @@
sub \tmp, \tmp, #1 @ decrement it
str \tmp, [\ti, #TI_PREEMPT]
.endm
-
- .macro dec_preempt_count_ti, ti, tmp
- get_thread_info \ti
- dec_preempt_count \ti, \tmp
- .endm
#else
.macro inc_preempt_count, ti, tmp
.endm
.macro dec_preempt_count, ti, tmp
.endm
-
- .macro dec_preempt_count_ti, ti, tmp
- .endm
#endif
#define USERL(l, x...) \
@@ -259,7 +263,8 @@
*/
#define ALT_UP(instr...) \
.pushsection ".alt.smp.init", "a" ;\
- .long 9998b ;\
+ .align 2 ;\
+ .long 9998b - . ;\
9997: instr ;\
.if . - 9997b == 2 ;\
nop ;\
@@ -269,10 +274,10 @@
.endif ;\
.popsection
#define ALT_UP_B(label) \
- .equ up_b_offset, label - 9998b ;\
.pushsection ".alt.smp.init", "a" ;\
- .long 9998b ;\
- W(b) . + up_b_offset ;\
+ .align 2 ;\
+ .long 9998b - . ;\
+ W(b) . + (label - 9998b) ;\
.popsection
#else
#define ALT_SMP(instr...)
@@ -280,6 +285,80 @@
#define ALT_UP_B(label) b label
#endif
+ /*
+ * this_cpu_offset - load the per-CPU offset of this CPU into
+ * register 'rd'
+ */
+ .macro this_cpu_offset, rd:req
+#ifdef CONFIG_SMP
+ALT_SMP(mrc p15, 0, \rd, c13, c0, 4)
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L1_\@)
+.L0_\@:
+ .subsection 1
+.L1_\@: ldr_va \rd, __per_cpu_offset
+ b .L0_\@
+ .previous
+#endif
+#else
+ mov \rd, #0
+#endif
+ .endm
+
+ /*
+ * set_current - store the task pointer of this CPU's current task
+ */
+ .macro set_current, rn:req, tmp:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+9998: mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L0_\@)
+ .subsection 1
+.L0_\@: str_va \rn, __current, \tmp
+ b .L1_\@
+ .previous
+.L1_\@:
+#endif
+#else
+ str_va \rn, __current, \tmp
+#endif
+ .endm
+
+ /*
+ * get_current - load the task pointer of this CPU's current task
+ */
+ .macro get_current, rd:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+9998: mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L0_\@)
+ .subsection 1
+.L0_\@: ldr_va \rd, __current
+ b .L1_\@
+ .previous
+.L1_\@:
+#endif
+#else
+ ldr_va \rd, __current
+#endif
+ .endm
+
+ /*
+ * reload_current - reload the task pointer of this CPU's current task
+ * into the TLS register
+ */
+ .macro reload_current, t1:req, t2:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+#ifdef CONFIG_CPU_V6
+ALT_SMP(nop)
+ALT_UP_B(.L0_\@)
+#endif
+ ldr_this_cpu \t1, __entry_task, \t1, \t2
+ mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO
+.L0_\@:
+#endif
+ .endm
+
/*
* Instruction barrier
*/
@@ -315,6 +394,23 @@
#endif
.endm
+/*
+ * Raw SMP data memory barrier
+ */
+ .macro __smp_dmb mode
+#if __LINUX_ARM_ARCH__ >= 7
+ .ifeqs "\mode","arm"
+ dmb ish
+ .else
+ W(dmb) ish
+ .endif
+#elif __LINUX_ARM_ARCH__ == 6
+ mcr p15, 0, r0, c7, c10, 5 @ dmb
+#else
+ .error "Incompatible SMP platform"
+#endif
+ .endm
+
#if defined(CONFIG_CPU_V7M)
/*
* setmode is used to assert to be in svc mode during boot. For v7-M
@@ -446,79 +542,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.size \name , . - \name
.endm
- .macro csdb
-#ifdef CONFIG_THUMB2_KERNEL
- .inst.w 0xf3af8014
-#else
- .inst 0xe320f014
-#endif
- .endm
-
- .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
-#ifndef CONFIG_CPU_USE_DOMAINS
- adds \tmp, \addr, #\size - 1
- sbcscc \tmp, \tmp, \limit
- bcs \bad
-#ifdef CONFIG_CPU_SPECTRE
- movcs \addr, #0
- csdb
-#endif
-#endif
- .endm
-
- .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
-#ifdef CONFIG_CPU_SPECTRE
- sub \tmp, \limit, #1
- subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
- addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
- subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
- movlo \addr, #0 @ if (tmp < 0) addr = NULL
- csdb
-#endif
- .endm
-
- .macro uaccess_disable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- /*
- * Whenever we re-enter userspace, the domains should always be
- * set appropriately.
- */
- mov \tmp, #DACR_UACCESS_DISABLE
- mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
- .if \isb
- instr_sync
- .endif
-#endif
- .endm
-
- .macro uaccess_enable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- /*
- * Whenever we re-enter userspace, the domains should always be
- * set appropriately.
- */
- mov \tmp, #DACR_UACCESS_ENABLE
- mcr p15, 0, \tmp, c3, c0, 0
- .if \isb
- instr_sync
- .endif
-#endif
- .endm
-
- .macro uaccess_save, tmp
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- mrc p15, 0, \tmp, c3, c0, 0
- str \tmp, [sp, #SVC_DACR]
-#endif
- .endm
-
- .macro uaccess_restore
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- ldr r0, [sp, #SVC_DACR]
- mcr p15, 0, r0, c3, c0, 0
-#endif
- .endm
-
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
@@ -568,4 +591,204 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#define _ASM_NOKPROBE(entry)
#endif
+ .macro __adldst_l, op, reg, sym, tmp, c
+ .if __LINUX_ARM_ARCH__ < 7
+ ldr\c \tmp, .La\@
+ .subsection 1
+ .align 2
+.La\@: .long \sym - .Lpc\@
+ .previous
+ .else
+ .ifnb \c
+ THUMB( ittt \c )
+ .endif
+ movw\c \tmp, #:lower16:\sym - .Lpc\@
+ movt\c \tmp, #:upper16:\sym - .Lpc\@
+ .endif
+
+#ifndef CONFIG_THUMB2_KERNEL
+ .set .Lpc\@, . + 8 // PC bias
+ .ifc \op, add
+ add\c \reg, \tmp, pc
+ .else
+ \op\c \reg, [pc, \tmp]
+ .endif
+#else
+.Lb\@: add\c \tmp, \tmp, pc
+ /*
+ * In Thumb-2 builds, the PC bias depends on whether we are currently
+ * emitting into a .arm or a .thumb section. The size of the add opcode
+ * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
+ * emitting in ARM mode, so let's use this to account for the bias.
+ */
+ .set .Lpc\@, . + (. - .Lb\@)
+
+ .ifnc \op, add
+ \op\c \reg, [\tmp]
+ .endif
+#endif
+ .endm
+
+ /*
+ * mov_l - move a constant value or [relocated] address into a register
+ */
+ .macro mov_l, dst:req, imm:req, cond
+ .if __LINUX_ARM_ARCH__ < 7
+ ldr\cond \dst, =\imm
+ .else
+ movw\cond \dst, #:lower16:\imm
+ movt\cond \dst, #:upper16:\imm
+ .endif
+ .endm
+
+ /*
+ * adr_l - adr pseudo-op with unlimited range
+ *
+ * @dst: destination register
+ * @sym: name of the symbol
+ * @cond: conditional opcode suffix
+ */
+ .macro adr_l, dst:req, sym:req, cond
+ __adldst_l add, \dst, \sym, \dst, \cond
+ .endm
+
+ /*
+ * ldr_l - ldr <literal> pseudo-op with unlimited range
+ *
+ * @dst: destination register
+ * @sym: name of the symbol
+ * @cond: conditional opcode suffix
+ */
+ .macro ldr_l, dst:req, sym:req, cond
+ __adldst_l ldr, \dst, \sym, \dst, \cond
+ .endm
+
+ /*
+ * str_l - str <literal> pseudo-op with unlimited range
+ *
+ * @src: source register
+ * @sym: name of the symbol
+ * @tmp: mandatory scratch register
+ * @cond: conditional opcode suffix
+ */
+ .macro str_l, src:req, sym:req, tmp:req, cond
+ __adldst_l str, \src, \sym, \tmp, \cond
+ .endm
+
+ .macro __ldst_va, op, reg, tmp, sym, cond, offset
+#if __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ mov_l \tmp, \sym, \cond
+#else
+ /*
+ * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
+ * with the appropriate relocations. The combined sequence has a range
+ * of -/+ 256 MiB, which should be sufficient for the core kernel and
+ * for modules loaded into the module region.
+ */
+ .globl \sym
+ .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
+ .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
+ .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
+.L0_\@: sub\cond \tmp, pc, #8 - \offset
+.L1_\@: sub\cond \tmp, \tmp, #4 - \offset
+.L2_\@:
+#endif
+ \op\cond \reg, [\tmp, #\offset]
+ .endm
+
+ /*
+ * ldr_va - load a 32-bit word from the virtual address of \sym
+ */
+ .macro ldr_va, rd:req, sym:req, cond, tmp, offset=0
+ .ifnb \tmp
+ __ldst_va ldr, \rd, \tmp, \sym, \cond, \offset
+ .else
+ __ldst_va ldr, \rd, \rd, \sym, \cond, \offset
+ .endif
+ .endm
+
+ /*
+ * str_va - store a 32-bit word to the virtual address of \sym
+ */
+ .macro str_va, rn:req, sym:req, tmp:req, cond
+ __ldst_va str, \rn, \tmp, \sym, \cond, 0
+ .endm
+
+ /*
+ * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
+ * without using a temp register. Supported in ARM mode
+ * only.
+ */
+ .macro ldr_this_cpu_armv6, rd:req, sym:req
+ this_cpu_offset \rd
+ .globl \sym
+ .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
+ .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
+ .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
+ add \rd, \rd, pc
+.L0_\@: sub \rd, \rd, #4
+.L1_\@: sub \rd, \rd, #0
+.L2_\@: ldr \rd, [\rd, #4]
+ .endm
+
+ /*
+ * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
+ * into register 'rd', which may be the stack pointer,
+ * using 't1' and 't2' as general temp registers. These
+ * are permitted to overlap with 'rd' if != sp
+ */
+ .macro ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
+#ifndef CONFIG_SMP
+ ldr_va \rd, \sym, tmp=\t1
+#elif __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ this_cpu_offset \t1
+ mov_l \t2, \sym
+ ldr \rd, [\t1, \t2]
+#else
+ ldr_this_cpu_armv6 \rd, \sym
+#endif
+ .endm
+
+ /*
+ * rev_l - byte-swap a 32-bit value
+ *
+ * @val: source/destination register
+ * @tmp: scratch register
+ */
+ .macro rev_l, val:req, tmp:req
+ .if __LINUX_ARM_ARCH__ < 6
+ eor \tmp, \val, \val, ror #16
+ bic \tmp, \tmp, #0x00ff0000
+ mov \val, \val, ror #8
+ eor \val, \val, \tmp, lsr #8
+ .else
+ rev \val, \val
+ .endif
+ .endm
+
+ .if __LINUX_ARM_ARCH__ < 6
+ .set .Lrev_l_uses_tmp, 1
+ .else
+ .set .Lrev_l_uses_tmp, 0
+ .endif
+
+ /*
+ * bl_r - branch and link to register
+ *
+ * @dst: target to branch to
+ * @c: conditional opcode suffix
+ */
+ .macro bl_r, dst:req, c
+ .if __LINUX_ARM_ARCH__ < 6
+ mov\c lr, pc
+ mov\c pc, \dst
+ .else
+ blx\c \dst
+ .endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
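
For clarity, the pre-ARMv6 rev_l fallback above is the classic eor/bic/ror byte swap; a C rendering of the same instruction sequence (illustrative only, using ror32() from <linux/bitops.h>):

static inline u32 example_rev_l(u32 val)
{
	u32 tmp = val ^ ror32(val, 16);	/* eor tmp, val, val, ror #16 */

	tmp &= ~0x00ff0000;		/* bic tmp, tmp, #0x00ff0000 */
	/* mov val, val, ror #8; eor val, val, tmp, lsr #8 */
	return ror32(val, 8) ^ (tmp >> 8);
}
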
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 75bb2c543e59..f0e3b01afa74 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -15,8 +15,6 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
-#define ATOMIC_INIT(i) { (i) }
-
#ifdef __KERNEL__
/*
@@ -24,8 +22,8 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#if __LINUX_ARM_ARCH__ >= 6
@@ -36,7 +34,7 @@
*/
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -54,7 +52,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -75,7 +73,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result, val; \
@@ -95,17 +93,17 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
return result; \
}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
+static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
int oldval;
unsigned long res;
@@ -125,9 +123,9 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
return oldval;
}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int oldval, newval;
unsigned long tmp;
@@ -153,7 +151,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return oldval;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#else /* ARM_ARCH_6 */
@@ -162,7 +160,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -172,7 +170,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -186,7 +184,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -199,7 +197,17 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
return val; \
}
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -212,8 +220,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-
-#define atomic_fetch_andnot atomic_fetch_andnot
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#endif /* __LINUX_ARM_ARCH__ */
@@ -225,7 +232,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
+#define arch_atomic_andnot arch_atomic_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
@@ -242,8 +249,6 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
s64 counter;
@@ -252,7 +257,7 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 result;
@@ -265,7 +270,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
@@ -274,7 +279,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
);
}
#else
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 result;
@@ -287,7 +292,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
s64 tmp;
@@ -304,7 +309,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
#endif
#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(s64 i, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
@@ -324,7 +329,7 @@ static inline void atomic64_##op(s64 i, atomic64_t *v) \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 \
-atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
@@ -347,7 +352,7 @@ atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \
s64 result, val; \
unsigned long tmp; \
@@ -376,34 +381,34 @@ atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
+static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
s64 oldval;
unsigned long res;
@@ -424,9 +429,9 @@ static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
return oldval;
}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
-static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
s64 result;
unsigned long tmp;
@@ -444,9 +449,9 @@ static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
return result;
}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 result;
unsigned long tmp;
@@ -472,9 +477,9 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
return result;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 oldval, newval;
unsigned long tmp;
@@ -502,7 +507,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return oldval;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
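
The arch_ prefix exists so the generic layer can wrap these operations with instrumentation; a simplified sketch of the wrapper pattern generated in <linux/atomic/atomic-instrumented.h> (not the exact generated code):

static __always_inline void atomic_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));	/* KASAN/KCSAN hook */
	arch_atomic_add(i, v);				/* this header's op */
}
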
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index c92e42a5c8f7..714440fa2fc6 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -160,18 +160,20 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/
-extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *p,
+ unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);
/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/
-extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_be(const unsigned long *p,
+ unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);
#ifndef CONFIG_SMP
/*
@@ -264,7 +266,6 @@ static inline int find_next_bit_le(const void *p, int size, int offset)
#endif
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
/*
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index deef4d0cb3b5..ba8d9d7d242b 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -82,10 +82,12 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
int sig, int code, const char *name);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode,
+ const char *loglvl);
struct mm_struct;
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
+extern void __show_regs_alloc_free(struct pt_regs *regs);
#endif
diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
index 97a312ba0840..fe385551edec 100644
--- a/arch/arm/include/asm/bugs.h
+++ b/arch/arm/include/asm/bugs.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * arch/arm/include/asm/bugs.h
- *
* Copyright (C) 1995-2003 Russell King
*/
#ifndef __ASM_BUGS_H
@@ -10,10 +8,8 @@
extern void check_writebuffer_bugs(void);
#ifdef CONFIG_MMU
-extern void check_bugs(void);
extern void check_other_bugs(void);
#else
-#define check_bugs() do { } while (0)
#define check_other_bugs() do { } while (0)
#endif
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 1d65ed3a2755..e3ea34558ada 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -24,6 +24,6 @@
#define ARCH_SLAB_MINALIGN 8
#endif
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(".data..read_mostly")
#endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 7114b9aa46b8..1075534b0a2e 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -231,14 +231,15 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
vma->vm_flags);
}
-static inline void
-vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
struct mm_struct *mm = vma->vm_mm;
if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
unsigned long addr = user_addr & PAGE_MASK;
- __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
+ __cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
+ vma->vm_flags);
}
}
@@ -247,22 +248,24 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
vivt_flush_cache_range(vma,start,end)
-#define flush_cache_page(vma,addr,pfn) \
- vivt_flush_cache_page(vma,addr,pfn)
+#define flush_cache_pages(vma, addr, pfn, nr) \
+ vivt_flush_cache_pages(vma, addr, pfn, nr)
#else
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
+ unsigned long pfn, unsigned int nr);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+#define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)
/*
- * flush_cache_user_range is used when we want to ensure that the
+ * flush_icache_user_range is used when we want to ensure that the
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
-#define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e)
+#define flush_icache_user_range(s,e) __cpuc_coherent_user_range(s,e)
/*
* Perform necessary cache operations to ensure that data previously
@@ -289,8 +292,11 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* See update_mmu_cache for the user space part.
*/
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *);
+void flush_dcache_page(struct page *);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
+#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
if ((cache_is_vivt() || cache_is_vipt_aliasing()))
@@ -312,21 +318,9 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
__flush_anon_page(vma, page, vmaddr);
}
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-extern void flush_kernel_dcache_page(struct page *);
-
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
-#define flush_icache_user_range(vma,page,addr,len) \
- flush_dcache_page(page)
-
-/*
- * We don't appear to need to do anything here. In fact, if we did, we'd
- * duplicate cache flushing elsewhere performed by flush_dcache_page().
- */
-#define flush_icache_page(vma,page) do { } while (0)
-
/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
* vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
@@ -346,6 +340,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
dsb(ishst);
}
+#define flush_cache_vmap_early(start, end) do { } while (0)
+
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
if (!cache_is_vipt_nonaliasing())
@@ -450,15 +446,10 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
* however some exceptions may exist. Caveat emptor.
*
* - The clobber list is dictated by the call to v7_flush_dcache_*.
- * fp is preserved to the stack explicitly prior disabling the cache
- * since adding it to the clobber list is incompatible with having
- * CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering
- * trampoline are inserted by the linker and to keep sp 64-bit aligned.
*/
#define v7_exit_coherency_flush(level) \
asm volatile( \
".arch armv7-a \n\t" \
- "stmfd sp!, {fp, ip} \n\t" \
"mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
"bic r0, r0, #"__stringify(CR_C)" \n\t" \
"mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
@@ -468,10 +459,9 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
"bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
"mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
"isb \n\t" \
- "dsb \n\t" \
- "ldmfd sp!, {fp, ip}" \
- : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
- "r9","r10","lr","memory" )
+ "dsb" \
+ : : : "r0","r1","r2","r3","r4","r5","r6", \
+ "r9","r10","ip","lr","memory" )
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 20043e0ebb07..d8a13959bff0 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -11,6 +11,7 @@
#define __ASM_ARM_CHECKSUM_H
#include <linux/in6.h>
+#include <linux/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
@@ -35,10 +36,21 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+csum_partial_copy_nocheck(const void *src, void *dst, int len);
__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+csum_partial_copy_from_user(const void __user *src, void *dst, int len);
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
+{
+ if (!access_ok(src, len))
+ return 0;
+
+ return csum_partial_copy_from_user(src, dst, len);
+}
/*
* Fold a partial checksum without adding pseudo headers
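
A sketch of the new calling convention, where a zero return signals failure instead of the old err_ptr protocol; example_copy_and_check() and its 'expect' parameter are hypothetical:

static int example_copy_and_check(const void __user *src, void *dst,
				  int len, __sum16 expect)
{
	__wsum sum = csum_and_copy_from_user(src, dst, len);

	if (!sum)			/* fault or failed access_ok() */
		return -EFAULT;
	return csum_fold(sum) == expect ? 0 : -EINVAL;
}
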
diff --git a/arch/arm/include/asm/clocksource.h b/arch/arm/include/asm/clocksource.h
index 0b350a7e26f3..13651c731a81 100644
--- a/arch/arm/include/asm/clocksource.h
+++ b/arch/arm/include/asm/clocksource.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_CLOCKSOURCE_H
#define _ASM_CLOCKSOURCE_H
-struct arch_clocksource_data {
- bool vdso_direct; /* Usable for direct VDSO access? */
-};
+#include <asm/vdso/clocksource.h>
-#endif
+#endif /* _ASM_CLOCKSOURCE_H */
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 8b701f8e175c..44667bdb4707 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -25,7 +25,8 @@
#define swp_is_buggy
#endif
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
extern void __bad_xchg(volatile void *, int);
unsigned long ret;
@@ -114,9 +115,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
return ret;
}
-#define xchg_relaxed(ptr, x) ({ \
- (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
- sizeof(*(ptr))); \
+#define arch_xchg_relaxed(ptr, x) ({ \
+ (__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr), \
+ sizeof(*(ptr))); \
})
#include <asm-generic/cmpxchg-local.h>
@@ -128,20 +129,20 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error "SMP is not supported on this platform"
#endif
-#define xchg xchg_relaxed
+#define arch_xchg arch_xchg_relaxed
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) ({ \
- (__typeof(*ptr))__cmpxchg_local_generic((ptr), \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof(*ptr))__generic_cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
})
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#include <asm-generic/cmpxchg.h>
@@ -207,7 +208,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
-#define cmpxchg_relaxed(ptr,o,n) ({ \
+#define arch_cmpxchg_relaxed(ptr,o,n) ({ \
(__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -224,7 +225,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
- ret = __cmpxchg_local_generic(ptr, old, new, size);
+ ret = __generic_cmpxchg_local(ptr, old, new, size);
break;
#endif
default:
@@ -234,7 +235,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
-#define cmpxchg_local(ptr, o, n) ({ \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
(__typeof(*ptr))__cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -266,13 +267,13 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
return oldval;
}
-#define cmpxchg64_relaxed(ptr, o, n) ({ \
+#define arch_cmpxchg64_relaxed(ptr, o, n) ({ \
(__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
})
-#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
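The arch_ prefix hands the bare primitives to the generic wrappers in <linux/atomic.h>, which layer instrumentation (KASAN and friends) on top. A standalone model of the relaxed-exchange semantics using a compiler builtin; the kernel's version is hand-written LDREX/STREX assembly:

/* Model of arch_xchg_relaxed(): atomically swap in a new value with no
 * ordering guarantees, returning the previous one. */
static inline unsigned long xchg_relaxed_model(unsigned long *ptr,
					       unsigned long x)
{
	return __atomic_exchange_n(ptr, x, __ATOMIC_RELAXED);
}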
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index d2453e2d3f1f..a54230e65647 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -50,25 +50,7 @@
#ifdef CONFIG_CPU_CP15
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
- "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
-#define __ACCESS_CP15_64(Op1, CRm) \
- "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
-
-#define __read_sysreg(r, w, c, t) ({ \
- t __val; \
- asm volatile(r " " c : "=r" (__val)); \
- __val; \
-})
-#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
-
-#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
-#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
-
-#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
-#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
-
-#define CNTVCT __ACCESS_CP15_64(1, c14)
+#include <asm/vdso/cp15.h>
extern unsigned long cr_alignment; /* defined in entry-armv.S */
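Moving the accessors to <asm/vdso/cp15.h> lets the vDSO share them; callers of read_sysreg()/write_sysreg() are unchanged. A hedged sketch of the common pattern (the helper name is hypothetical):

/* Sketch: read the 64-bit virtual counter through the relocated macros;
 * CNTVCT expands to the MRRC form via __ACCESS_CP15_64. */
static inline u64 read_cntvct(void)		/* hypothetical helper */
{
	return read_sysreg(CNTVCT);
}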
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 6b2ff7243b4b..397be5ed30e7 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -7,9 +7,11 @@
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
#else
static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
#endif
/* Common ARM WFI state */
@@ -42,11 +44,15 @@ struct of_cpuidle_method {
#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
- __used __section(__cpuidle_method_of_table) \
- = { .method = _method, .ops = _ops }
+ __cpuidle_method_section = { .method = _method, .ops = _ops }
extern int arm_cpuidle_suspend(int index);
extern int arm_cpuidle_init(int cpu);
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c) (void)c
+#define arm_cpuidle_restore_irq_context(c) (void)c
+
#endif
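Factoring the section attribute into __cpuidle_method_section means the table entry is emitted only when CONFIG_CPU_IDLE is set and dropped silently otherwise. A hedged sketch of a registration site (all names are hypothetical, modeled on existing enable-method users; the cpuidle_ops field layout is assumed from this header's struct):

static int my_cpuidle_init(struct device_node *np, int cpu)	/* hypothetical */
{
	return 0;
}

static int my_cpuidle_suspend(unsigned long arg)		/* hypothetical */
{
	return 0;
}

static const struct cpuidle_ops my_cpuidle_ops = {
	.init		= my_cpuidle_init,
	.suspend	= my_cpuidle_suspend,
};

/* Matched against the DT "enable-method" property string. */
CPUIDLE_METHOD_OF_DECLARE(my_method, "vendor,method", &my_cpuidle_ops);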
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 775cac3c02bb..0163c3e78a67 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -25,6 +25,8 @@
#define CPUID_EXT_ISAR3 0x6c
#define CPUID_EXT_ISAR4 0x70
#define CPUID_EXT_ISAR5 0x74
+#define CPUID_EXT_ISAR6 0x7c
+#define CPUID_EXT_PFR2 0x90
#else
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
@@ -40,6 +42,8 @@
#define CPUID_EXT_ISAR3 "c2, 3"
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
+#define CPUID_EXT_ISAR6 "c2, 7"
+#define CPUID_EXT_PFR2 "c3, 4"
#endif
#define MPIDR_SMP_BITMASK (0x3 << 30)
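The new offsets are consumed through the existing read_cpuid_ext() accessor. A hedged sketch of a feature probe; the helper name and the field position are illustrative, and the authoritative ID_ISAR6 layout is in the Arm ARM:

static bool cpu_has_isar6_feature(void)	/* hypothetical feature check */
{
	u32 isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);

	/* extract a 4-bit ID field; bit 12 is assumed to be ISAR6.SB */
	return cpuid_feature_extract_field(isar6, 12) > 0;
}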
diff --git a/arch/arm/include/asm/current.h b/arch/arm/include/asm/current.h
new file mode 100644
index 000000000000..1e1178bf176d
--- /dev/null
+++ b/arch/arm/include/asm/current.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Keith Packard <keithp@keithp.com>
+ * Copyright (c) 2021 Google, LLC <ardb@kernel.org>
+ */
+
+#ifndef _ASM_ARM_CURRENT_H
+#define _ASM_ARM_CURRENT_H
+
+#ifndef __ASSEMBLY__
+#include <asm/insn.h>
+
+struct task_struct;
+
+extern struct task_struct *__current;
+
+static __always_inline __attribute_const__ struct task_struct *get_current(void)
+{
+ struct task_struct *cur;
+
+#if __has_builtin(__builtin_thread_pointer) && \
+ defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) && \
+ !(defined(CONFIG_THUMB2_KERNEL) && \
+ defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 130001)
+ /*
+ * Use the __builtin helper when available - this results in better
+ * code, especially when using GCC in combination with the per-task
+ * stack protector, as the compiler will recognize that it needs to
+ * load the TLS register only once in every function.
+ *
+ * Clang < 13.0.1 gets this wrong for Thumb2 builds:
+ * https://github.com/ClangBuiltLinux/linux/issues/1485
+ */
+ cur = __builtin_thread_pointer();
+#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+ asm("0: mrc p15, 0, %0, c13, c0, 3 \n\t"
+#ifdef CONFIG_CPU_V6
+ "1: \n\t"
+ " .subsection 1 \n\t"
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+ !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ "2: " LOAD_SYM_ARMV6(%0, __current) " \n\t"
+ " b 1b \n\t"
+#else
+ "2: ldr %0, 3f \n\t"
+ " ldr %0, [%0] \n\t"
+ " b 1b \n\t"
+ "3: .long __current \n\t"
+#endif
+ " .previous \n\t"
+ " .pushsection \".alt.smp.init\", \"a\" \n\t"
+ " .long 0b - . \n\t"
+ " b . + (2b - 0b) \n\t"
+ " .popsection \n\t"
+#endif
+ : "=r"(cur));
+#elif __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ cur = __current;
+#else
+ asm(LOAD_SYM_ARMV6(%0, __current) : "=r"(cur));
+#endif
+ return cur;
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ARM_CURRENT_H */
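On the fast path, get_current() collapses to a single register read. A standalone analogue (not kernel code) of the TPIDRURO-backed scheme, using an ordinary TLS variable:

#include <stddef.h>

struct task_struct;				/* opaque in this model */
static __thread struct task_struct *cur;	/* stands in for TPIDRURO */

static inline struct task_struct *get_current_model(void)
{
	return cur;	/* compiles to one TLS-register-relative load */
}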
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index 4f80b72372b4..1d069e558d8d 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -7,7 +7,7 @@
#ifndef __ASM_ARM_DELAY_H
#define __ASM_ARM_DELAY_H
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/param.h> /* HZ */
/*
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index c675bc0d5aa8..c6beb1708c64 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -6,16 +6,9 @@
#define ASMARM_DEVICE_H
struct dev_archdata {
-#ifdef CONFIG_DMABOUNCE
- struct dmabounce_device_info *dmabounce;
-#endif
-#ifdef CONFIG_IOMMU_API
- void *iommu; /* private IOMMU data */
-#endif
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
- unsigned int dma_coherent:1;
unsigned int dma_ops_setup:1;
};
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index 898e9c78a7e7..4b69cf850451 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -21,29 +21,20 @@
* assembly implementation with completely non standard calling convention
* for arguments and results (beware).
*/
-
-#ifdef __ARMEB__
-#define __xh "r0"
-#define __xl "r1"
-#else
-#define __xl "r0"
-#define __xh "r1"
-#endif
-
static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
{
register unsigned int __base asm("r4") = base;
register unsigned long long __n asm("r0") = *n;
register unsigned long long __res asm("r2");
- register unsigned int __rem asm(__xh);
- asm( __asmeq("%0", __xh)
+ unsigned int __rem;
+ asm( __asmeq("%0", "r0")
__asmeq("%1", "r2")
- __asmeq("%2", "r0")
- __asmeq("%3", "r4")
+ __asmeq("%2", "r4")
"bl __do_div64"
- : "=r" (__rem), "=r" (__res)
- : "r" (__n), "r" (__base)
+ : "+r" (__n), "=r" (__res)
+ : "r" (__base)
: "ip", "lr", "cc");
+ __rem = __n >> 32;
*n = __res;
return __rem;
}
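Dropping the __xh/__xl aliases works because __do_div64 leaves the remainder in the upper half of the register pair that holds __n on either endianness, so the plain C shift extracts it. A standalone model of the function's contract:

#include <stdint.h>

/* Model of __div64_32(): divide *n in place by base and return the
 * 32-bit remainder. */
static uint32_t div64_32_model(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}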
@@ -61,17 +52,6 @@ static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
#else
-/*
- * gcc versions earlier than 4.0 are simply too problematic for the
- * __div64_const32() code in asm-generic/div64.h. First there is
- * gcc PR 15089 that tend to trig on more complex constructs, spurious
- * .global __udivsi3 are inserted even if none of those symbols are
- * referenced in the generated code, and those gcc versions are not able
- * to do constant propagation on long long values anyway.
- */
-
-#define __div64_const32_is_OK (__GNUC__ >= 4)
-
static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
{
unsigned long long res;
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
deleted file mode 100644
index d785187a6f8a..000000000000
--- a/arch/arm/include/asm/dma-contiguous.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASMARM_DMA_CONTIGUOUS_H
-#define ASMARM_DMA_CONTIGUOUS_H
-
-#ifdef __KERNEL__
-#ifdef CONFIG_DMA_CMA
-
-#include <linux/types.h>
-
-void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
-
-#endif
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h
deleted file mode 100644
index 7c3001a6a775..000000000000
--- a/arch/arm/include/asm/dma-direct.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_ARM_DMA_DIRECT_H
-#define ASM_ARM_DMA_DIRECT_H 1
-
-static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- unsigned int offset = paddr & ~PAGE_MASK;
- return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
-}
-
-static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
- unsigned int offset = dev_addr & ~PAGE_MASK;
- return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
-}
-
-#endif /* ASM_ARM_DMA_DIRECT_H */
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 772f48ef84b7..82ec1ccf1fee 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -6,7 +6,6 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
#include <linux/kref.h>
struct dma_iommu_mapping {
@@ -25,7 +24,7 @@ struct dma_iommu_mapping {
};
struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size);
+arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size);
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
@@ -33,7 +32,5 @@ int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);
-int arm_dma_supported(struct device *dev, u64 mask);
-
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
deleted file mode 100644
index bdd80ddbca34..000000000000
--- a/arch/arm/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASMARM_DMA_MAPPING_H
-#define ASMARM_DMA_MAPPING_H
-
-#ifdef __KERNEL__
-
-#include <linux/mm_types.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-
-#include <asm/memory.h>
-
-#include <xen/xen.h>
-#include <asm/xen/hypervisor.h>
-
-extern const struct dma_map_ops arm_dma_ops;
-extern const struct dma_map_ops arm_coherent_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
- return &arm_dma_ops;
- return NULL;
-}
-
-#ifdef __arch_page_to_dma
-#error Please update to __arch_pfn_to_dma
-#endif
-
-/*
- * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
- * functions used internally by the DMA-mapping API to provide DMA
- * addresses. They must not be used by drivers.
- */
-#ifndef __arch_pfn_to_dma
-static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- if (dev)
- pfn -= dev->dma_pfn_offset;
- return (dma_addr_t)__pfn_to_bus(pfn);
-}
-
-static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
-{
- unsigned long pfn = __bus_to_pfn(addr);
-
- if (dev)
- pfn += dev->dma_pfn_offset;
-
- return pfn;
-}
-
-static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
-{
- if (dev) {
- unsigned long pfn = dma_to_pfn(dev, addr);
-
- return phys_to_virt(__pfn_to_phys(pfn));
- }
-
- return (void *)__bus_to_virt((unsigned long)addr);
-}
-
-static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
-{
- if (dev)
- return pfn_to_dma(dev, virt_to_pfn(addr));
-
- return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
-}
-
-#else
-static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- return __arch_pfn_to_dma(dev, pfn);
-}
-
-static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
-{
- return __arch_dma_to_pfn(dev, addr);
-}
-
-static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
-{
- return __arch_dma_to_virt(dev, addr);
-}
-
-static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
-{
- return __arch_virt_to_dma(dev, addr);
-}
-#endif
-
-/**
- * arm_dma_alloc - allocate consistent memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- * @attrs: optinal attributes that specific mapping properties
- *
- * Allocate some memory for a device for performing DMA. This function
- * allocates pages, and will return the CPU-viewed address, and sets @handle
- * to be the device-viewed address.
- */
-extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, unsigned long attrs);
-
-/**
- * arm_dma_free - free memory allocated by arm_dma_alloc
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_coherent
- * @cpu_addr: CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * arm_dma_alloc().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * during and after this call executing are illegal.
- */
-extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, unsigned long attrs);
-
-/**
- * arm_dma_mmap - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @size: size of memory originally requested in dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-
-/*
- * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- * @needs_bounce_fn: called to determine whether buffer needs bouncing
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- */
-extern int dmabounce_register_dev(struct device *, unsigned long,
- unsigned long, int (*)(struct device *, dma_addr_t, size_t));
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
- */
-extern void dmabounce_unregister_dev(struct device *);
-
-
-
-/*
- * The scatter list versions of the above methods.
- */
-extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, unsigned long attrs);
-extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, unsigned long attrs);
-extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-
-#endif /* __KERNEL__ */
-#endif
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index a81dda65c576..e2a1916013e7 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -10,8 +10,11 @@
#else
#define MAX_DMA_ADDRESS ({ \
extern phys_addr_t arm_dma_zone_size; \
- arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \
+ arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
+
+extern phys_addr_t arm_dma_limit;
+#define ARCH_LOW_ADDRESS_LIMIT arm_dma_limit
#endif
#ifdef CONFIG_ISA_DMA_API
@@ -106,7 +109,7 @@ extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);
*/
extern void __set_dma_addr(unsigned int chan, void *addr);
#define set_dma_addr(chan, addr) \
- __set_dma_addr(chan, (void *)__bus_to_virt(addr))
+ __set_dma_addr(chan, (void *)isa_bus_to_virt(addr))
/* Set the DMA byte count for this channel
*
@@ -143,10 +146,4 @@ extern int get_dma_residue(unsigned int chan);
#endif /* CONFIG_ISA_DMA_API */
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* __ASM_ARM_DMA_H */
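The MAX_DMA_ADDRESS constant was short by a factor of 16: 0x10000000 is 256 MiB, so subtracting any typical PAGE_OFFSET wrapped around and the comparison misfired; the intended bound is 4 GiB, which needs a 64-bit literal. A standalone illustration with the common PAGE_OFFSET of 0xC0000000 (an assumption; configs vary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t page_offset = 0xC0000000u;

	/* old 32-bit arithmetic: 0x10000000 - 0xC0000000 wraps around */
	printf("old bound: %#x\n",
	       (unsigned int)(0x10000000u - page_offset));	/* 0x50000000 */
	/* new: 4 GiB as a 64-bit constant, no wraparound */
	printf("new bound: %#llx\n",
	       (unsigned long long)(0x100000000ULL - page_offset)); /* 0x40000000 */
	return 0;
}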
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index f1d0a7807cd0..d48859fdf32c 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -8,8 +8,8 @@
#define __ASM_PROC_DOMAIN_H
#ifndef __ASSEMBLY__
+#include <linux/thread_info.h>
#include <asm/barrier.h>
-#include <asm/thread_info.h>
#endif
/*
@@ -112,19 +112,6 @@ static __always_inline void set_domain(unsigned int val)
}
#endif
-#ifdef CONFIG_CPU_USE_DOMAINS
-#define modify_domain(dom,type) \
- do { \
- unsigned int domain = get_domain(); \
- domain &= ~domain_mask(dom); \
- domain = domain | domain_val(dom, type); \
- set_domain(domain); \
- } while (0)
-
-#else
-static inline void modify_domain(unsigned dom, unsigned type) { }
-#endif
-
/*
* Generate the T (user) versions of the LDR/STR and related
* instructions (inline assembly)
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 7667826b93f1..78282ced5038 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -13,25 +13,18 @@
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#ifdef CONFIG_EFI
void efi_init(void);
+void arm_efi_init(void);
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
-int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);
#define arch_efi_call_virt_setup() efi_virtmap_load()
#define arch_efi_call_virt_teardown() efi_virtmap_unload()
-#define arch_efi_call_virt(p, f, args...) \
-({ \
- efi_##f##_t *__f; \
- __f = p->f; \
- __f(args); \
-})
-
#define ARCH_EFI_IRQ_FLAGS_MASK \
(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
PSR_T_BIT | MODE_MASK)
@@ -45,29 +38,11 @@ void efi_virtmap_load(void);
void efi_virtmap_unload(void);
#else
-#define efi_init()
+#define arm_efi_init()
#endif /* CONFIG_EFI */
/* arch specific definitions used by the stub code */
-#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
-#define __efi_call_early(f, ...) f(__VA_ARGS__)
-#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
-#define efi_is_64bit() (false)
-
-#define efi_table_attr(table, attr, instance) \
- ((table##_t *)instance)->attr
-
-#define efi_call_proto(protocol, f, instance, ...) \
- ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
-
-struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg);
-void free_screen_info(efi_system_table_t *sys_table, struct screen_info *si);
-
-static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
-{
-}
-
/*
* A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
* so we will reserve that amount of memory. We have no easy way to tell what
@@ -78,25 +53,29 @@ static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
#define MAX_UNCOMP_KERNEL_SIZE SZ_32M
/*
- * The kernel zImage should preferably be located between 32 MB and 128 MB
- * from the base of DRAM. The min address leaves space for a maximal size
- * uncompressed image, and the max address is due to how the zImage decompressor
- * picks a destination address.
+ * phys-to-virt patching requires that the physical to virtual offset is a
+ * multiple of 2 MiB. However, using an alignment smaller than TEXT_OFFSET
+ * here throws off the memory allocation logic, so let's use the lowest power
+ * of two greater than 2 MiB and greater than TEXT_OFFSET.
*/
-#define ZIMAGE_OFFSET_LIMIT SZ_128M
-#define MIN_ZIMAGE_OFFSET MAX_UNCOMP_KERNEL_SIZE
+#define EFI_PHYS_ALIGN max(UL(SZ_2M), roundup_pow_of_two(TEXT_OFFSET))
-/* on ARM, the FDT should be located in the first 128 MB of RAM */
-static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
+/* on ARM, the initrd should be loaded in a lowmem region */
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
- return dram_base + ZIMAGE_OFFSET_LIMIT;
+ return round_down(image_addr, SZ_4M) + SZ_512M;
}
-/* on ARM, the initrd should be loaded in a lowmem region */
-static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
- unsigned long image_addr)
+struct efi_arm_entry_state {
+ u32 cpsr_before_ebs;
+ u32 sctlr_before_ebs;
+ u32 cpsr_after_ebs;
+ u32 sctlr_after_ebs;
+};
+
+static inline void efi_capsule_flush_cache_range(void *addr, int size)
{
- return dram_base + SZ_512M;
+ __cpuc_flush_dcache_area(addr, size);
}
#endif /* _ASM_ARM_EFI_H */
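EFI_PHYS_ALIGN picks the smallest power of two that satisfies both the 2 MiB phys-to-virt patching granule and TEXT_OFFSET. A standalone model, assuming the common TEXT_OFFSET of 0x8000 (boards can override it); roundup_pow_of_two() is modeled with a loop for clarity:

#include <stdio.h>

static unsigned long roundup_pow_of_two_model(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long sz_2m = 2UL << 20;
	unsigned long text_offset = 0x8000;	/* common ARM value */
	unsigned long align = roundup_pow_of_two_model(text_offset);

	/* prints 0x200000: 2 MiB dominates for TEXT_OFFSET 0x8000 */
	printf("EFI_PHYS_ALIGN = %#lx\n", align > sz_2m ? align : sz_2m);
	return 0;
}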
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index b078d992414b..d68101655b74 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -51,6 +51,7 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_NONE 0
#define R_ARM_PC24 1
#define R_ARM_ABS32 2
+#define R_ARM_REL32 3
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
#define R_ARM_TARGET1 38
@@ -58,11 +59,18 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_PREL31 42
#define R_ARM_MOVW_ABS_NC 43
#define R_ARM_MOVT_ABS 44
+#define R_ARM_MOVW_PREL_NC 45
+#define R_ARM_MOVT_PREL 46
+#define R_ARM_ALU_PC_G0_NC 57
+#define R_ARM_ALU_PC_G1_NC 59
+#define R_ARM_LDR_PC_G2 63
#define R_ARM_THM_CALL 10
#define R_ARM_THM_JUMP24 30
#define R_ARM_THM_MOVW_ABS_NC 47
#define R_ARM_THM_MOVT_ABS 48
+#define R_ARM_THM_MOVW_PREL_NC 49
+#define R_ARM_THM_MOVT_PREL 50
/*
* These are used to set parameters in the core dumps.
@@ -111,10 +119,6 @@ extern int elf_check_arch(const struct elf32_hdr *);
extern int arm_elf_read_implies_exec(int);
#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(stk)
-struct task_struct;
-int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
-#define ELF_CORE_COPY_TASK_REGS dump_task_regs
-
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
deleted file mode 100644
index dfc6bfa43012..000000000000
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <asm/assembler.h>
-
-/*
- * Interrupt handling. Preserves r7, r8, r9
- */
- .macro arch_irq_handler_default
- get_irqnr_preamble r6, lr
-1: get_irqnr_and_base r0, r2, r6, lr
- movne r1, sp
- @
- @ routine called with r0 = irq number, r1 = struct pt_regs *
- @
- badrne lr, 1b
- bne asm_do_IRQ
-
-#ifdef CONFIG_SMP
- /*
- * XXX
- *
- * this macro assumes that irqstat (r2) and base (r6) are
- * preserved from get_irqnr_and_base above
- */
- ALT_SMP(test_for_ipi r0, r2, r6, lr)
- ALT_UP_B(9997f)
- movne r1, sp
- badrne lr, 1b
- bne do_IPI
-#endif
-9997:
- .endm
-
- .macro arch_irq_handler, symbol_name
- .align 5
- .global \symbol_name
-\symbol_name:
- mov r8, lr
- arch_irq_handler_default
- ret r8
- .endm
diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
index 58e039a851af..3c82975d46db 100644
--- a/arch/arm/include/asm/exception.h
+++ b/arch/arm/include/asm/exception.h
@@ -10,10 +10,6 @@
#include <linux/interrupt.h>
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
-#else
-#define __exception_irq_entry
-#endif
#endif /* __ASM_ARM_EXCEPTION_H */
diff --git a/arch/arm/include/asm/fb.h b/arch/arm/include/asm/fb.h
index d92e99cd8c8a..ce20a43c3033 100644
--- a/arch/arm/include/asm/fb.h
+++ b/arch/arm/include/asm/fb.h
@@ -1,19 +1,6 @@
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
-#include <linux/fb.h>
-#include <linux/fs.h>
-#include <asm/page.h>
-
-static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
- unsigned long off)
-{
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-}
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
+#include <asm-generic/fb.h>
#endif /* _ASM_FB_H_ */
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 472c93db5dac..707068f852c2 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -2,19 +2,19 @@
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
-#define FIXADDR_START 0xffc00000UL
+#define FIXADDR_START 0xffc80000UL
#define FIXADDR_END 0xfff00000UL
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
-#include <asm/kmap_types.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
+#include <asm/kmap_size.h>
enum fixed_addresses {
FIX_EARLYCON_MEM_BASE,
__end_of_permanent_fixed_addresses,
FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
/* Support writing RO kernel text via kprobes, jump labels, etc. */
FIX_TEXT_POKE0,
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index f4fe4d02cef2..e1cb04ed5008 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -8,19 +8,21 @@
*/
#ifndef __ASM_ARM_FLOPPY_H
#define __ASM_ARM_FLOPPY_H
-#if 0
-#include <mach/floppy.h>
-#endif
-#define fd_outb(val,port) \
- do { \
- if ((port) == (u32)FD_DOR) \
- fd_setdor((val)); \
- else \
- outb((val),(port)); \
+#define fd_outb(val, base, reg) \
+ do { \
+ int new_val = (val); \
+ if ((reg) == FD_DOR) { \
+ if (new_val & 0xf0) \
+ new_val = (new_val & 0x0c) | \
+ floppy_selects[new_val & 3]; \
+ else \
+ new_val &= 0x0c; \
+ } \
+ outb(new_val, (base) + (reg)); \
} while(0)
-#define fd_inb(port) inb((port))
+#define fd_inb(base, reg) inb((base) + (reg))
#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
0,"floppy",NULL)
#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL)
@@ -53,69 +55,7 @@ static inline int fd_dma_setup(void *data, unsigned int length,
* to a non-zero track, and then restoring it to track 0. If an error occurs,
* then there is no floppy drive present. [to be put back in again]
*/
-static unsigned char floppy_selects[2][4] =
-{
- { 0x10, 0x21, 0x23, 0x33 },
- { 0x10, 0x21, 0x23, 0x33 }
-};
-
-#define fd_setdor(dor) \
-do { \
- int new_dor = (dor); \
- if (new_dor & 0xf0) \
- new_dor = (new_dor & 0x0c) | floppy_selects[fdc][new_dor & 3]; \
- else \
- new_dor &= 0x0c; \
- outb(new_dor, FD_DOR); \
-} while (0)
-
-/*
- * Someday, we'll automatically detect which drives are present...
- */
-static inline void fd_scandrives (void)
-{
-#if 0
- int floppy, drive_count;
-
- fd_disable_irq();
- raw_cmd = &default_raw_cmd;
- raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_SEEK;
- raw_cmd->track = 0;
- raw_cmd->rate = ?;
- drive_count = 0;
- for (floppy = 0; floppy < 4; floppy ++) {
- current_drive = drive_count;
- /*
- * Turn on floppy motor
- */
- if (start_motor(redo_fd_request))
- continue;
- /*
- * Set up FDC
- */
- fdc_specify();
- /*
- * Tell FDC to recalibrate
- */
- output_byte(FD_RECALIBRATE);
- LAST_OUT(UNIT(floppy));
- /* wait for command to complete */
- if (!successful) {
- int i;
- for (i = drive_count; i < 3; i--)
- floppy_selects[fdc][i] = floppy_selects[fdc][i + 1];
- floppy_selects[fdc][3] = 0;
- floppy -= 1;
- } else
- drive_count++;
- }
-#else
- floppy_selects[0][0] = 0x10;
- floppy_selects[0][1] = 0x21;
- floppy_selects[0][2] = 0x23;
- floppy_selects[0][3] = 0x33;
-#endif
-}
+static unsigned char floppy_selects[4] = { 0x10, 0x21, 0x23, 0x33 };
#define FDC1 (0x3f0)
@@ -135,9 +75,7 @@ static inline void fd_scandrives (void)
*/
static void driveswap(int *ints, int dummy, int dummy2)
{
- floppy_selects[0][0] ^= floppy_selects[0][1];
- floppy_selects[0][1] ^= floppy_selects[0][0];
- floppy_selects[0][0] ^= floppy_selects[0][1];
+ swap(floppy_selects[0], floppy_selects[1]);
}
#define EXTRA_FLOPPY_PARAMS ,{ "driveswap", &driveswap, NULL, 0, 0 }
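fd_outb() now folds the old fd_setdor() translation inline: on a DOR write with any motor bit set, the drive-select bits are remapped through floppy_selects[]. A standalone model of just that translation:

#include <stdint.h>

static const uint8_t floppy_selects_model[4] = { 0x10, 0x21, 0x23, 0x33 };

/* Motor bits set: keep the DMA/reset bits (0x0c) and map the drive
 * number; otherwise keep only the DMA/reset bits. */
static uint8_t dor_translate(uint8_t val)
{
	if (val & 0xf0)
		return (val & 0x0c) | floppy_selects_model[val & 3];
	return val & 0x0c;
}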
diff --git a/arch/arm/include/asm/fpstate.h b/arch/arm/include/asm/fpstate.h
index 9e2fe9ced084..e29d9c7a5238 100644
--- a/arch/arm/include/asm/fpstate.h
+++ b/arch/arm/include/asm/fpstate.h
@@ -46,9 +46,6 @@ union vfp_state {
struct vfp_hard_struct hard;
};
-extern void vfp_flush_thread(union vfp_state *);
-extern void vfp_release_thread(union vfp_state *);
-
#define FP_HARD_SIZE 35
struct fp_hard_struct {
@@ -77,14 +74,6 @@ union fp_state {
#define FP_SIZE (sizeof(union fp_state) / sizeof(int))
-struct crunch_state {
- unsigned int mvdx[16][2];
- unsigned int mvax[4][3];
- unsigned int dspsc[2];
-};
-
-#define CRUNCH_SIZE sizeof(struct crunch_state)
-
#endif
#endif
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 48ec1d0337da..5be3ddc96a50 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -2,6 +2,8 @@
#ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
@@ -15,6 +17,9 @@ extern void __gnu_mcount_nc(void);
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
+#ifdef CONFIG_ARM_MODULE_PLTS
+ struct module *mod;
+#endif
};
static inline unsigned long ftrace_call_adjust(unsigned long addr)
@@ -45,7 +50,7 @@ void *return_address(unsigned int);
static inline void *return_address(unsigned int level)
{
- return NULL;
+ return NULL;
}
#endif
@@ -70,6 +75,10 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
return !strcasecmp(sym, name);
}
+void prepare_ftrace_return(unsigned long *parent, unsigned long self,
+ unsigned long frame_pointer,
+ unsigned long stack_pointer);
+
#endif /* ifndef __ASSEMBLY__ */
#endif /* _ASM_ARM_FTRACE */
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 83c391b597d4..a9151884bc85 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -134,10 +134,12 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
int oldval = 0, ret, tmp;
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
#ifndef CONFIG_SMP
preempt_disable();
#endif
- pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -159,13 +161,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
ret = -ENOSYS;
}
- pagefault_enable();
#ifndef CONFIG_SMP
preempt_enable();
#endif
- if (!ret)
- *oval = oldval;
+ /*
+	 * Store unconditionally. If ret != 0, the extra store is the least
+	 * of our worries, but GCC cannot figure out that __futex_atomic_op()
+	 * either sets ret to -EFAULT or stores the old value in oldval,
+	 * which results in an uninitialized warning at the call site.
+ */
+ *oval = oldval;
return ret;
}
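The unconditional store is a standard way to silence a spurious -Wmaybe-uninitialized at call sites while keeping the error contract: callers must check the return value before trusting *oval. A minimal standalone illustration of the pattern:

/* Pattern model: write the out-parameter on every path so the compiler
 * can prove it initialized; the return value still gates its validity. */
static int op_model(int *oval, const int *uaddr)
{
	int oldval = 0, ret = 0;

	if (!uaddr)
		ret = -14;		/* -EFAULT; oldval stays 0 */
	else
		oldval = *uaddr;	/* success path sets oldval */

	*oval = oldval;			/* store even when ret != 0 */
	return ret;
}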
diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h
deleted file mode 100644
index c50e383358c4..000000000000
--- a/arch/arm/include/asm/gpio.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ARCH_ARM_GPIO_H
-#define _ARCH_ARM_GPIO_H
-
-#if CONFIG_ARCH_NR_GPIO > 0
-#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
-#endif
-
-/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */
-#include <asm-generic/gpio.h>
-
-/* The trivial gpiolib dispatchers */
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-
-/*
- * Provide a default gpio_to_irq() which should satisfy every case.
- * However, some platforms want to do this differently, so allow them
- * to override it.
- */
-#ifndef gpio_to_irq
-#define gpio_to_irq __gpio_to_irq
-#endif
-
-#endif /* _ARCH_ARM_GPIO_H */
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 7a88f160b1fb..706efafbf972 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -2,33 +2,11 @@
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
-#include <linux/cache.h>
-#include <linux/threads.h>
#include <asm/irq.h>
-/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
-#define NR_IPI 7
-
-typedef struct {
- unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
- unsigned int ipi_irqs[NR_IPI];
-#endif
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
-
-#ifdef CONFIG_SMP
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#else
-#define smp_irq_stat_cpu(cpu) 0
-#endif
-
-#define arch_irq_stat_cpu smp_irq_stat_cpu
-
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm/include/asm/hardware/cache-aurora-l2.h b/arch/arm/include/asm/hardware/cache-aurora-l2.h
index 39769ffa0051..9694808ee97c 100644
--- a/arch/arm/include/asm/hardware/cache-aurora-l2.h
+++ b/arch/arm/include/asm/hardware/cache-aurora-l2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AURORA shared L2 cache controller support
*
@@ -5,10 +6,6 @@
*
* Yehuda Yitschak <yehuday@marvell.com>
* Gregory CLEMENT <gregory.clement@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef __ASM_ARM_HARDWARE_AURORA_L2_H
diff --git a/arch/arm/include/asm/hardware/cache-feroceon-l2.h b/arch/arm/include/asm/hardware/cache-feroceon-l2.h
index 12e1588dc4f1..eb2e7b7f70a8 100644
--- a/arch/arm/include/asm/hardware/cache-feroceon-l2.h
+++ b/arch/arm/include/asm/hardware/cache-feroceon-l2.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/cache-feroceon-l2.h
*
* Copyright (C) 2008 Marvell Semiconductor
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
extern void __init feroceon_l2_init(int l2_wt_override);
extern int __init feroceon_of_init(void);
-
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index a6d4ee86ba54..5a7ee70f561c 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -9,6 +9,8 @@
#define __ASM_ARM_HARDWARE_L2X0_H
#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
#define L2X0_CACHE_ID 0x000
#define L2X0_CACHE_TYPE 0x004
diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h
index 295e2e40151b..4e493facaa31 100644
--- a/arch/arm/include/asm/hardware/cache-tauros2.h
+++ b/arch/arm/include/asm/hardware/cache-tauros2.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/hardware/cache-tauros2.h
*
* Copyright (C) 2008 Marvell Semiconductor
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#define CACHE_TAUROS2_PREFETCH_ON (1 << 0)
diff --git a/arch/arm/include/asm/hardware/dec21285.h b/arch/arm/include/asm/hardware/dec21285.h
index 3f18a56a025d..894f2a635cbb 100644
--- a/arch/arm/include/asm/hardware/dec21285.h
+++ b/arch/arm/include/asm/hardware/dec21285.h
@@ -22,6 +22,13 @@
#define DC21285_IO(x) (x)
#endif
+/*
+ * The footbridge is programmed to expose the system RAM at 0xe0000000.
+ * The requirement is that the RAM isn't placed at bus address 0, which
+ * would clash with VGA cards.
+ */
+#define BUS_OFFSET 0xe0000000
+
#define CSR_PCICMD DC21285_IO(0x0004)
#define CSR_CLASSREV DC21285_IO(0x0008)
#define CSR_PCICACHELINESIZE DC21285_IO(0x000c)
@@ -81,19 +88,6 @@
#define SA110_CNTL_XCSDIR(x) ((x)<<28)
#define SA110_CNTL_PCICFN (1 << 31)
-/*
- * footbridge_cfn_mode() is used when we want
- * to check whether we are the central function
- */
-#define __footbridge_cfn_mode() (*CSR_SA110_CNTL & SA110_CNTL_PCICFN)
-#if defined(CONFIG_FOOTBRIDGE_HOST) && defined(CONFIG_FOOTBRIDGE_ADDIN)
-#define footbridge_cfn_mode() __footbridge_cfn_mode()
-#elif defined(CONFIG_FOOTBRIDGE_HOST)
-#define footbridge_cfn_mode() (1)
-#else
-#define footbridge_cfn_mode() (0)
-#endif
-
#define CSR_PCIADDR_EXTN DC21285_IO(0x0140)
#define CSR_PREFETCHMEMRANGE DC21285_IO(0x0144)
#define CSR_XBUS_CYCLE DC21285_IO(0x0148)
diff --git a/arch/arm/include/asm/hardware/entry-macro-iomd.S b/arch/arm/include/asm/hardware/entry-macro-iomd.S
deleted file mode 100644
index f7692731e514..000000000000
--- a/arch/arm/include/asm/hardware/entry-macro-iomd.S
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/entry-macro-iomd.S
- *
- * Low-level IRQ helper macros for IOC/IOMD based platforms
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-/* IOC / IOMD based hardware */
-#include <asm/hardware/iomd.h>
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldrb \irqstat, [\base, #IOMD_IRQREQB] @ get high priority first
- ldr \tmp, =irq_prio_h
- teq \irqstat, #0
-#ifdef IOMD_BASE
- ldrbeq \irqstat, [\base, #IOMD_DMAREQ] @ get dma
- addeq \tmp, \tmp, #256 @ irq_prio_h table size
- teqeq \irqstat, #0
- bne 2406f
-#endif
- ldrbeq \irqstat, [\base, #IOMD_IRQREQA] @ get low priority
- addeq \tmp, \tmp, #256 @ irq_prio_d table size
- teqeq \irqstat, #0
-#ifdef IOMD_IRQREQC
- ldrbeq \irqstat, [\base, #IOMD_IRQREQC]
- addeq \tmp, \tmp, #256 @ irq_prio_l table size
- teqeq \irqstat, #0
-#endif
-#ifdef IOMD_IRQREQD
- ldrbeq \irqstat, [\base, #IOMD_IRQREQD]
- addeq \tmp, \tmp, #256 @ irq_prio_lc table size
- teqeq \irqstat, #0
-#endif
-2406: ldrbne \irqnr, [\tmp, \irqstat] @ get IRQ number
- .endm
-
-/*
- * Interrupt table (incorporates priority). Please note that we
- * rely on the order of these tables (see above code).
- */
- .align 5
-irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
-#ifdef IOMD_BASE
-irq_prio_d: .byte 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
-#endif
-irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
-#ifdef IOMD_IRQREQC
-irq_prio_lc: .byte 24,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
- .byte 28,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
- .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
-#endif
-#ifdef IOMD_IRQREQD
-irq_prio_ld: .byte 40,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
- .byte 44,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
- .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
-#endif
-
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
deleted file mode 100644
index e175c2384f28..000000000000
--- a/arch/arm/include/asm/hardware/it8152.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/include/arm/hardware/it8152.h
- *
- * Copyright Compulab Ltd., 2006,2007
- * Mike Rapoport <mike@compulab.co.il>
- *
- * ITE 8152 companion chip register definitions
- */
-
-#ifndef __ASM_HARDWARE_IT8152_H
-#define __ASM_HARDWARE_IT8152_H
-
-#include <mach/irqs.h>
-
-extern void __iomem *it8152_base_address;
-
-#define IT8152_IO_BASE (it8152_base_address + 0x03e00000)
-#define IT8152_CFGREG_BASE (it8152_base_address + 0x03f00000)
-
-#define __REG_IT8152(x) (it8152_base_address + (x))
-
-#define IT8152_PCI_CFG_ADDR __REG_IT8152(0x3f00800)
-#define IT8152_PCI_CFG_DATA __REG_IT8152(0x3f00804)
-
-#define IT8152_INTC_LDCNIRR __REG_IT8152(0x3f00300)
-#define IT8152_INTC_LDPNIRR __REG_IT8152(0x3f00304)
-#define IT8152_INTC_LDCNIMR __REG_IT8152(0x3f00308)
-#define IT8152_INTC_LDPNIMR __REG_IT8152(0x3f0030C)
-#define IT8152_INTC_LDNITR __REG_IT8152(0x3f00310)
-#define IT8152_INTC_LDNIAR __REG_IT8152(0x3f00314)
-#define IT8152_INTC_LPCNIRR __REG_IT8152(0x3f00320)
-#define IT8152_INTC_LPPNIRR __REG_IT8152(0x3f00324)
-#define IT8152_INTC_LPCNIMR __REG_IT8152(0x3f00328)
-#define IT8152_INTC_LPPNIMR __REG_IT8152(0x3f0032C)
-#define IT8152_INTC_LPNITR __REG_IT8152(0x3f00330)
-#define IT8152_INTC_LPNIAR __REG_IT8152(0x3f00334)
-#define IT8152_INTC_PDCNIRR __REG_IT8152(0x3f00340)
-#define IT8152_INTC_PDPNIRR __REG_IT8152(0x3f00344)
-#define IT8152_INTC_PDCNIMR __REG_IT8152(0x3f00348)
-#define IT8152_INTC_PDPNIMR __REG_IT8152(0x3f0034C)
-#define IT8152_INTC_PDNITR __REG_IT8152(0x3f00350)
-#define IT8152_INTC_PDNIAR __REG_IT8152(0x3f00354)
-#define IT8152_INTC_INTC_TYPER __REG_IT8152(0x3f003FC)
-
-#define IT8152_GPIO_GPDR __REG_IT8152(0x3f00500)
-
-/*
- Interrupt controller per register summary:
- ---------------------------------------
- LCDNIRR:
- IT8152_LD_IRQ(8) PCICLK stop
- IT8152_LD_IRQ(7) MCLK ready
- IT8152_LD_IRQ(6) s/w
- IT8152_LD_IRQ(5) UART
- IT8152_LD_IRQ(4) GPIO
- IT8152_LD_IRQ(3) TIMER 4
- IT8152_LD_IRQ(2) TIMER 3
- IT8152_LD_IRQ(1) TIMER 2
- IT8152_LD_IRQ(0) TIMER 1
-
- LPCNIRR:
- IT8152_LP_IRQ(x) serial IRQ x
-
- PCIDNIRR:
- IT8152_PD_IRQ(14) PCISERR
- IT8152_PD_IRQ(13) CPU/PCI bridge target abort (h2pTADR)
- IT8152_PD_IRQ(12) CPU/PCI bridge master abort (h2pMADR)
- IT8152_PD_IRQ(11) PCI INTD
- IT8152_PD_IRQ(10) PCI INTC
- IT8152_PD_IRQ(9) PCI INTB
- IT8152_PD_IRQ(8) PCI INTA
- IT8152_PD_IRQ(7) serial INTD
- IT8152_PD_IRQ(6) serial INTC
- IT8152_PD_IRQ(5) serial INTB
- IT8152_PD_IRQ(4) serial INTA
- IT8152_PD_IRQ(3) serial IRQ IOCHK (IOCHKR)
- IT8152_PD_IRQ(2) chaining DMA (CDMAR)
- IT8152_PD_IRQ(1) USB (USBR)
- IT8152_PD_IRQ(0) Audio controller (ACR)
- */
-#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
-#define IT8152_LAST_IRQ (IRQ_BOARD_START + 40)
-
-/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
-#define IT8152_LD_IRQ_COUNT 9
-#define IT8152_LP_IRQ_COUNT 16
-#define IT8152_PD_IRQ_COUNT 15
-
-/* Priorities: */
-#define IT8152_PD_IRQ(i) IT8152_IRQ(i)
-#define IT8152_LP_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT)
-#define IT8152_LD_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT + IT8152_LP_IRQ_COUNT)
-
-/* frequently used interrupts */
-#define IT8152_PCISERR IT8152_PD_IRQ(14)
-#define IT8152_H2PTADR IT8152_PD_IRQ(13)
-#define IT8152_H2PMAR IT8152_PD_IRQ(12)
-#define IT8152_PCI_INTD IT8152_PD_IRQ(11)
-#define IT8152_PCI_INTC IT8152_PD_IRQ(10)
-#define IT8152_PCI_INTB IT8152_PD_IRQ(9)
-#define IT8152_PCI_INTA IT8152_PD_IRQ(8)
-#define IT8152_CDMA_INT IT8152_PD_IRQ(2)
-#define IT8152_USB_INT IT8152_PD_IRQ(1)
-#define IT8152_AUDIO_INT IT8152_PD_IRQ(0)
-
-struct pci_dev;
-struct pci_sys_data;
-
-extern void it8152_irq_demux(struct irq_desc *desc);
-extern void it8152_init_irq(void);
-extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
-extern struct pci_ops it8152_ops;
-
-#endif /* __ASM_HARDWARE_IT8152_H */
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
index f8712e3c29cf..9fd9ad5d9202 100644
--- a/arch/arm/include/asm/hardware/locomo.h
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -158,8 +158,6 @@
#define LOCOMO_LPT_TOH(TOH) ((TOH & 0x7) << 4)
#define LOCOMO_LPT_TOL(TOL) ((TOL & 0x7))
-extern struct bus_type locomo_bus_type;
-
#define LOCOMO_DEVID_KEYBOARD 0
#define LOCOMO_DEVID_FRONTLIGHT 1
#define LOCOMO_DEVID_BACKLIGHT 2
@@ -188,14 +186,14 @@ struct locomo_driver {
struct device_driver drv;
unsigned int devid;
int (*probe)(struct locomo_dev *);
- int (*remove)(struct locomo_dev *);
+ void (*remove)(struct locomo_dev *);
};
#define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv)
#define LOCOMO_DRIVER_NAME(_ldev) ((_ldev)->dev.driver->name)
-void locomo_lcd_power(struct locomo_dev *, int, unsigned int);
+extern void locomolcd_power(int on);
int locomo_driver_register(struct locomo_driver *);
void locomo_driver_unregister(struct locomo_driver *);
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index d134b9a5ff94..d8c6f8a99dfa 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -13,8 +13,6 @@
#ifndef _ASM_ARCH_SA1111
#define _ASM_ARCH_SA1111
-#include <mach/bitfield.h>
-
/*
* Don't ask the (SAC) DMA engines to move less than this amount.
*/
@@ -403,7 +401,7 @@ struct sa1111_driver {
struct device_driver drv;
unsigned int devid;
int (*probe)(struct sa1111_dev *);
- int (*remove)(struct sa1111_dev *);
+ void (*remove)(struct sa1111_dev *);
};
#define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index eb4e4207cd3c..b4b66220952d 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -2,7 +2,8 @@
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H
-#include <asm/kmap_types.h>
+#include <asm/cachetype.h>
+#include <asm/fixmap.h>
#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP PTRS_PER_PTE
@@ -10,8 +11,6 @@
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-#define kmap_prot PAGE_KERNEL
-
#define flush_cache_kmaps() \
do { \
if (cache_is_vivt()) \
@@ -20,9 +19,6 @@
extern pte_t *pkmap_page_table;
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-
/*
* The reason for kmap_high_get() is to ensure that the currently kmap'd
* page usage count does not decrease to zero while we're using its
@@ -51,23 +47,32 @@ extern void kunmap_high(struct page *page);
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
-#else
+
+static inline void *arch_kmap_local_high_get(struct page *page)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
+ return NULL;
+ return kmap_high_get(page);
+}
+#define arch_kmap_local_high_get arch_kmap_local_high_get
+
+#else /* ARCH_NEEDS_KMAP_HIGH_GET */
static inline void *kmap_high_get(struct page *page)
{
return NULL;
}
-#endif
+#endif /* !ARCH_NEEDS_KMAP_HIGH_GET */
-/*
- * The following functions are already defined by <linux/highmem.h>
- * when CONFIG_HIGHMEM is not set.
- */
-#ifdef CONFIG_HIGHMEM
-extern void *kmap(struct page *page);
-extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
-extern void *kmap_atomic_pfn(unsigned long pfn);
-#endif
+#define arch_kmap_local_post_map(vaddr, pteval) \
+ local_flush_tlb_kernel_page(vaddr)
+
+#define arch_kmap_local_pre_unmap(vaddr) \
+do { \
+ if (cache_is_vivt()) \
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); \
+} while (0)
+
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_kernel_page(vaddr)
#endif
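The arch_kmap_local_* hooks wire ARM's VIVT flushing and kernel-page TLB maintenance into the generic kmap_local implementation, which is why the old kmap_atomic() prototypes can go. A hedged sketch of the generic API those hooks serve (the helper is hypothetical and assumes len <= PAGE_SIZE):

/* Sketch: the hooks above run inside kmap_local_page()/kunmap_local(). */
static void copy_from_highpage_model(void *dst, struct page *page, size_t len)
{
	void *src = kmap_local_page(page);

	memcpy(dst, src, len);
	kunmap_local(src);
}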
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
index 318dcf5921ab..a3a82b7158d4 100644
--- a/arch/arm/include/asm/hugetlb.h
+++ b/arch/arm/include/asm/hugetlb.h
@@ -10,19 +10,15 @@
#ifndef _ASM_ARM_HUGETLB_H
#define _ASM_ARM_HUGETLB_H
+#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/hugetlb-3level.h>
#include <asm-generic/hugetlb.h>
-static inline int is_hugepage_only_range(struct mm_struct *mm,
- unsigned long addr, unsigned long len)
-{
- return 0;
-}
-
static inline void arch_clear_hugepage_flags(struct page *page)
{
clear_bit(PG_dcache_clean, &page->flags);
}
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif /* _ASM_ARM_HUGETLB_H */
diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h
index df8524365637..bd61502b9715 100644
--- a/arch/arm/include/asm/hypervisor.h
+++ b/arch/arm/include/asm/hypervisor.h
@@ -4,4 +4,7 @@
#include <asm/xen/hypervisor.h>
+void kvm_init_hyp_services(void);
+bool kvm_arm_hyp_service_available(u32 func_id);
+
#endif
diff --git a/arch/arm/include/asm/ide.h b/arch/arm/include/asm/ide.h
deleted file mode 100644
index a81e0b0d6747..000000000000
--- a/arch/arm/include/asm/ide.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * arch/arm/include/asm/ide.h
- *
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-/*
- * This file contains the ARM architecture specific IDE code.
- */
-
-#ifndef __ASMARM_IDE_H
-#define __ASMARM_IDE_H
-
-#ifdef __KERNEL__
-
-#define __ide_mm_insw(port,addr,len) readsw(port,addr,len)
-#define __ide_mm_insl(port,addr,len) readsl(port,addr,len)
-#define __ide_mm_outsw(port,addr,len) writesw(port,addr,len)
-#define __ide_mm_outsl(port,addr,len) writesl(port,addr,len)
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASMARM_IDE_H */
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index 73ba956e379f..baebb67b3512 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -3,10 +3,10 @@
#define __ASM_IDMAP_H
#include <linux/compiler.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
/* Tag a function as requiring to be executed via an identity mapping. */
-#define __idmap __section(.idmap.text) noinline notrace
+#define __idmap __section(".idmap.text") noinline notrace
extern pgd_t *idmap_pgd;
diff --git a/arch/arm/include/asm/insn.h b/arch/arm/include/asm/insn.h
index f20e08ac85ae..faf3d1c28368 100644
--- a/arch/arm/include/asm/insn.h
+++ b/arch/arm/include/asm/insn.h
@@ -2,6 +2,23 @@
#ifndef __ASM_ARM_INSN_H
#define __ASM_ARM_INSN_H
+#include <linux/types.h>
+
+/*
+ * Avoid a literal load by emitting a sequence of ADD/LDR instructions with the
+ * appropriate relocations. The combined sequence has a range of -/+ 256 MiB,
+ * which should be sufficient for the core kernel as well as modules loaded
+ * into the module region. (Not supported by LLD before release 14)
+ */
+#define LOAD_SYM_ARMV6(reg, sym) \
+ " .globl " #sym " \n\t" \
+ " .reloc 10f, R_ARM_ALU_PC_G0_NC, " #sym " \n\t" \
+ " .reloc 11f, R_ARM_ALU_PC_G1_NC, " #sym " \n\t" \
+ " .reloc 12f, R_ARM_LDR_PC_G2, " #sym " \n\t" \
+ "10: sub " #reg ", pc, #8 \n\t" \
+ "11: sub " #reg ", " #reg ", #4 \n\t" \
+ "12: ldr " #reg ", [" #reg ", #0] \n\t"
+
static inline unsigned long
arm_gen_nop(void)
{
@@ -13,18 +30,18 @@ arm_gen_nop(void)
}
unsigned long
-__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn);
static inline unsigned long
arm_gen_branch(unsigned long pc, unsigned long addr)
{
- return __arm_gen_branch(pc, addr, false);
+ return __arm_gen_branch(pc, addr, false, true);
}
static inline unsigned long
-arm_gen_branch_link(unsigned long pc, unsigned long addr)
+arm_gen_branch_link(unsigned long pc, unsigned long addr, bool warn)
{
- return __arm_gen_branch(pc, addr, true);
+ return __arm_gen_branch(pc, addr, true, warn);
}
#endif
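A hypothetical C-level use of LOAD_SYM_ARMV6 (the symbol name some_global is made up for illustration): the "%0" operand is stringified into the sub/ldr sequence the macro emits, so the value is loaded PC-relative with no literal pool.

static inline unsigned long read_some_global(void)
{
	unsigned long val;

	asm(LOAD_SYM_ARMV6(%0, some_global) : "=r" (val));
	return val;
}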
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index aefdabdbeb84..1815748f5d2a 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -23,7 +23,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm-generic/pci_iomap.h>
/*
@@ -138,11 +138,10 @@ extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
void *);
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
-extern void __iounmap(volatile void __iomem *addr);
+void __arm_iomem_set_ro(void __iomem *ptr, size_t size);
extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *);
-extern void (*arch_iounmap)(volatile void __iomem *);
/*
* Bad read/write accesses...
@@ -173,13 +172,16 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
#define PCI_IO_VIRT_BASE 0xfee00000
#define PCI_IOBASE ((void __iomem *)PCI_IO_VIRT_BASE)
-#if defined(CONFIG_PCI)
+#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
void pci_ioremap_set_mem_type(int mem_type);
#else
static inline void pci_ioremap_set_mem_type(int mem_type) {}
#endif
-extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
+struct resource;
+
+#define pci_remap_iospace pci_remap_iospace
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
/*
* PCI configuration space mapping function.
@@ -196,32 +198,13 @@ void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size);
*/
#ifdef CONFIG_NEED_MACH_IO_H
#include <mach/io.h>
-#elif defined(CONFIG_PCI)
-#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
-#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#else
-#define __io(a) __typesafe_io((a) & IO_SPACE_LIMIT)
-#endif
-
-/*
- * This is the limit of PC card/PCI/ISA IO space, which is by default
- * 64K if we have PC card, PCI or ISA support. Otherwise, default to
- * zero to prevent ISA/PCI drivers claiming IO space (and potentially
- * oopsing.)
- *
- * Only set this larger if you really need inb() et al. to operate over
- * a larger address space. Note that SOC_COMMON ioremaps each socket's
- * IO space area, and so inb() et al. must be defined to operate as per
- * readb() et al. on such platforms.

- */
-#ifndef IO_SPACE_LIMIT
-#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
-#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
-#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
-#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
+#if IS_ENABLED(CONFIG_PCMCIA) || defined(CONFIG_PCI)
+#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
#else
#define IO_SPACE_LIMIT ((resource_size_t)0)
#endif
+#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#endif
/*
@@ -356,7 +339,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
*
* Function Memory type Cacheability Cache hint
* ioremap() Device n/a n/a
- * ioremap_nocache() Device n/a n/a
* ioremap_cache() Normal Writeback Read allocate
* ioremap_wc() Normal Non-cacheable n/a
* ioremap_wt() Normal Non-cacheable n/a
@@ -368,13 +350,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
* - unaligned accesses are "unpredictable"
* - writes may be delayed before they hit the endpoint device
*
- * ioremap_nocache() is the same as ioremap() as there are too many device
- * drivers using this for device registers, and documentation which tells
- * people to use it for such for this to be any different. This is not a
- * safe fallback for memory-like mappings, or memory regions where the
- * compiler may generate unaligned accesses - eg, via inlining its own
- * memcpy.
- *
* All normal memory mappings have the following properties:
* - reads can be repeated with no side effects
* - repeated reads return the last value written
@@ -403,7 +378,7 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc
-void iounmap(volatile void __iomem *iomem_cookie);
+void iounmap(volatile void __iomem *io_addr);
#define iounmap iounmap
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
@@ -432,24 +407,15 @@ struct pci_dev;
#define pci_iounmap pci_iounmap
extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
#include <asm-generic/io.h>
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
-extern int devmem_is_allowed(unsigned long pfn);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
#endif
/*
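With the consolidated definition above, a port number is simply an offset into the fixed PCI I/O window. Worked arithmetic, assuming the defaults shown:

/*
 * inb(0x3f8) dereferences
 *	PCI_IO_VIRT_BASE + (0x3f8 & IO_SPACE_LIMIT)
 *   == 0xfee00000 + 0x3f8
 *   == 0xfee003f8
 */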
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 46d41140df27..26c1d2ced4ce 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -26,13 +26,13 @@
struct irqaction;
struct pt_regs;
-extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
-void init_IRQ(void);
#ifdef CONFIG_SMP
+#include <linux/cpumask.h>
+
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
- bool exclude_self);
+ int exclude_cpu);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif
diff --git a/arch/arm/include/asm/kasan.h b/arch/arm/include/asm/kasan.h
new file mode 100644
index 000000000000..303c35df3135
--- /dev/null
+++ b/arch/arm/include/asm/kasan.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/kasan.h
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifdef CONFIG_KASAN
+
+#include <asm/kasan_def.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+/*
+ * The compiler uses a shadow offset assuming that addresses start
+ * from 0. Kernel addresses don't start from 0, so shadow
+ * for kernel really starts from 'compiler's shadow offset' +
+ * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT)
+ */
+
+asmlinkage void kasan_early_init(void);
+extern void kasan_init(void);
+
+#else
+static inline void kasan_init(void) { }
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/kasan_def.h b/arch/arm/include/asm/kasan_def.h
new file mode 100644
index 000000000000..5739605aa7cf
--- /dev/null
+++ b/arch/arm/include/asm/kasan_def.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/kasan_def.h
+ *
+ * Copyright (c) 2018 Huawei Technologies Co., Ltd.
+ *
+ * Author: Abbott Liu <liuwenliang@huawei.com>
+ */
+
+#ifndef __ASM_KASAN_DEF_H
+#define __ASM_KASAN_DEF_H
+
+#ifdef CONFIG_KASAN
+
+/*
+ * Define KASAN_SHADOW_OFFSET, KASAN_SHADOW_START and KASAN_SHADOW_END for
+ * the Arm kernel address sanitizer. We are "stealing" lowmem (the 4GB
+ * addressable by a 32bit architecture) out of the virtual address
+ * space to use as shadow memory for KASan as follows:
+ *
+ * +----+ 0xffffffff
+ * | | \
+ * | | |-> Static kernel image (vmlinux) BSS and page table
+ * | |/
+ * +----+ PAGE_OFFSET
+ * | | \
+ * | | |-> Loadable kernel modules virtual address space area
+ * | |/
+ * +----+ MODULES_VADDR = KASAN_SHADOW_END
+ * | | \
+ * | | |-> The shadow area of kernel virtual address.
+ * | |/
+ * +----+-> TASK_SIZE (start of kernel space) = KASAN_SHADOW_START, the
+ * | |\ shadow address of MODULES_VADDR
+ * | | |
+ * | | |
+ * | | |-> The user space area in lowmem. The kernel address
+ * | | | sanitizer does not use this space, nor does it map it.
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * | |/
+ * ------ 0
+ *
+ * 1) KASAN_SHADOW_START
+ * This value is the shadow address of MODULES_VADDR. It is the
+ * start of kernel virtual space. Since we have modules to load, we
+ * need to also cover that area with shadow memory so we can find
+ * memory bugs in modules.
+ *
+ * 2) KASAN_SHADOW_END
+ * This value is the shadow address of 0x100000000: the mapping that
+ * would follow the end of kernel memory at 0xffffffff. It is the end of
+ * kernel address sanitizer shadow area. It is also the start of the
+ * module area.
+ *
+ * 3) KASAN_SHADOW_OFFSET:
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ *
+ * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ *
+ * As you would expect, >> 3 is equal to dividing by 8, so each byte
+ * of shadow memory covers 8 bytes of kernel memory, i.e. one bit of
+ * shadow memory per byte of kernel memory.
+ *
+ * The KASAN_SHADOW_OFFSET is provided in a Kconfig option depending
+ * on the VMSPLIT layout of the system: the kernel and userspace can
+ * split up lowmem in different ways according to needs, so the shadow
+ * offset is calculated accordingly.
+ */
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_END ((UL(1) << (32 - KASAN_SHADOW_SCALE_SHIFT)) \
+ + KASAN_SHADOW_OFFSET)
+#define KASAN_SHADOW_START ((KASAN_SHADOW_END >> 3) + KASAN_SHADOW_OFFSET)
+
+#endif
+#endif
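The mapping formula is easy to sanity-check in isolation. A standalone sketch follows; the offset constant is an assumption standing in for whatever CONFIG_KASAN_SHADOW_OFFSET the chosen VMSPLIT yields.

#include <stdio.h>

#define EXAMPLE_SHADOW_OFFSET	0x8f000000UL	/* assumed, Kconfig-dependent */

static unsigned long example_mem_to_shadow(unsigned long addr)
{
	return (addr >> 3) + EXAMPLE_SHADOW_OFFSET;
}

int main(void)
{
	/* 0xc0000000 is a typical PAGE_OFFSET for a 3G/1G split */
	printf("shadow of 0xc0000000 = %#lx\n",
	       example_mem_to_shadow(0xc0000000UL));	/* prints 0xa7000000 */
	return 0;
}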
diff --git a/arch/arm/include/asm/kexec-internal.h b/arch/arm/include/asm/kexec-internal.h
new file mode 100644
index 000000000000..ecc2322db7aa
--- /dev/null
+++ b/arch/arm/include/asm/kexec-internal.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+ unsigned long kexec_start_address;
+ unsigned long kexec_indirection_page;
+ unsigned long kexec_mach_type;
+ unsigned long kexec_r2;
+};
+
+#endif
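The struct exists so the C side (machine_kexec()) and the relocation assembly agree on a single layout. Below, a sketch of how the field offsets would typically be exported to assembly in asm-offsets.c style; the constant names are assumptions for illustration.

DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
DEFINE(KEXEC_MACH_TYPE,  offsetof(struct kexec_relocate_data, kexec_mach_type));
DEFINE(KEXEC_R2,         offsetof(struct kexec_relocate_data, kexec_r2));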
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index 22751b5b5735..a8287e7ab9d4 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -2,8 +2,6 @@
#ifndef _ARM_KEXEC_H
#define _ARM_KEXEC_H
-#ifdef CONFIG_KEXEC
-
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
@@ -56,9 +54,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
}
}
-/* Function pointer to optional machine-specific reinitialization */
-extern void (*kexec_reinit)(void);
-
static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
{
return phys_to_idmap(phys);
@@ -85,6 +80,4 @@ static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_KEXEC */
-
#endif /* _ARM_KEXEC_H */
diff --git a/arch/arm/include/asm/kfence.h b/arch/arm/include/asm/kfence.h
new file mode 100644
index 000000000000..7980d0f2271f
--- /dev/null
+++ b/arch/arm/include/asm/kfence.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_ARM_KFENCE_H
+#define __ASM_ARM_KFENCE_H
+
+#include <linux/kfence.h>
+
+#include <asm/pgalloc.h>
+#include <asm/set_memory.h>
+
+static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
+{
+ int i;
+ unsigned long pfn = PFN_DOWN(__pa(addr));
+ pte_t *pte = pte_alloc_one_kernel(&init_mm);
+
+ if (!pte)
+ return -ENOMEM;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
+ pmd_populate_kernel(&init_mm, pmd, pte);
+
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+ return 0;
+}
+
+static inline bool arch_kfence_init_pool(void)
+{
+ unsigned long addr;
+ pmd_t *pmd;
+
+ for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+ addr += PAGE_SIZE) {
+ pmd = pmd_off_k(addr);
+
+ if (pmd_leaf(*pmd)) {
+ if (split_pmd_page(pmd, addr & PMD_MASK))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ set_memory_valid(addr, 1, !protect);
+
+ return true;
+}
+
+#endif /* __ASM_ARM_KFENCE_H */
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
deleted file mode 100644
index 5590940ee43d..000000000000
--- a/arch/arm/include/asm/kmap_types.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARM_KMAP_TYPES_H
-#define __ARM_KMAP_TYPES_H
-
-/*
- * This is the "bare minimum". AIO seems to require this.
- */
-#define KM_TYPE_NR 16
-
-#endif
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 213607a1f45c..5b8dbf1b0be4 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -40,24 +40,22 @@ struct kprobe_ctlblk {
void arch_remove_kprobe(struct kprobe *);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
-int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
/* optinsn template addresses */
-extern __visible kprobe_opcode_t optprobe_template_entry;
-extern __visible kprobe_opcode_t optprobe_template_val;
-extern __visible kprobe_opcode_t optprobe_template_call;
-extern __visible kprobe_opcode_t optprobe_template_end;
-extern __visible kprobe_opcode_t optprobe_template_sub_sp;
-extern __visible kprobe_opcode_t optprobe_template_add_sp;
-extern __visible kprobe_opcode_t optprobe_template_restore_begin;
-extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
-extern __visible kprobe_opcode_t optprobe_template_restore_end;
+extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_val[];
+extern __visible kprobe_opcode_t optprobe_template_call[];
+extern __visible kprobe_opcode_t optprobe_template_end[];
+extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
+extern __visible kprobe_opcode_t optprobe_template_add_sp[];
+extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
+extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
+extern __visible kprobe_opcode_t optprobe_template_restore_end[];
#define MAX_OPTIMIZED_LENGTH 4
#define MAX_OPTINSN_SIZE \
- ((unsigned long)&optprobe_template_end - \
- (unsigned long)&optprobe_template_entry)
+ ((unsigned long)optprobe_template_end - \
+ (unsigned long)optprobe_template_entry)
#define RELATIVEJUMP_SIZE 4
struct arch_optimized_insn {
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
deleted file mode 100644
index 9c04bd810d07..000000000000
--- a/arch/arm/include/asm/kvm_arm.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_ARM_H__
-#define __ARM_KVM_ARM_H__
-
-#include <linux/const.h>
-#include <linux/types.h>
-
-/* Hyp Configuration Register (HCR) bits */
-#define HCR_TGE (1 << 27)
-#define HCR_TVM (1 << 26)
-#define HCR_TTLB (1 << 25)
-#define HCR_TPU (1 << 24)
-#define HCR_TPC (1 << 23)
-#define HCR_TSW (1 << 22)
-#define HCR_TAC (1 << 21)
-#define HCR_TIDCP (1 << 20)
-#define HCR_TSC (1 << 19)
-#define HCR_TID3 (1 << 18)
-#define HCR_TID2 (1 << 17)
-#define HCR_TID1 (1 << 16)
-#define HCR_TID0 (1 << 15)
-#define HCR_TWE (1 << 14)
-#define HCR_TWI (1 << 13)
-#define HCR_DC (1 << 12)
-#define HCR_BSU (3 << 10)
-#define HCR_BSU_IS (1 << 10)
-#define HCR_FB (1 << 9)
-#define HCR_VA (1 << 8)
-#define HCR_VI (1 << 7)
-#define HCR_VF (1 << 6)
-#define HCR_AMO (1 << 5)
-#define HCR_IMO (1 << 4)
-#define HCR_FMO (1 << 3)
-#define HCR_PTW (1 << 2)
-#define HCR_SWIO (1 << 1)
-#define HCR_VM 1
-
-/*
- * The bits we set in HCR:
- * TAC: Trap ACTLR
- * TSC: Trap SMC
- * TVM: Trap VM ops (until MMU and caches are on)
- * TSW: Trap cache operations by set/way
- * TWI: Trap WFI
- * TWE: Trap WFE
- * TIDCP: Trap L2CTLR/L2ECTLR
- * BSU_IS: Upgrade barriers to the inner shareable domain
- * FB: Force broadcast of all maintenance operations
- * AMO: Override CPSR.A and enable signaling with VA
- * IMO: Override CPSR.I and enable signaling with VI
- * FMO: Override CPSR.F and enable signaling with VF
- * SWIO: Turn set/way invalidates into set/way clean+invalidate
- */
-#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
- HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
- HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
-
-/* System Control Register (SCTLR) bits */
-#define SCTLR_TE (1 << 30)
-#define SCTLR_EE (1 << 25)
-#define SCTLR_V (1 << 13)
-
-/* Hyp System Control Register (HSCTLR) bits */
-#define HSCTLR_TE (1 << 30)
-#define HSCTLR_EE (1 << 25)
-#define HSCTLR_FI (1 << 21)
-#define HSCTLR_WXN (1 << 19)
-#define HSCTLR_I (1 << 12)
-#define HSCTLR_C (1 << 2)
-#define HSCTLR_A (1 << 1)
-#define HSCTLR_M 1
-#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
- HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
-
-/* TTBCR and HTCR Registers bits */
-#define TTBCR_EAE (1 << 31)
-#define TTBCR_IMP (1 << 30)
-#define TTBCR_SH1 (3 << 28)
-#define TTBCR_ORGN1 (3 << 26)
-#define TTBCR_IRGN1 (3 << 24)
-#define TTBCR_EPD1 (1 << 23)
-#define TTBCR_A1 (1 << 22)
-#define TTBCR_T1SZ (7 << 16)
-#define TTBCR_SH0 (3 << 12)
-#define TTBCR_ORGN0 (3 << 10)
-#define TTBCR_IRGN0 (3 << 8)
-#define TTBCR_EPD0 (1 << 7)
-#define TTBCR_T0SZ (7 << 0)
-#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
-
-/* Hyp System Trap Register */
-#define HSTR_T(x) (1 << x)
-#define HSTR_TTEE (1 << 16)
-#define HSTR_TJDBX (1 << 17)
-
-/* Hyp Coprocessor Trap Register */
-#define HCPTR_TCP(x) (1 << x)
-#define HCPTR_TCP_MASK (0x3fff)
-#define HCPTR_TASE (1 << 15)
-#define HCPTR_TTA (1 << 20)
-#define HCPTR_TCPAC (1 << 31)
-
-/* Hyp Debug Configuration Register bits */
-#define HDCR_TDRA (1 << 11)
-#define HDCR_TDOSA (1 << 10)
-#define HDCR_TDA (1 << 9)
-#define HDCR_TDE (1 << 8)
-#define HDCR_HPME (1 << 7)
-#define HDCR_TPM (1 << 6)
-#define HDCR_TPMCR (1 << 5)
-#define HDCR_HPMN_MASK (0x1F)
-
-/*
- * The architecture supports 40-bit IPA as input to the 2nd stage translations
- * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
- * space.
- */
-#define KVM_PHYS_SHIFT (40)
-
-#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
-
-/* Virtualization Translation Control Register (VTCR) bits */
-#define VTCR_SH0 (3 << 12)
-#define VTCR_ORGN0 (3 << 10)
-#define VTCR_IRGN0 (3 << 8)
-#define VTCR_SL0 (3 << 6)
-#define VTCR_S (1 << 4)
-#define VTCR_T0SZ (0xf)
-#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
- VTCR_S | VTCR_T0SZ)
-#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
-#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
-#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
-#define KVM_VTCR_SL0 VTCR_SL_L1
-/* stage-2 input address range defined as 2^(32-T0SZ) */
-#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
-#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
-#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
-
-/* Virtualization Translation Table Base Register (VTTBR) bits */
-#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
-#define VTTBR_X (14 - KVM_T0SZ)
-#else
-#define VTTBR_X (5 - KVM_T0SZ)
-#endif
-#define VTTBR_CNP_BIT _AC(1, UL)
-#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
-#define VTTBR_VMID_SHIFT _AC(48, ULL)
-#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
-
-/* Hyp Syndrome Register (HSR) bits */
-#define HSR_EC_SHIFT (26)
-#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
-#define HSR_IL (_AC(1, UL) << 25)
-#define HSR_ISS (HSR_IL - 1)
-#define HSR_ISV_SHIFT (24)
-#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
-#define HSR_SRT_SHIFT (16)
-#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
-#define HSR_CM (1 << 8)
-#define HSR_FSC (0x3f)
-#define HSR_FSC_TYPE (0x3c)
-#define HSR_SSE (1 << 21)
-#define HSR_WNR (1 << 6)
-#define HSR_CV_SHIFT (24)
-#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
-#define HSR_COND_SHIFT (20)
-#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT)
-
-#define FSC_FAULT (0x04)
-#define FSC_ACCESS (0x08)
-#define FSC_PERM (0x0c)
-#define FSC_SEA (0x10)
-#define FSC_SEA_TTW0 (0x14)
-#define FSC_SEA_TTW1 (0x15)
-#define FSC_SEA_TTW2 (0x16)
-#define FSC_SEA_TTW3 (0x17)
-#define FSC_SECC (0x18)
-#define FSC_SECC_TTW0 (0x1c)
-#define FSC_SECC_TTW1 (0x1d)
-#define FSC_SECC_TTW2 (0x1e)
-#define FSC_SECC_TTW3 (0x1f)
-
-/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
-#define HPFAR_MASK (~0xf)
-
-#define HSR_EC_UNKNOWN (0x00)
-#define HSR_EC_WFI (0x01)
-#define HSR_EC_CP15_32 (0x03)
-#define HSR_EC_CP15_64 (0x04)
-#define HSR_EC_CP14_MR (0x05)
-#define HSR_EC_CP14_LS (0x06)
-#define HSR_EC_CP_0_13 (0x07)
-#define HSR_EC_CP10_ID (0x08)
-#define HSR_EC_JAZELLE (0x09)
-#define HSR_EC_BXJ (0x0A)
-#define HSR_EC_CP14_64 (0x0C)
-#define HSR_EC_SVC_HYP (0x11)
-#define HSR_EC_HVC (0x12)
-#define HSR_EC_SMC (0x13)
-#define HSR_EC_IABT (0x20)
-#define HSR_EC_IABT_HYP (0x21)
-#define HSR_EC_DABT (0x24)
-#define HSR_EC_DABT_HYP (0x25)
-#define HSR_EC_MAX (0x3f)
-
-#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
-
-#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1)
-
-#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
-#define HSR_DABT_CM (_AC(1, UL) << 8)
-
-#define kvm_arm_exception_type \
- {0, "RESET" }, \
- {1, "UNDEFINED" }, \
- {2, "SOFTWARE" }, \
- {3, "PREF_ABORT" }, \
- {4, "DATA_ABORT" }, \
- {5, "IRQ" }, \
- {6, "FIQ" }, \
- {7, "HVC" }
-
-#define HSRECN(x) { HSR_EC_##x, #x }
-
-#define kvm_arm_exception_class \
- HSRECN(UNKNOWN), HSRECN(WFI), HSRECN(CP15_32), HSRECN(CP15_64), \
- HSRECN(CP14_MR), HSRECN(CP14_LS), HSRECN(CP_0_13), HSRECN(CP10_ID), \
- HSRECN(JAZELLE), HSRECN(BXJ), HSRECN(CP14_64), HSRECN(SVC_HYP), \
- HSRECN(HVC), HSRECN(SMC), HSRECN(IABT), HSRECN(IABT_HYP), \
- HSRECN(DABT), HSRECN(DABT_HYP)
-
-
-#endif /* __ARM_KVM_ARM_H__ */
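Two notes on the arithmetic being removed here. With KVM_PHYS_SHIFT = 40, KVM_T0SZ = 32 - 40 = -8, and since KVM_VTCR_SL0 selects starting level 1, VTTBR_X = 5 - (-8) = 13. The HSR field masks decode as in the standalone sketch below; the sample value is made up.

#include <stdio.h>

#define EC_SHIFT	26
#define EC_MASK		(0x3fUL << EC_SHIFT)
#define IL_BIT		(1UL << 25)

int main(void)
{
	unsigned long hsr = 0x93830047UL;	/* made-up sample */

	/* prints EC=0x24 (data abort), IL=1, ISS=0x1830047 */
	printf("EC=%#lx IL=%lu ISS=%#lx\n",
	       (hsr & EC_MASK) >> EC_SHIFT,
	       (hsr & IL_BIT) ? 1UL : 0UL,
	       hsr & (IL_BIT - 1));
	return 0;
}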
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
deleted file mode 100644
index f615830f9f57..000000000000
--- a/arch/arm/include/asm/kvm_asm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_ASM_H__
-#define __ARM_KVM_ASM_H__
-
-#include <asm/virt.h>
-
-#define ARM_EXIT_WITH_ABORT_BIT 31
-#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
-#define ARM_EXCEPTION_IS_TRAP(x) \
- (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_PREF_ABORT || \
- ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_DATA_ABORT || \
- ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_HVC)
-#define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
-
-#define ARM_EXCEPTION_RESET 0
-#define ARM_EXCEPTION_UNDEFINED 1
-#define ARM_EXCEPTION_SOFTWARE 2
-#define ARM_EXCEPTION_PREF_ABORT 3
-#define ARM_EXCEPTION_DATA_ABORT 4
-#define ARM_EXCEPTION_IRQ 5
-#define ARM_EXCEPTION_FIQ 6
-#define ARM_EXCEPTION_HVC 7
-#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
-/*
- * The rr_lo_hi macro swaps a pair of registers depending on
- * current endianness. It is used in conjunction with ldrd and strd
- * instructions that load/store a 64-bit value from/to memory to/from
- * a pair of registers which are used with the mrrc and mcrr instructions.
- * If used with the ldrd/strd instructions, the a1 parameter is the first
- * source/destination register and the a2 parameter is the second
- * source/destination register. Note that the ldrd/strd instructions
- * already swap the bytes within the words correctly according to the
- * endianness setting, but the order of the registers needs to be effectively
- * swapped when used with the mrrc/mcrr instructions.
- */
-#ifdef CONFIG_CPU_ENDIAN_BE8
-#define rr_lo_hi(a1, a2) a2, a1
-#else
-#define rr_lo_hi(a1, a2) a1, a2
-#endif
-
-#define kvm_ksym_ref(kva) (kva)
-
-#ifndef __ASSEMBLY__
-struct kvm;
-struct kvm_vcpu;
-
-extern char __kvm_hyp_init[];
-extern char __kvm_hyp_init_end[];
-
-extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
-
-extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
-
-/* no VHE on 32-bit :( */
-static inline int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { BUG(); return 0; }
-
-extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
-
-extern void __init_stage2_translation(void);
-
-extern u64 __vgic_v3_get_ich_vtr_el2(void);
-extern u64 __vgic_v3_read_vmcr(void);
-extern void __vgic_v3_write_vmcr(u32 vmcr);
-extern void __vgic_v3_init_lrs(void);
-
-#endif
-
-#endif /* __ARM_KVM_ASM_H__ */
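The rr_lo_hi() comment is clearest in use. A hedged assembly fragment in the style of the hyp code that consumed this macro (register choice and the VTTBR target are illustrative, not taken from this patch):

	ldrd	r2, r3, [r0]			@ 64-bit value, memory order
	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ write VTTBR

On BE8, ldrd leaves the high word in r2, and rr_lo_hi(r2, r3) expands to "r3, r2", so mcrr still receives its operands as (low, high).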
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
deleted file mode 100644
index a23826117dd6..000000000000
--- a/arch/arm/include/asm/kvm_coproc.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 Rusty Russell IBM Corporation
- */
-
-#ifndef __ARM_KVM_COPROC_H__
-#define __ARM_KVM_COPROC_H__
-#include <linux/kvm_host.h>
-
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_coproc_target_table {
- unsigned target;
- const struct coproc_reg *table;
- size_t num;
-};
-void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
-
-int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-void kvm_coproc_table_init(void);
-
-struct kvm_one_reg;
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-#endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
deleted file mode 100644
index 9b118516d2db..000000000000
--- a/arch/arm/include/asm/kvm_emulate.h
+++ /dev/null
@@ -1,351 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_EMULATE_H__
-#define __ARM_KVM_EMULATE_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
-#include <asm/kvm_arm.h>
-#include <asm/cputype.h>
-
-/* arm64 compatibility macros */
-#define PSR_AA32_MODE_ABT ABT_MODE
-#define PSR_AA32_MODE_UND UND_MODE
-#define PSR_AA32_T_BIT PSR_T_BIT
-#define PSR_AA32_I_BIT PSR_I_BIT
-#define PSR_AA32_A_BIT PSR_A_BIT
-#define PSR_AA32_E_BIT PSR_E_BIT
-#define PSR_AA32_IT_MASK PSR_IT_MASK
-
-unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-
-static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
-{
- return vcpu_reg(vcpu, reg_num);
-}
-
-unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
-
-static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
-{
- return *__vcpu_spsr(vcpu);
-}
-
-static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
-{
- *__vcpu_spsr(vcpu) = v;
-}
-
-static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
- u8 reg_num)
-{
- return *vcpu_reg(vcpu, reg_num);
-}
-
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
- unsigned long val)
-{
- *vcpu_reg(vcpu, reg_num) = val;
-}
-
-bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
-void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
-void kvm_inject_undef32(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-
-static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
-{
- kvm_inject_undef32(vcpu);
-}
-
-static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
- kvm_inject_dabt32(vcpu, addr);
-}
-
-static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
- kvm_inject_pabt32(vcpu, addr);
-}
-
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
-{
- return kvm_condition_valid32(vcpu);
-}
-
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
- kvm_skip_instr32(vcpu, is_wide_instr);
-}
-
-static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr = HCR_GUEST_MASK;
-}
-
-static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
-{
- return (unsigned long *)&vcpu->arch.hcr;
-}
-
-static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr &= ~HCR_TWE;
-}
-
-static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr |= HCR_TWE;
-}
-
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
-{
- return true;
-}
-
-static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
-{
- return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
-}
-
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
-{
- return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
-}
-
-static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
-{
- *vcpu_cpsr(vcpu) |= PSR_T_BIT;
-}
-
-static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
- return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
-}
-
-static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
- return cpsr_mode > USR_MODE;
-}
-
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hsr;
-}
-
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
-{
- u32 hsr = kvm_vcpu_get_hsr(vcpu);
-
- if (hsr & HSR_CV)
- return (hsr & HSR_COND) >> HSR_COND_SHIFT;
-
- return -1;
-}
-
-static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hxfar;
-}
-
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
-{
- return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
-}
-
-static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
-}
-
-static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC);
-}
-
-static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
-}
-
-static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
-}
-
-static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
-{
- return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
-}
-
-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
-}
-
-static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
-}
-
-/* Get Access Size from a data abort */
-static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
-{
- switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
- case 0:
- return 1;
- case 1:
- return 2;
- case 2:
- return 4;
- default:
- kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
- return -EFAULT;
- }
-}
-
-/* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
-}
-
-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
-}
-
-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
-}
-
-static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
-}
-
-static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
-}
-
-static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
-{
- switch (kvm_vcpu_trap_get_fault(vcpu)) {
- case FSC_SEA:
- case FSC_SEA_TTW0:
- case FSC_SEA_TTW1:
- case FSC_SEA_TTW2:
- case FSC_SEA_TTW3:
- case FSC_SECC:
- case FSC_SECC_TTW0:
- case FSC_SECC_TTW1:
- case FSC_SECC_TTW2:
- case FSC_SECC_TTW3:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-{
- if (kvm_vcpu_trap_is_iabt(vcpu))
- return false;
-
- return kvm_vcpu_dabt_iswrite(vcpu);
-}
-
-static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
-}
-
-static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
-{
- return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
-}
-
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
- return false;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
- bool flag)
-{
-}
-
-static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
-{
- *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-}
-
-static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
-{
- return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
-}
-
-static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return be16_to_cpu(data & 0xffff);
- default:
- return be32_to_cpu(data);
- }
- } else {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return le16_to_cpu(data & 0xffff);
- default:
- return le32_to_cpu(data);
- }
- }
-}
-
-static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_be16(data & 0xffff);
- default:
- return cpu_to_be32(data);
- }
- } else {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_le16(data & 0xffff);
- default:
- return cpu_to_le32(data);
- }
- }
-}
-
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
-
-#endif /* __ARM_KVM_EMULATE_H__ */
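The guest/host MMIO byte-order rule above can be demonstrated standalone. A sketch of the big-endian-guest path, assuming a little-endian host where be16_to_cpu() reduces to a byte swap:

#include <stdio.h>

static unsigned long be_guest_to_host(unsigned long data, unsigned int len)
{
	switch (len) {
	case 1:
		return data & 0xff;
	case 2:
		return __builtin_bswap16(data & 0xffff);
	default:
		return __builtin_bswap32(data);
	}
}

int main(void)
{
	/* a 2-byte store of 0x1234 by a BE guest reaches the host as 0x3412 */
	printf("%#lx -> %#lx\n", 0x1234UL, be_guest_to_host(0x1234UL, 2));
	return 0;
}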
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
deleted file mode 100644
index 556cd818eccf..000000000000
--- a/arch/arm/include/asm/kvm_host.h
+++ /dev/null
@@ -1,457 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_HOST_H__
-#define __ARM_KVM_HOST_H__
-
-#include <linux/arm-smccc.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/kvm_types.h>
-#include <asm/cputype.h>
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmio.h>
-#include <asm/fpstate.h>
-#include <kvm/arm_arch_timer.h>
-
-#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_HAVE_ONE_REG
-#define KVM_HALT_POLL_NS_DEFAULT 500000
-
-#define KVM_VCPU_MAX_FEATURES 2
-
-#include <kvm/arm_vgic.h>
-
-
-#ifdef CONFIG_ARM_GIC_V3
-#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#else
-#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
-#endif
-
-#define KVM_REQ_SLEEP \
- KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
-#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
-#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
-
-DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
-
-static inline int kvm_arm_init_sve(void) { return 0; }
-
-u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
-int __attribute_const__ kvm_target_cpu(void);
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_vmid {
- /* The VMID generation used for the virt. memory system */
- u64 vmid_gen;
- u32 vmid;
-};
-
-struct kvm_arch {
- /* The last vcpu id that ran on each physical CPU */
- int __percpu *last_vcpu_ran;
-
- /*
- * Anything that is not used directly from assembly code goes
- * here.
- */
-
- /* The VMID generation used for the virt. memory system */
- struct kvm_vmid vmid;
-
- /* Stage-2 page table */
- pgd_t *pgd;
- phys_addr_t pgd_phys;
-
- /* Interrupt controller */
- struct vgic_dist vgic;
- int max_vcpus;
-
- /* Mandated version of PSCI */
- u32 psci_version;
-
- /*
- * If we encounter a data abort without valid instruction syndrome
- * information, report this to user space. User space can (and
- * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
- * supported.
- */
- bool return_nisv_io_abort_to_user;
-};
-
-#define KVM_NR_MEM_OBJS 40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
- int nobjs;
- void *objects[KVM_NR_MEM_OBJS];
-};
-
-struct kvm_vcpu_fault_info {
- u32 hsr; /* Hyp Syndrome Register */
- u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
- u32 hpfar; /* Hyp IPA Fault Address Register */
-};
-
-/*
- * 0 is reserved as an invalid value.
- * Order should be kept in sync with the save/restore code.
- */
-enum vcpu_sysreg {
- __INVALID_SYSREG__,
- c0_MPIDR, /* MultiProcessor ID Register */
- c0_CSSELR, /* Cache Size Selection Register */
- c1_SCTLR, /* System Control Register */
- c1_ACTLR, /* Auxiliary Control Register */
- c1_CPACR, /* Coprocessor Access Control */
- c2_TTBR0, /* Translation Table Base Register 0 */
- c2_TTBR0_high, /* TTBR0 top 32 bits */
- c2_TTBR1, /* Translation Table Base Register 1 */
- c2_TTBR1_high, /* TTBR1 top 32 bits */
- c2_TTBCR, /* Translation Table Base Control R. */
- c3_DACR, /* Domain Access Control Register */
- c5_DFSR, /* Data Fault Status Register */
- c5_IFSR, /* Instruction Fault Status Register */
- c5_ADFSR, /* Auxiliary Data Fault Status R */
- c5_AIFSR, /* Auxiliary Instruction Fault Status R */
- c6_DFAR, /* Data Fault Address Register */
- c6_IFAR, /* Instruction Fault Address Register */
- c7_PAR, /* Physical Address Register */
- c7_PAR_high, /* PAR top 32 bits */
- c9_L2CTLR, /* Cortex A15/A7 L2 Control Register */
- c10_PRRR, /* Primary Region Remap Register */
- c10_NMRR, /* Normal Memory Remap Register */
- c12_VBAR, /* Vector Base Address Register */
- c13_CID, /* Context ID Register */
- c13_TID_URW, /* Thread ID, User R/W */
- c13_TID_URO, /* Thread ID, User R/O */
- c13_TID_PRIV, /* Thread ID, Privileged */
- c14_CNTKCTL, /* Timer Control Register (PL1) */
- c10_AMAIR0, /* Auxiliary Memory Attribute Indirection Reg0 */
- c10_AMAIR1, /* Auxiliary Memory Attribute Indirection Reg1 */
- NR_CP15_REGS /* Number of regs (incl. invalid) */
-};
-
-struct kvm_cpu_context {
- struct kvm_regs gp_regs;
- struct vfp_hard_struct vfp;
- u32 cp15[NR_CP15_REGS];
-};
-
-struct kvm_host_data {
- struct kvm_cpu_context host_ctxt;
-};
-
-typedef struct kvm_host_data kvm_host_data_t;
-
-static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
-{
- /* The host's MPIDR is immutable, so let's set it up at boot time */
- cpu_ctxt->cp15[c0_MPIDR] = read_cpuid_mpidr();
-}
-
-struct vcpu_reset_state {
- unsigned long pc;
- unsigned long r0;
- bool be;
- bool reset;
-};
-
-struct kvm_vcpu_arch {
- struct kvm_cpu_context ctxt;
-
- int target; /* Processor target */
- DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
-
- /* The CPU type we expose to the VM */
- u32 midr;
-
- /* HYP trapping configuration */
- u32 hcr;
-
- /* Exception Information */
- struct kvm_vcpu_fault_info fault;
-
- /* Host FP context */
- struct kvm_cpu_context *host_cpu_context;
-
- /* VGIC state */
- struct vgic_cpu vgic_cpu;
- struct arch_timer_cpu timer_cpu;
-
- /*
- * Anything that is not used directly from assembly code goes
- * here.
- */
-
- /* vcpu power-off state */
- bool power_off;
-
- /* Don't run the guest (internal implementation need) */
- bool pause;
-
- /* IO related fields */
- struct kvm_decode mmio_decode;
-
- /* Cache some mmu pages needed inside spinlock regions */
- struct kvm_mmu_memory_cache mmu_page_cache;
-
- struct vcpu_reset_state reset_state;
-
- /* Detect first run of a vcpu */
- bool has_run_once;
-};
-
-struct kvm_vm_stat {
- ulong remote_tlb_flush;
-};
-
-struct kvm_vcpu_stat {
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
- u64 hvc_exit_stat;
- u64 wfe_exit_stat;
- u64 wfi_exit_stat;
- u64 mmio_exit_user;
- u64 mmio_exit_kernel;
- u64 exits;
-};
-
-#define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r]
-
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-
-unsigned long __kvm_call_hyp(void *hypfn, ...);
-
-/*
- * The has_vhe() part doesn't get emitted, but is used for type-checking.
- */
-#define kvm_call_hyp(f, ...) \
- do { \
- if (has_vhe()) { \
- f(__VA_ARGS__); \
- } else { \
- __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
- } \
- } while(0)
-
-#define kvm_call_hyp_ret(f, ...) \
- ({ \
- typeof(f(__VA_ARGS__)) ret; \
- \
- if (has_vhe()) { \
- ret = f(__VA_ARGS__); \
- } else { \
- ret = __kvm_call_hyp(kvm_ksym_ref(f), \
- ##__VA_ARGS__); \
- } \
- \
- ret; \
- })
-
-void force_vm_exit(const cpumask_t *mask);
-int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
- struct kvm_vcpu_events *events);
-
-int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
- struct kvm_vcpu_events *events);
-
-#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-
-struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
-void kvm_arm_halt_guest(struct kvm *kvm);
-void kvm_arm_resume_guest(struct kvm *kvm);
-
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
- int exception_index);
-
-static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
- int exception_index) {}
-
-static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
- unsigned long hyp_stack_ptr,
- unsigned long vector_ptr)
-{
- /*
- * Call initialization code, and switch to the full blown HYP
- * code. The init code doesn't need to preserve these
- * registers as r0-r3 are already callee saved according to
- * the AAPCS.
- * Note that we slightly misuse the prototype by casting the
- * stack pointer to a void *.
-
- * The PGDs are always passed as the third argument, in order
- * to be passed into r2-r3 to the init code (yes, this is
- * compliant with the PCS!).
- */
-
- __kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
-}
-
-static inline void __cpu_init_stage2(void)
-{
- kvm_call_hyp(__init_stage2_translation);
-}
-
-static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
-{
- return 0;
-}
-
-int kvm_perf_init(void);
-int kvm_perf_teardown(void);
-
-static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
-{
- return SMCCC_RET_NOT_SUPPORTED;
-}
-
-static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
-{
- return GPA_INVALID;
-}
-
-static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
-{
-}
-
-static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
-{
- return false;
-}
-
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
-
-struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-
-static inline bool kvm_arch_requires_vhe(void) { return false; }
-static inline void kvm_arch_hardware_unsetup(void) {}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
-
-static inline void kvm_arm_init_debug(void) {}
-static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
-
-int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr);
-int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr);
-int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr);
-
-/*
- * VFP/NEON switching is all done by the hyp switch code, so no need to
- * coordinate with host context handling for this state:
- */
-static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
-
-static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
-static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
-
-static inline void kvm_arm_vhe_guest_enter(void) {}
-static inline void kvm_arm_vhe_guest_exit(void) {}
-
-#define KVM_BP_HARDEN_UNKNOWN -1
-#define KVM_BP_HARDEN_WA_NEEDED 0
-#define KVM_BP_HARDEN_NOT_REQUIRED 1
-
-static inline int kvm_arm_harden_branch_predictor(void)
-{
- switch(read_cpuid_part()) {
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
- case ARM_CPU_PART_BRAHMA_B15:
- case ARM_CPU_PART_CORTEX_A12:
- case ARM_CPU_PART_CORTEX_A15:
- case ARM_CPU_PART_CORTEX_A17:
- return KVM_BP_HARDEN_WA_NEEDED;
-#endif
- case ARM_CPU_PART_CORTEX_A7:
- return KVM_BP_HARDEN_NOT_REQUIRED;
- default:
- return KVM_BP_HARDEN_UNKNOWN;
- }
-}
-
-#define KVM_SSBD_UNKNOWN -1
-#define KVM_SSBD_FORCE_DISABLE 0
-#define KVM_SSBD_KERNEL 1
-#define KVM_SSBD_FORCE_ENABLE 2
-#define KVM_SSBD_MITIGATED 3
-
-static inline int kvm_arm_have_ssbd(void)
-{
- /* No way to detect it yet, pretend it is not there. */
- return KVM_SSBD_UNKNOWN;
-}
-
-static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
-static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
-
-#define __KVM_HAVE_ARCH_VM_ALLOC
-struct kvm *kvm_arch_alloc_vm(void);
-void kvm_arch_free_vm(struct kvm *kvm);
-
-static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
-{
- /*
- * On 32bit ARM, VMs get a static 40bit IPA stage2 setup,
- * so any non-zero value used as type is illegal.
- */
- if (type)
- return -EINVAL;
- return 0;
-}
-
-static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
-{
- return -EINVAL;
-}
-
-static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
-{
- return true;
-}
-
-#endif /* __ARM_KVM_HOST_H__ */
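One detail worth preserving from this deletion: the kvm_call_hyp() wrappers keep a never-executed direct call so the compiler type-checks the arguments against the real prototype, even though dispatch actually goes through a variadic trampoline. A standalone sketch of the same trick (all names invented for illustration):

#include <stdio.h>

static unsigned long trampoline(void *fn, ...) { (void)fn; return 0; }

static void real_fn(int x) { printf("%d\n", x); }

#define call_checked(f, ...)					\
	do {							\
		if (0)						\
			f(__VA_ARGS__);	/* type check only */	\
		else						\
			trampoline((void *)f, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	call_checked(real_fn, 42);	/* real_fn("s") would not type-check */
	return 0;
}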
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
deleted file mode 100644
index 40e9034db601..000000000000
--- a/arch/arm/include/asm/kvm_hyp.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#ifndef __ARM_KVM_HYP_H__
-#define __ARM_KVM_HYP_H__
-
-#include <linux/compiler.h>
-#include <linux/kvm_host.h>
-#include <asm/cp15.h>
-#include <asm/vfp.h>
-
-#define __hyp_text __section(.hyp.text) notrace
-
-#define __ACCESS_VFP(CRn) \
- "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
-
-#define write_special(v, r) \
- asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
-#define read_special(r) ({ \
- u32 __val; \
- asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
- __val; \
-})
-
-#define TTBR0 __ACCESS_CP15_64(0, c2)
-#define TTBR1 __ACCESS_CP15_64(1, c2)
-#define VTTBR __ACCESS_CP15_64(6, c2)
-#define PAR __ACCESS_CP15_64(0, c7)
-#define CNTP_CVAL __ACCESS_CP15_64(2, c14)
-#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
-#define CNTVOFF __ACCESS_CP15_64(4, c14)
-
-#define MIDR __ACCESS_CP15(c0, 0, c0, 0)
-#define CSSELR __ACCESS_CP15(c0, 2, c0, 0)
-#define VPIDR __ACCESS_CP15(c0, 4, c0, 0)
-#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5)
-#define SCTLR __ACCESS_CP15(c1, 0, c0, 0)
-#define CPACR __ACCESS_CP15(c1, 0, c0, 2)
-#define HCR __ACCESS_CP15(c1, 4, c1, 0)
-#define HDCR __ACCESS_CP15(c1, 4, c1, 1)
-#define HCPTR __ACCESS_CP15(c1, 4, c1, 2)
-#define HSTR __ACCESS_CP15(c1, 4, c1, 3)
-#define TTBCR __ACCESS_CP15(c2, 0, c0, 2)
-#define HTCR __ACCESS_CP15(c2, 4, c0, 2)
-#define VTCR __ACCESS_CP15(c2, 4, c1, 2)
-#define DACR __ACCESS_CP15(c3, 0, c0, 0)
-#define DFSR __ACCESS_CP15(c5, 0, c0, 0)
-#define IFSR __ACCESS_CP15(c5, 0, c0, 1)
-#define ADFSR __ACCESS_CP15(c5, 0, c1, 0)
-#define AIFSR __ACCESS_CP15(c5, 0, c1, 1)
-#define HSR __ACCESS_CP15(c5, 4, c2, 0)
-#define DFAR __ACCESS_CP15(c6, 0, c0, 0)
-#define IFAR __ACCESS_CP15(c6, 0, c0, 2)
-#define HDFAR __ACCESS_CP15(c6, 4, c0, 0)
-#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
-#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
-#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
-#define BPIALLIS __ACCESS_CP15(c7, 0, c1, 6)
-#define ICIMVAU __ACCESS_CP15(c7, 0, c5, 1)
-#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
-#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
-#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
-#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
-#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
-#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
-#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0)
-#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1)
-#define VBAR __ACCESS_CP15(c12, 0, c0, 0)
-#define CID __ACCESS_CP15(c13, 0, c0, 1)
-#define TID_URW __ACCESS_CP15(c13, 0, c0, 2)
-#define TID_URO __ACCESS_CP15(c13, 0, c0, 3)
-#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
-#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
-#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
-#define CNTP_CTL __ACCESS_CP15(c14, 0, c2, 1)
-#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
-#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
-
-#define VFP_FPEXC __ACCESS_VFP(FPEXC)
-
-/* AArch64 compatibility macros, only for the timer so far */
-#define read_sysreg_el0(r) read_sysreg(r##_EL0)
-#define write_sysreg_el0(v, r) write_sysreg(v, r##_EL0)
-
-#define SYS_CNTP_CTL_EL0 CNTP_CTL
-#define SYS_CNTP_CVAL_EL0 CNTP_CVAL
-#define SYS_CNTV_CTL_EL0 CNTV_CTL
-#define SYS_CNTV_CVAL_EL0 CNTV_CVAL
-
-#define cntvoff_el2 CNTVOFF
-#define cnthctl_el2 CNTHCTL
-
-void __timer_enable_traps(struct kvm_vcpu *vcpu);
-void __timer_disable_traps(struct kvm_vcpu *vcpu);
-
-void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
-
-void __sysreg_save_state(struct kvm_cpu_context *ctxt);
-void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
-
-void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
-
-asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
-asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
-static inline bool __vfp_enabled(void)
-{
- return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
-}
-
-void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
-void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
-
-asmlinkage int __guest_enter(struct kvm_vcpu *vcpu,
- struct kvm_cpu_context *host);
-asmlinkage int __hyp_do_panic(const char *, int, u32);
-
-#endif /* __ARM_KVM_HYP_H__ */
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
deleted file mode 100644
index 7c0eddb0adb2..000000000000
--- a/arch/arm/include/asm/kvm_mmio.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_MMIO_H__
-#define __ARM_KVM_MMIO_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-
-struct kvm_decode {
- unsigned long rt;
- bool sign_extend;
-};
-
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
-
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
- phys_addr_t fault_ipa);
-
-#endif /* __ARM_KVM_MMIO_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
deleted file mode 100644
index 0d84d50bf9ba..000000000000
--- a/arch/arm/include/asm/kvm_mmu.h
+++ /dev/null
@@ -1,435 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_MMU_H__
-#define __ARM_KVM_MMU_H__
-
-#include <asm/memory.h>
-#include <asm/page.h>
-
-/*
- * We directly use the kernel VA for the HYP, as we can directly share
- * the mapping (HTTBR "covers" TTBR1).
- */
-#define kern_hyp_va(kva) (kva)
-
-/* Contrary to arm64, there is no need to generate a PC-relative address */
-#define hyp_symbol_addr(s) \
- ({ \
- typeof(s) *addr = &(s); \
- addr; \
- })
-
-#ifndef __ASSEMBLY__
-
-#include <linux/highmem.h>
-#include <asm/cacheflush.h>
-#include <asm/cputype.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_hyp.h>
-#include <asm/pgalloc.h>
-#include <asm/stage2_pgtable.h>
-
-/* Ensure compatibility with arm64 */
-#define VA_BITS 32
-
-#define kvm_phys_shift(kvm) KVM_PHYS_SHIFT
-#define kvm_phys_size(kvm) (1ULL << kvm_phys_shift(kvm))
-#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - 1ULL)
-#define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK
-
-#define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t))
-
-int create_hyp_mappings(void *from, void *to, pgprot_t prot);
-int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
- void __iomem **kaddr,
- void __iomem **haddr);
-int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
- void **haddr);
-void free_hyp_pgds(void);
-
-void stage2_unmap_vm(struct kvm *kvm);
-int kvm_alloc_stage2_pgd(struct kvm *kvm);
-void kvm_free_stage2_pgd(struct kvm *kvm);
-int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
- phys_addr_t pa, unsigned long size, bool writable);
-
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
-
-phys_addr_t kvm_mmu_get_httbr(void);
-phys_addr_t kvm_get_idmap_vector(void);
-int kvm_mmu_init(void);
-void kvm_clear_hyp_idmap(void);
-
-#define kvm_mk_pmd(ptep) __pmd(__pa(ptep) | PMD_TYPE_TABLE)
-#define kvm_mk_pud(pmdp) __pud(__pa(pmdp) | PMD_TYPE_TABLE)
-#define kvm_mk_pgd(pudp) ({ BUILD_BUG(); 0; })
-
-#define kvm_pfn_pte(pfn, prot) pfn_pte(pfn, prot)
-#define kvm_pfn_pmd(pfn, prot) pfn_pmd(pfn, prot)
-#define kvm_pfn_pud(pfn, prot) (__pud(0))
-
-#define kvm_pud_pfn(pud) ({ WARN_ON(1); 0; })
-
-
-#define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd)
-/* No support for pud hugepages */
-#define kvm_pud_mkhuge(pud) ( {WARN_ON(1); pud; })
-
-/*
- * The following kvm_*pud*() functions are provided strictly to allow
- * sharing code with arm64. They should never be called in practice.
- */
-static inline void kvm_set_s2pud_readonly(pud_t *pud)
-{
- WARN_ON(1);
-}
-
-static inline bool kvm_s2pud_readonly(pud_t *pud)
-{
- WARN_ON(1);
- return false;
-}
-
-static inline void kvm_set_pud(pud_t *pud, pud_t new_pud)
-{
- WARN_ON(1);
-}
-
-static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
-{
- WARN_ON(1);
- return pud;
-}
-
-static inline pud_t kvm_s2pud_mkexec(pud_t pud)
-{
- WARN_ON(1);
- return pud;
-}
-
-static inline bool kvm_s2pud_exec(pud_t *pud)
-{
- WARN_ON(1);
- return false;
-}
-
-static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
-{
- BUG();
- return pud;
-}
-
-static inline bool kvm_s2pud_young(pud_t pud)
-{
- WARN_ON(1);
- return false;
-}
-
-static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
-{
- pte_val(pte) |= L_PTE_S2_RDWR;
- return pte;
-}
-
-static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
-{
- pmd_val(pmd) |= L_PMD_S2_RDWR;
- return pmd;
-}
-
-static inline pte_t kvm_s2pte_mkexec(pte_t pte)
-{
- pte_val(pte) &= ~L_PTE_XN;
- return pte;
-}
-
-static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
-{
- pmd_val(pmd) &= ~PMD_SECT_XN;
- return pmd;
-}
-
-static inline void kvm_set_s2pte_readonly(pte_t *pte)
-{
- pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
-}
-
-static inline bool kvm_s2pte_readonly(pte_t *pte)
-{
- return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
-}
-
-static inline bool kvm_s2pte_exec(pte_t *pte)
-{
- return !(pte_val(*pte) & L_PTE_XN);
-}
-
-static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
-{
- pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
-}
-
-static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
-{
- return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
-}
-
-static inline bool kvm_s2pmd_exec(pmd_t *pmd)
-{
- return !(pmd_val(*pmd) & PMD_SECT_XN);
-}
-
-static inline bool kvm_page_empty(void *ptr)
-{
- struct page *ptr_page = virt_to_page(ptr);
- return page_count(ptr_page) == 1;
-}
-
-#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
-#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
-#define kvm_pud_table_empty(kvm, pudp) false
-
-#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
-#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#define hyp_pud_table_empty(pudp) false
-
-struct kvm;
-
-#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
-
-static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
-{
- return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
-}
-
-static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
- /*
- * Clean the dcache to the Point of Coherency.
- *
- * We need to do this through a kernel mapping (using the
- * user-space mapping has proved to be the wrong
- * solution). For that, we need to kmap one page at a time,
- * and iterate over the range.
- */
-
- VM_BUG_ON(size & ~PAGE_MASK);
-
- while (size) {
- void *va = kmap_atomic_pfn(pfn);
-
- kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
- size -= PAGE_SIZE;
- pfn++;
-
- kunmap_atomic(va);
- }
-}
-
-static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
- unsigned long size)
-{
- u32 iclsz;
-
- /*
- * If we are going to insert an instruction page and the icache is
- * either VIPT or PIPT, there is a potential problem where the host
- * (or another VM) may have used the same page as this guest, and we
- * read incorrect data from the icache. If we're using a PIPT cache,
- * we can invalidate just that page, but if we are using a VIPT cache
- * we need to invalidate the entire icache - damn shame - as written
- * in the ARM ARM (DDI 0406C.b - Page B3-1393).
- *
- * VIVT caches are tagged using both the ASID and the VMID and don't
- * need any kind of flushing (DDI 0406C.b - Page B3-1392).
- */
-
- VM_BUG_ON(size & ~PAGE_MASK);
-
- if (icache_is_vivt_asid_tagged())
- return;
-
- if (!icache_is_pipt()) {
- /* any kind of VIPT cache */
- __flush_icache_all();
- return;
- }
-
- /*
- * CTR IminLine contains Log2 of the number of words in the
- * cache line, so we can get the number of words as
- * 2 << (IminLine - 1). To get the number of bytes, we
- * multiply by 4 (the number of bytes in a 32-bit word), and
- * get 4 << (IminLine).
- */
- iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf);
-
- while (size) {
- void *va = kmap_atomic_pfn(pfn);
- void *end = va + PAGE_SIZE;
- void *addr = va;
-
- do {
- write_sysreg(addr, ICIMVAU);
- addr += iclsz;
- } while (addr < end);
-
- dsb(ishst);
- isb();
-
- size -= PAGE_SIZE;
- pfn++;
-
- kunmap_atomic(va);
- }
-
- /* Check if we need to invalidate the BTB */
- if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) {
- write_sysreg(0, BPIALLIS);
- dsb(ishst);
- isb();
- }
-}
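
A worked example of the IminLine decoding above, as a standalone sketch with a hypothetical CTR value (the real value comes from read_cpuid(CPUID_CACHETYPE)):

#include <stdio.h>

/* IminLine is CTR[3:0]: log2 of the minimum icache line size in words. */
static unsigned int icache_line_bytes(unsigned int ctr)
{
	return 4u << (ctr & 0xf);
}

int main(void)
{
	unsigned int ctr = 0x84448004;	/* hypothetical CTR: IminLine == 4 */

	/* 4 << 4 == 64 bytes per invalidate-by-MVA step */
	printf("icache line: %u bytes\n", icache_line_bytes(ctr));
	return 0;
}
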
-
-static inline void __kvm_flush_dcache_pte(pte_t pte)
-{
- void *va = kmap_atomic(pte_page(pte));
-
- kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
- kunmap_atomic(va);
-}
-
-static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
-{
- unsigned long size = PMD_SIZE;
- kvm_pfn_t pfn = pmd_pfn(pmd);
-
- while (size) {
- void *va = kmap_atomic_pfn(pfn);
-
- kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
- pfn++;
- size -= PAGE_SIZE;
-
- kunmap_atomic(va);
- }
-}
-
-static inline void __kvm_flush_dcache_pud(pud_t pud)
-{
-}
-
-#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
-
-void kvm_set_way_flush(struct kvm_vcpu *vcpu);
-void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
-
-static inline bool __kvm_cpu_uses_extended_idmap(void)
-{
- return false;
-}
-
-static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
-{
- return PTRS_PER_PGD;
-}
-
-static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
- pgd_t *hyp_pgd,
- pgd_t *merged_hyp_pgd,
- unsigned long hyp_idmap_start) { }
-
-static inline unsigned int kvm_get_vmid_bits(void)
-{
- return 8;
-}
-
-/*
- * We are not in the kvm->srcu critical section most of the time, so we take
- * the SRCU read lock here. Since we copy the data from the user page, we
- * can immediately drop the lock again.
- */
-static inline int kvm_read_guest_lock(struct kvm *kvm,
- gpa_t gpa, void *data, unsigned long len)
-{
- int srcu_idx = srcu_read_lock(&kvm->srcu);
- int ret = kvm_read_guest(kvm, gpa, data, len);
-
- srcu_read_unlock(&kvm->srcu, srcu_idx);
-
- return ret;
-}
-
-static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
- const void *data, unsigned long len)
-{
- int srcu_idx = srcu_read_lock(&kvm->srcu);
- int ret = kvm_write_guest(kvm, gpa, data, len);
-
- srcu_read_unlock(&kvm->srcu, srcu_idx);
-
- return ret;
-}
-
-static inline void *kvm_get_hyp_vector(void)
-{
- switch(read_cpuid_part()) {
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
- case ARM_CPU_PART_CORTEX_A12:
- case ARM_CPU_PART_CORTEX_A17:
- {
- extern char __kvm_hyp_vector_bp_inv[];
- return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
- }
-
- case ARM_CPU_PART_BRAHMA_B15:
- case ARM_CPU_PART_CORTEX_A15:
- {
- extern char __kvm_hyp_vector_ic_inv[];
- return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
- }
-#endif
- default:
- {
- extern char __kvm_hyp_vector[];
- return kvm_ksym_ref(__kvm_hyp_vector);
- }
- }
-}
-
-static inline int kvm_map_vectors(void)
-{
- return 0;
-}
-
-static inline int hyp_map_aux_data(void)
-{
- return 0;
-}
-
-#define kvm_phys_to_vttbr(addr) (addr)
-
-static inline void kvm_set_ipa_limit(void) {}
-
-static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
-{
- struct kvm_vmid *vmid = &kvm->arch.vmid;
- u64 vmid_field, baddr;
-
- baddr = kvm->arch.pgd_phys;
- vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
- return kvm_phys_to_vttbr(baddr) | vmid_field;
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ARM_KVM_MMU_H__ */
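
For reference, kvm_get_vttbr() above just ORs the stage-2 pgd base with the VMID field. A standalone sketch, assuming the usual VTTBR VMID position at bit 48; the shift and the values below are hypothetical examples, not constants from this patch:

#include <stdint.h>
#include <stdio.h>

#define EX_VTTBR_VMID_SHIFT 48	/* assumed VMID bit position */

int main(void)
{
	uint64_t pgd_phys = 0x80004000ULL;	/* hypothetical stage-2 pgd */
	uint64_t vmid = 5;			/* hypothetical 8-bit VMID */
	uint64_t vttbr = pgd_phys | (vmid << EX_VTTBR_VMID_SHIFT);

	printf("VTTBR = 0x%016llx\n", (unsigned long long)vttbr);
	return 0;
}
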
diff --git a/arch/arm/include/asm/kvm_ras.h b/arch/arm/include/asm/kvm_ras.h
deleted file mode 100644
index e9577292dfe4..000000000000
--- a/arch/arm/include/asm/kvm_ras.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018 - Arm Ltd */
-
-#ifndef __ARM_KVM_RAS_H__
-#define __ARM_KVM_RAS_H__
-
-#include <linux/types.h>
-
-static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
-{
- return -1;
-}
-
-#endif /* __ARM_KVM_RAS_H__ */
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index e7df5a822cab..2b18a258204d 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -56,9 +56,6 @@ struct machine_desc {
void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
-#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
- void (*handle_irq)(struct pt_regs *);
-#endif
void (*restart)(enum reboot_mode, const char *);
};
@@ -81,7 +78,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
#define MACHINE_START(_type,_name) \
static const struct machine_desc __mach_desc_##_type \
__used \
- __attribute__((__section__(".arch.info.init"))) = { \
+ __section(".arch.info.init") = { \
.nr = MACH_TYPE_##_type, \
.name = _name,
@@ -91,7 +88,7 @@ static const struct machine_desc __mach_desc_##_type \
#define DT_MACHINE_START(_name, _namestr) \
static const struct machine_desc __mach_desc_##_name \
__used \
- __attribute__((__section__(".arch.info.init"))) = { \
+ __section(".arch.info.init") = { \
.nr = ~0, \
.name = _namestr,
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index 1506422af383..5ec11d7f0d04 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -44,8 +44,3 @@ struct dma_struct {
* isa_dma_add - add an ISA-style DMA channel
*/
extern int isa_dma_add(unsigned int, dma_t *dma);
-
-/*
- * Add the ISA DMA controller. Always takes channels 0-7.
- */
-extern void isa_init_dma(void);
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 92282558caf7..2b8970d8e5a2 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -27,6 +27,7 @@ enum {
MT_HIGH_VECTORS,
MT_MEMORY_RWX,
MT_MEMORY_RW,
+ MT_MEMORY_RO,
MT_ROM,
MT_MEMORY_RWX_NONCACHED,
MT_MEMORY_RW_DTCM,
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 83d340702680..ea9bd08895b7 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -17,10 +17,8 @@ struct pci_host_bridge;
struct device;
struct hw_pci {
- struct msi_controller *msi_ctrl;
struct pci_ops *ops;
int nr_controllers;
- unsigned int io_optional:1;
void **private_data;
int (*setup)(int nr, struct pci_sys_data *);
int (*scan)(int nr, struct pci_host_bridge *);
@@ -28,11 +26,6 @@ struct hw_pci {
void (*postinit)(void);
u8 (*swizzle)(struct pci_dev *dev, u8 *pin);
int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
- resource_size_t (*align_resource)(struct pci_dev *dev,
- const struct resource *res,
- resource_size_t start,
- resource_size_t size,
- resource_size_t align);
};
/*
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index d75d39280db7..5f522916ec99 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -7,8 +7,6 @@
#ifndef __ASM_ARM_MACH_TIME_H
#define __ASM_ARM_MACH_TIME_H
-extern void timer_tick(void);
-
typedef void (*clock_access_fn)(struct timespec64 *);
extern int register_persistent_clock(clock_access_fn read_persistent);
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 99035b5891ef..ef2aa79ece5a 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -5,11 +5,16 @@
* Copyright (C) 2000-2002 Russell King
* modification for nommu, Hyok S. Choi, 2004
*
- * Note: this file should not be included by non-asm/.h files
+ * Note: this file should not be included directly; include <asm/page.h>
+ * to get access to these definitions.
*/
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H
+#ifndef _ASMARM_PAGE_H
+#error "Do not include <asm/memory.h> directly"
+#endif
+
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
@@ -18,9 +23,16 @@
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
+#include <asm/kasan_def.h>
-/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+/*
+ * PAGE_OFFSET: the virtual address of the start of lowmem, which sits
+ * above the virtual address range reserved for userspace.
+ * KERNEL_OFFSET: the virtual address of the start of the kernel image.
+ * We may further offset this by TEXT_OFFSET in practice.
+ */
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+#define KERNEL_OFFSET (PAGE_OFFSET)
#ifdef CONFIG_MMU
@@ -28,7 +40,11 @@
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
+#ifndef CONFIG_KASAN
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
+#else
+#define TASK_SIZE (KASAN_SHADOW_START)
+#endif
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
@@ -67,6 +83,10 @@
*/
#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
+#define FDT_FIXED_BASE UL(0xff800000)
+#define FDT_FIXED_SIZE (2 * SECTION_SIZE)
+#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
+
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* Allow 16MB-aligned ioremap pages
@@ -107,6 +127,7 @@ extern unsigned long vectors_base;
#define MODULES_VADDR PAGE_OFFSET
#define XIP_VIRT_ADDR(physaddr) (physaddr)
+#define FDT_VIRT_BASE(physbase) ((void *)(physbase))
#endif /* !CONFIG_MMU */
@@ -140,22 +161,15 @@ extern unsigned long vectors_base;
*/
#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-#ifdef CONFIG_XIP_KERNEL
+#ifndef __ASSEMBLY__
+
/*
- * When referencing data in RAM from the XIP region in a relative manner
- * with the MMU off, we need the relative offset between the two physical
- * addresses. The macro below achieves this, which is:
- * __pa(v_data) - __xip_pa(v_text)
+ * Physical start and end addresses of the kernel sections. These addresses
+ * are 2MB-aligned to match the section mappings placed over the kernel. We
+ * use u64 so that LPAE mappings beyond the 32-bit limit will work out as well.
*/
-#define PHYS_RELATIVE(v_data, v_text) \
- (((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \
- ((v_text) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + \
- CONFIG_XIP_PHYS_ADDR))
-#else
-#define PHYS_RELATIVE(v_data, v_text) ((v_data) - (v_text))
-#endif
-
-#ifndef __ASSEMBLY__
+extern u64 kernel_sec_start;
+extern u64 kernel_sec_end;
/*
* Physical vs virtual RAM address space conversion. These are
@@ -173,6 +187,7 @@ extern unsigned long vectors_base;
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
+#define __PV_BITS_23_16 0x810000
#define __PV_BITS_7_0 0x81
extern unsigned long __pv_phys_pfn_offset;
@@ -183,43 +198,65 @@ extern const void *__pv_table_begin, *__pv_table_end;
#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
-#define __pv_stub(from,to,instr,type) \
+#ifndef CONFIG_THUMB2_KERNEL
+#define __pv_stub(from,to,instr) \
__asm__("@ __pv_stub\n" \
"1: " instr " %0, %1, %2\n" \
+ "2: " instr " %0, %0, %3\n" \
" .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
+ " .long 1b - ., 2b - .\n" \
" .popsection\n" \
: "=r" (to) \
- : "r" (from), "I" (type))
+ : "r" (from), "I" (__PV_BITS_31_24), \
+ "I"(__PV_BITS_23_16))
+
+#define __pv_add_carry_stub(x, y) \
+ __asm__("@ __pv_add_carry_stub\n" \
+ "0: movw %R0, #0\n" \
+ " adds %Q0, %1, %R0, lsl #20\n" \
+ "1: mov %R0, %2\n" \
+ " adc %R0, %R0, #0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 0b - ., 1b - .\n" \
+ " .popsection\n" \
+ : "=&r" (y) \
+ : "r" (x), "I" (__PV_BITS_7_0) \
+ : "cc")
-#define __pv_stub_mov_hi(t) \
- __asm__ volatile("@ __pv_stub_mov\n" \
- "1: mov %R0, %1\n" \
+#else
+#define __pv_stub(from,to,instr) \
+ __asm__("@ __pv_stub\n" \
+ "0: movw %0, #0\n" \
+ " lsl %0, #21\n" \
+ " " instr " %0, %1, %0\n" \
" .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
+ " .long 0b - .\n" \
" .popsection\n" \
- : "=r" (t) \
- : "I" (__PV_BITS_7_0))
+ : "=&r" (to) \
+ : "r" (from))
#define __pv_add_carry_stub(x, y) \
- __asm__ volatile("@ __pv_add_carry_stub\n" \
- "1: adds %Q0, %1, %2\n" \
+ __asm__("@ __pv_add_carry_stub\n" \
+ "0: movw %R0, #0\n" \
+ " lsls %R0, #21\n" \
+ " adds %Q0, %1, %R0\n" \
+ "1: mvn %R0, #0\n" \
" adc %R0, %R0, #0\n" \
" .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
+ " .long 0b - ., 1b - .\n" \
" .popsection\n" \
- : "+r" (y) \
- : "r" (x), "I" (__PV_BITS_31_24) \
+ : "=&r" (y) \
+ : "r" (x) \
: "cc")
+#endif
static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
phys_addr_t t;
if (sizeof(phys_addr_t) == 4) {
- __pv_stub(x, t, "add", __PV_BITS_31_24);
+ __pv_stub(x, t, "add");
} else {
- __pv_stub_mov_hi(t);
__pv_add_carry_stub(x, t);
}
return t;
@@ -235,7 +272,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
* assembler expression receives a 32-bit argument
* in place where an 'r' 32-bit operand is expected.
*/
- __pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
+ __pv_stub((unsigned long) x, t, "sub");
return t;
}
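
The net effect of the patched add/sub stubs above can be sketched in plain C. PAGE_OFFSET 0xC0000000 and PHYS_OFFSET 0x80000000 are hypothetical example values here; at boot the real difference is patched into the instruction immediates:

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_OFFSET 0xC0000000U	/* hypothetical */
#define EX_PHYS_OFFSET 0x80000000U	/* hypothetical */

static uint32_t ex_virt_to_phys(uint32_t va)
{
	return va + (EX_PHYS_OFFSET - EX_PAGE_OFFSET);	/* the "add" stub */
}

static uint32_t ex_phys_to_virt(uint32_t pa)
{
	return pa - (EX_PHYS_OFFSET - EX_PAGE_OFFSET);	/* the "sub" stub */
}

int main(void)
{
	printf("pa = 0x%08x\n", (unsigned)ex_virt_to_phys(0xC0001234U));
	printf("va = 0x%08x\n", (unsigned)ex_phys_to_virt(0x80001234U));
	return 0;	/* prints 0x80001234 and 0xc0001234 */
}
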
@@ -256,10 +293,12 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#endif
-#define virt_to_pfn(kaddr) \
- ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
- PHYS_PFN_OFFSET)
-
+static inline unsigned long virt_to_pfn(const void *p)
+{
+ unsigned long kaddr = (unsigned long)p;
+ return (((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) +
+ PHYS_PFN_OFFSET);
+}
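
With the same hypothetical offsets (PAGE_OFFSET 0xC0000000, PHYS_OFFSET 0x80000000, 4 KiB pages, so PHYS_PFN_OFFSET == 0x80000), the conversion works out as:

/* virt_to_pfn((void *)0xC0003000):
 *   ((0xC0003000 - 0xC0000000) >> 12) + 0x80000
 * = 0x3 + 0x80000
 * = 0x80003
 */
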
#define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x))
#ifdef CONFIG_DEBUG_VIRTUAL
@@ -338,19 +377,6 @@ static inline unsigned long __virt_to_idmap(unsigned long x)
#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
/*
- * Virtual <-> DMA view memory address translations
- * Again, these are *only* valid on the kernel direct mapped RAM
- * memory. Use of these is *deprecated* (and that doesn't mean
- * use the __ prefixed forms instead.) See dma-mapping.h.
- */
-#ifndef __virt_to_bus
-#define __virt_to_bus __virt_to_phys
-#define __bus_to_virt __phys_to_virt
-#define __pfn_to_bus(x) __pfn_to_phys(x)
-#define __bus_to_pfn(x) __phys_to_pfn(x)
-#endif
-
-/*
* Conversion between a struct page and a physical address.
*
* page_to_pfn(page) convert a struct page * to a PFN number
@@ -367,6 +393,4 @@ static inline unsigned long __virt_to_idmap(unsigned long x)
#endif
-#include <asm-generic/memory_model.h>
-
#endif
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 1592a4264488..e049723840d3 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -10,7 +10,7 @@ typedef struct {
#else
int switch_pending;
#endif
- unsigned int vmalloc_seq;
+ atomic_t vmalloc_seq;
unsigned long sigpage;
#ifdef CONFIG_VDSO
unsigned long vdso;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index f99ed524fe41..db2cb06aa8cf 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -23,9 +23,21 @@
void __check_vmalloc_seq(struct mm_struct *mm);
+#ifdef CONFIG_MMU
+static inline void check_vmalloc_seq(struct mm_struct *mm)
+{
+ if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
+ unlikely(atomic_read(&mm->context.vmalloc_seq) !=
+ atomic_read(&init_mm.context.vmalloc_seq)))
+ __check_vmalloc_seq(mm);
+}
+#endif
+
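check_vmalloc_seq() above is a classic lazy-sync pattern: a global generation counter that writers bump, and readers that re-copy state only when their cached generation is stale. In miniature, as an illustrative standalone sketch rather than kernel code:

#include <stdatomic.h>

static atomic_int global_gen;	/* bumped by the writer */

struct ctx {
	atomic_int gen;		/* last generation this context synced at */
};

static void resync(struct ctx *c)
{
	/* copy the kernel-side vmalloc entries into this context here */
	(void)c;
}

static void check_gen(struct ctx *c)
{
	int g = atomic_load(&global_gen);

	if (atomic_load(&c->gen) != g) {
		resync(c);
		atomic_store(&c->gen, g);
	}
}
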
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+
+#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -50,8 +62,7 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
{
- if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
- __check_vmalloc_seq(mm);
+ check_vmalloc_seq(mm);
if (irqs_disabled())
/*
@@ -92,33 +103,11 @@ static inline void finish_arch_post_lock_switch(void)
#endif /* CONFIG_MMU */
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
- return 0;
-}
-
-
#endif /* CONFIG_CPU_HAS_ASID */
-#define destroy_context(mm) do { } while(0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm: describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-/*
* This is the actual mm switch as far as the scheduler
* is concerned. No registers are touched. We avoid
* calling the CPU specific function when the mm hasn't
@@ -149,6 +138,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}
-#define deactivate_mm(tsk,mm) do { } while (0)
+#ifdef CONFIG_VMAP_STACK
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ if (mm != &init_mm)
+ check_vmalloc_seq(mm);
+}
+#define enter_lazy_tlb enter_lazy_tlb
+#endif
+
+#include <asm-generic/mmu_context.h>
#endif
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 182163b55546..07c51a34f77d 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -3,30 +3,31 @@
#define _ASM_ARM_MODULE_H
#include <asm-generic/module.h>
-
-struct unwind_table;
+#include <asm/unwind.h>
#ifdef CONFIG_ARM_UNWIND
-enum {
- ARM_SEC_INIT,
- ARM_SEC_DEVINIT,
- ARM_SEC_CORE,
- ARM_SEC_EXIT,
- ARM_SEC_DEVEXIT,
- ARM_SEC_HOT,
- ARM_SEC_UNLIKELY,
- ARM_SEC_MAX,
-};
+#define ELF_SECTION_UNWIND 0x70000001
#endif
+#define PLT_ENT_STRIDE L1_CACHE_BYTES
+#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
+#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
+
+struct plt_entries {
+ u32 ldr[PLT_ENT_COUNT];
+ u32 lit[PLT_ENT_COUNT];
+};
+
struct mod_plt_sec {
struct elf32_shdr *plt;
+ struct plt_entries *plt_ent;
int plt_count;
};
struct mod_arch_specific {
#ifdef CONFIG_ARM_UNWIND
- struct unwind_table *unwind[ARM_SEC_MAX];
+ struct list_head unwind_list;
+ struct unwind_table *init_table;
#endif
#ifdef CONFIG_ARM_MODULE_PLTS
struct mod_plt_sec core;
@@ -36,31 +37,12 @@ struct mod_arch_specific {
struct module;
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);
-
-/*
- * Add the ARM architecture version to the version magic string
- */
-#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
-
-/* Add __virt_to_phys patching state as well */
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
-#else
-#define MODULE_ARCH_VERMAGIC_P2V ""
-#endif
-
-/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
-#ifdef CONFIG_THUMB2_KERNEL
-#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
+#ifdef CONFIG_ARM_MODULE_PLTS
+bool in_module_plt(unsigned long loc);
#else
-#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+static inline bool in_module_plt(unsigned long loc) { return false; }
#endif
-#define MODULE_ARCH_VERMAGIC \
- MODULE_ARCH_VERMAGIC_ARMVSN \
- MODULE_ARCH_VERMAGIC_ARMTHUMB \
- MODULE_ARCH_VERMAGIC_P2V
-
#ifdef CONFIG_THUMB2_KERNEL
#define HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
diff --git a/arch/arm/include/asm/module.lds.h b/arch/arm/include/asm/module.lds.h
new file mode 100644
index 000000000000..0e7cb4e314b4
--- /dev/null
+++ b/arch/arm/include/asm/module.lds.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef CONFIG_ARM_MODULE_PLTS
+SECTIONS {
+ .plt : { BYTE(0) }
+ .init.plt : { BYTE(0) }
+}
+#endif
diff --git a/arch/arm/include/asm/nwflash.h b/arch/arm/include/asm/nwflash.h
index 0ec6f07c2d8a..66b7e68c9b58 100644
--- a/arch/arm/include/asm/nwflash.h
+++ b/arch/arm/include/asm/nwflash.h
@@ -2,7 +2,6 @@
#ifndef _FLASH_H
#define _FLASH_H
-#define FLASH_MINOR 160 /* MAJOR is 10 - miscdevice */
#define CMD_WRITE_DISABLE 0
#define CMD_WRITE_ENABLE 0x28
#define CMD_WRITE_BASE64K_ENABLE 0x47
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index 6bff94b2372b..38e3eabff5c3 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -110,12 +110,17 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x)
#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x)
#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x)
-#ifndef CONFIG_CPU_ENDIAN_BE32
+#ifdef CONFIG_CPU_ENDIAN_BE32
+#ifndef __ASSEMBLY__
/*
* On BE32 systems, using 32-bit accesses to store Thumb instructions will not
* work in all cases, due to alignment constraints. For now, a correct
- * version is not provided for BE32.
+ * version is not provided for BE32, but the prototype needs to be there
+ * to compile patch.c.
*/
+extern __u32 __opcode_to_mem_thumb32(__u32);
+#endif
+#else
#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x)
#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x)
#endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index c2b75cba26df..119aa85d1feb 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -113,6 +113,28 @@ struct cpu_user_fns {
unsigned long vaddr, struct vm_area_struct *vma);
};
+void fa_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void fa_clear_user_highpage(struct page *page, unsigned long vaddr);
+void feroceon_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4wb_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr);
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr);
+void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+void xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
+
#ifdef MULTI_USER
extern struct cpu_user_fns cpu_user;
@@ -147,6 +169,9 @@ extern void copy_page(void *to, const void *from);
#include <asm/pgtable-3level-types.h>
#else
#include <asm/pgtable-2level-types.h>
+#ifdef CONFIG_VMAP_STACK
+#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
+#endif
#endif
#endif /* CONFIG_MMU */
@@ -155,16 +180,16 @@ typedef struct page *pgtable_t;
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
extern int pfn_valid(unsigned long);
+#define pfn_valid pfn_valid
#endif
-#include <asm/memory.h>
-
#endif /* !__ASSEMBLY__ */
-#define VM_DATA_DEFAULT_FLAGS \
- (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#include <asm/memory.h>
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
#include <asm-generic/getorder.h>
+#include <asm-generic/memory_model.h>
#endif
diff --git a/arch/arm/include/asm/paravirt.h b/arch/arm/include/asm/paravirt.h
index cdbf02d9c1d4..95d5b0d625cd 100644
--- a/arch/arm/include/asm/paravirt.h
+++ b/arch/arm/include/asm/paravirt.h
@@ -3,23 +3,19 @@
#define _ASM_ARM_PARAVIRT_H
#ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
-struct pv_time_ops {
- unsigned long long (*steal_clock)(int cpu);
-};
-
-struct paravirt_patch_template {
- struct pv_time_ops time;
-};
+u64 dummy_steal_clock(int cpu);
-extern struct paravirt_patch_template pv_ops;
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
static inline u64 paravirt_steal_clock(int cpu)
{
- return pv_ops.time.steal_clock(cpu);
+ return static_call(pv_steal_clock)(cpu);
}
#endif
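
A hypervisor backend then retargets the static call once at init. A kernel-context sketch; the backend names are hypothetical, but static_call_update() is the standard API for a DECLARE_STATIC_CALL key:

#include <linux/types.h>
#include <linux/static_call.h>

static u64 my_hv_steal_clock(int cpu)
{
	return 0;	/* query the hypervisor's stolen-time counter here */
}

static void __init my_hv_init_steal_clock(void)
{
	/* replaces dummy_steal_clock for all paravirt_steal_clock() callers */
	static_call_update(pv_steal_clock, my_hv_steal_clock);
}
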
diff --git a/arch/arm/include/asm/paravirt_api_clock.h b/arch/arm/include/asm/paravirt_api_clock.h
new file mode 100644
index 000000000000..65ac7cee0dad
--- /dev/null
+++ b/arch/arm/include/asm/paravirt_api_clock.h
@@ -0,0 +1 @@
+#include <asm/paravirt.h>
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 68e6f25784a4..5916b88d4c94 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -22,11 +22,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)
#define HAVE_PCI_MMAP
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return channel ? 15 : 14;
-}
-
extern void pcibios_report_status(unsigned int status_mask, int warn);
#endif /* __KERNEL__ */
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index f44f448537f2..7545c87c251f 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -5,18 +5,27 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
+#include <asm/insn.h>
+
+register unsigned long current_stack_pointer asm ("sp");
+
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
*/
-#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+#ifdef CONFIG_SMP
static inline void set_my_cpu_offset(unsigned long off)
{
+ extern unsigned int smp_on_up;
+
+ if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
+ return;
+
/* Set TPIDRPRW */
asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
-static inline unsigned long __my_cpu_offset(void)
+static __always_inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
@@ -25,8 +34,28 @@ static inline unsigned long __my_cpu_offset(void)
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
- : "Q" (*(const unsigned long *)current_stack_pointer));
+ asm("0: mrc p15, 0, %0, c13, c0, 4 \n\t"
+#ifdef CONFIG_CPU_V6
+ "1: \n\t"
+ " .subsection 1 \n\t"
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+ !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ "2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) " \n\t"
+ " b 1b \n\t"
+#else
+ "2: ldr %0, 3f \n\t"
+ " ldr %0, [%0] \n\t"
+ " b 1b \n\t"
+ "3: .long __per_cpu_offset \n\t"
+#endif
+ " .previous \n\t"
+ " .pushsection \".alt.smp.init\", \"a\" \n\t"
+ " .long 0b - . \n\t"
+ " b . + (2b - 0b) \n\t"
+ " .popsection \n\t"
+#endif
+ : "=r" (off)
+ : "Q" (*(const unsigned long *)current_stack_pointer));
return off;
}
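
Stripped of the SMP_ON_UP patching, the fast path above is a single coprocessor read. A reduced sketch, assuming a v6K+ CPU where TPIDRPRW exists:

static inline unsigned long read_tpidrprw(void)
{
	unsigned long off;

	/* TPIDRPRW holds this CPU's per-cpu offset, set at boot/hotplug */
	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off));
	return off;
}
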
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index fe87397c3d8c..bdbc1e590891 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -17,7 +17,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->ARM_pc = (__ip); \
- (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
+ frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
(regs)->ARM_sp = current_stack_pointer; \
(regs)->ARM_cpsr = SVC_MODE; \
}
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 069da393110c..a17f01235c29 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -21,17 +21,7 @@
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
#ifdef CONFIG_ARM_LPAE
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- return (pmd_t *)get_zeroed_page(GFP_KERNEL);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
- BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- free_page((unsigned long)pmd);
-}
+#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
@@ -39,14 +29,19 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
#else /* !CONFIG_ARM_LPAE */
+#define PGD_SIZE (PAGE_SIZE << 2)
/*
* Since we have only two-level page tables, these are trivial
*/
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd) do { } while (0)
+#ifdef CONFIG_KASAN
+/* The KASan core unconditionally calls pud_populate() on all architectures */
+#define pud_populate(mm,pmd,pte) do { } while (0)
+#else
#define pud_populate(mm,pmd,pte) BUG()
-
+#endif
#endif /* CONFIG_ARM_LPAE */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
@@ -76,6 +71,7 @@ static inline void clean_pte_table(pte_t *pte)
#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#define __HAVE_ARCH_PTE_ALLOC_ONE
+#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>
static inline pte_t *
@@ -147,7 +143,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
__pmd_populate(pmdp, page_to_phys(ptep), prot);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 51beec41d48c..ce543cd9380c 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -75,6 +75,8 @@
#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
@@ -124,6 +126,9 @@
#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
#define L_PTE_NONE (_AT(pteval_t, 1) << 11)
+/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
+#define L_PTE_SWP_EXCLUSIVE L_PTE_RDONLY
+
/*
* These are the memory types, defined to be compatible with
* pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
@@ -177,18 +182,39 @@
* the pud: the pud entry is never bad, always exists, and can't be set or
* cleared.
*/
-#define pud_none(pud) (0)
-#define pud_bad(pud) (0)
-#define pud_present(pud) (1)
-#define pud_clear(pudp) do { } while (0)
-#define set_pud(pud,pudp) do { } while (0)
+static inline int pud_none(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return 1;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+}
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
return (pmd_t *)pud;
}
+#define pmd_offset pmd_offset
+
+#define pmd_pfn(pmd) (__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
#define pmd_large(pmd) (pmd_val(pmd) & 2)
+#define pmd_leaf(pmd) (pmd_val(pmd) & 2)
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define pmd_present(pmd) (pmd_val(pmd))
@@ -210,8 +236,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define pmd_addr_end(addr,end) (end)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
-#define pte_special(pte) (0)
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
* We don't have huge page support for short descriptors, for the moment
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5b18295021a0..71c3add6417f 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -25,6 +25,8 @@
#define PTE_HWTABLE_OFF (0)
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
*/
@@ -74,6 +76,9 @@
#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
#define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
+/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
+#define L_PTE_SWP_EXCLUSIVE (_AT(pteval_t, 1) << 7)
+
#define L_PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
#define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
#define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
@@ -104,26 +109,6 @@
*/
#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
-/*
- * 2nd stage PTE definitions for LPAE.
- */
-#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
-#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
-#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
-#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
-#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
-
-#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
-
-#define L_PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[1] */
-#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
-
-/*
- * Hyp-mode PL2 PTE definitions for LPAE.
- */
-#define L_PTE_HYP L_PTE_USER
-
#ifndef __ASSEMBLY__
#define pud_none(pud) (!pud_val(pud))
@@ -134,6 +119,7 @@
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
#define pmd_large(pmd) pmd_sect(pmd)
+#define pmd_leaf(pmd) pmd_sect(pmd)
#define pud_clear(pudp) \
do { \
@@ -147,18 +133,11 @@
flush_pmd_entry(pudp); \
} while (0)
-static inline pmd_t *pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
-/* Find an entry in the second-level page table.. */
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
-
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
#define copy_pmd(pmdpd,pmdps) \
@@ -210,8 +189,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
-#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
-#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
@@ -225,7 +202,7 @@ static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
-PMD_BIT_FUNC(mkwrite, &= ~L_PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkwrite_novma, &= ~L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkclean, &= ~L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
@@ -240,7 +217,7 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
#define pmdp_establish generic_pmdp_establish
/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
}
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 010fa1a35a68..61480d096054 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -21,9 +21,6 @@
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp)
-#define kern_addr_valid(addr) (1)
-#define pmd_offset(a, b) ((void *)0)
-/* FIXME */
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
@@ -42,22 +39,10 @@
#define swapper_pg_dir ((pgd_t *) 0)
-#define __swp_type(x) (0)
-#define __swp_offset(x) (0)
-#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
typedef pte_t *pte_addr_t;
/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) (virt_to_page(0))
-
-/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) (prot)
@@ -79,8 +64,6 @@ extern unsigned int kobjsize(const void *objp);
#define FIRST_USER_ADDRESS 0UL
-#include <asm-generic/pgtable.h>
-
#else
/*
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index eabcb48a7840..d657b84b6bf7 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -10,6 +10,15 @@
#include <linux/const.h>
#include <asm/proc-fns.h>
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+#endif
+
#ifndef CONFIG_MMU
#include <asm-generic/pgtable-nopud.h>
@@ -17,9 +26,8 @@
#else
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
@@ -80,9 +88,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;
-extern pgprot_t pgprot_hyp_device;
-extern pgprot_t pgprot_s2;
-extern pgprot_t pgprot_s2_device;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
@@ -95,12 +100,6 @@ extern pgprot_t pgprot_s2_device;
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
-#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
-#define PAGE_HYP_EXEC _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
-#define PAGE_HYP_RO _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
-#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
-#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN)
-#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY | L_PTE_XN)
#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
@@ -147,42 +146,15 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
* 2) If we could do execute protection, then read is implied
* 3) write implies read permissions
*/
-#define __P000 __PAGE_NONE
-#define __P001 __PAGE_READONLY
-#define __P010 __PAGE_COPY
-#define __P011 __PAGE_COPY
-#define __P100 __PAGE_READONLY_EXEC
-#define __P101 __PAGE_READONLY_EXEC
-#define __P110 __PAGE_COPY_EXEC
-#define __P111 __PAGE_COPY_EXEC
-
-#define __S000 __PAGE_NONE
-#define __S001 __PAGE_READONLY
-#define __S010 __PAGE_SHARED
-#define __S011 __PAGE_SHARED
-#define __S100 __PAGE_READONLY_EXEC
-#define __S101 __PAGE_READONLY_EXEC
-#define __S110 __PAGE_SHARED_EXEC
-#define __S111 __PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr) (empty_zero_page)
-
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+#define pgdp_get(pgdp) READ_ONCE(*pgdp)
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
+#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
#define pmd_none(pmd) (!pmd_val(pmd))
@@ -193,21 +165,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-#ifndef CONFIG_HIGHPTE
-#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
-#define __pte_unmap(pte) do { } while (0)
-#else
-#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd)))
-#define __pte_unmap(pte) kunmap_atomic(pte)
-#endif
-
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr))
-
-#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
-#define pte_unmap(pte) __pte_unmap(pte)
-
#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
@@ -252,19 +209,9 @@ static inline void __sync_icache_dcache(pte_t pteval)
extern void __sync_icache_dcache(pte_t pteval);
#endif
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- unsigned long ext = 0;
-
- if (addr < TASK_SIZE && pte_valid_user(pteval)) {
- if (!pte_special(pteval))
- __sync_icache_dcache(pteval);
- ext |= PTE_EXT_NG;
- }
-
- set_pte_ext(ptep, pteval, ext);
-}
+void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval, unsigned int nr);
+#define set_ptes set_ptes
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
@@ -283,7 +230,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
{
return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}
@@ -327,27 +274,47 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
}
/*
- * Encode and decode a swap entry. Swap entries are stored in the Linux
- * page tables as follows:
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <--------------- offset ------------------------> < type -> 0 0
+ * <------------------- offset ------------------> E < type -> 0 0
*
- * This gives us up to 31 swap files and 128GB per swap file. Note that
+ * E is the exclusive marker that is not stored in swap entries.
+ *
+ * This gives us up to 31 swap files and 64GB per swap file. Note that
* the offset field is always non-zero.
*/
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
-#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+#define __swp_entry(type, offset) ((swp_entry_t) { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
+ ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+#define __swp_entry_to_pte(swp) __pte((swp).val)
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_isset(pte, L_PTE_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_SWP_EXCLUSIVE));
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_SWP_EXCLUSIVE));
+}
/*
* It is an error for the kernel to have more swap files than we can
@@ -356,12 +323,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-/* FIXME: this is not correct */
-#define kern_addr_valid(addr) (1)
-
-#include <asm-generic/pgtable.h>
-
/*
* We provide our own arch_get_unmapped_area to cope with VIPT caches.
*/
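
With the exclusive marker occupying bit 7, the offset field shrinks from 25 to 24 bits, hence the drop from 128GB to 64GB per swap file in the updated comment (2^24 pages of 4 KiB). A standalone sketch of the packing; the EX_* names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define EX_TYPE_SHIFT	2
#define EX_TYPE_BITS	5
#define EX_TYPE_MASK	((1u << EX_TYPE_BITS) - 1)
#define EX_OFFSET_SHIFT	(EX_TYPE_BITS + EX_TYPE_SHIFT + 1)	/* bit 7 = E */

static uint32_t ex_swp_entry(uint32_t type, uint32_t offset)
{
	return ((type & EX_TYPE_MASK) << EX_TYPE_SHIFT) |
	       (offset << EX_OFFSET_SHIFT);
}

int main(void)
{
	uint32_t e = ex_swp_entry(3, 0x12345);

	printf("type=%u offset=0x%x\n",
	       (e >> EX_TYPE_SHIFT) & EX_TYPE_MASK, e >> EX_OFFSET_SHIFT);
	return 0;	/* prints type=3 offset=0x12345 */
}
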
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index c82f7a29ec4a..280396483f5d 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -147,8 +147,6 @@ static inline void init_proc_vtable(const struct processor *p)
extern void cpu_resume(void);
-#include <asm/memory.h>
-
#ifdef CONFIG_MMU
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 614bf829e454..326864f79d18 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -14,6 +14,7 @@
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>
+#include <asm/vdso/processor.h>
#ifdef __KERNEL__
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -80,20 +81,7 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
/* Forward declaration, a strange C thing */
struct task_struct;
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-unsigned long get_wchan(struct task_struct *p);
-
-#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
-#define cpu_relax() \
- do { \
- smp_mb(); \
- __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
- } while (0)
-#else
-#define cpu_relax() barrier()
-#endif
+unsigned long __get_wchan(struct task_struct *p);
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
@@ -105,7 +93,8 @@ unsigned long get_wchan(struct task_struct *p);
#define __ALT_SMP_ASM(smp, up) \
"9998: " smp "\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
- " .long 9998b\n" \
+ " .align 2\n" \
+ " .long 9998b - .\n" \
" " up "\n" \
" .popsection\n"
#else
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index 1e36c40533c1..402e3f34c7ed 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -9,12 +9,12 @@
#ifdef CONFIG_OF
-extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
+extern const struct machine_desc *setup_machine_fdt(void *dt_virt);
extern void __init arm_dt_init_cpu_maps(void);
#else /* CONFIG_OF */
-static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+static inline const struct machine_desc *setup_machine_fdt(void *dt_virt)
{
return NULL;
}
diff --git a/arch/arm/include/asm/ptdump.h b/arch/arm/include/asm/ptdump.h
index 0c2d3d0d4cc6..aad1d034136c 100644
--- a/arch/arm/include/asm/ptdump.h
+++ b/arch/arm/include/asm/ptdump.h
@@ -21,6 +21,7 @@ struct ptdump_info {
void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
#ifdef CONFIG_ARM_PTDUMP_DEBUGFS
+#define EFI_RUNTIME_MAP_END SZ_1G
void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
static inline void ptdump_debugfs_register(struct ptdump_info *info,
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 91d6b7856be4..7f44e88d1f25 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -19,7 +19,6 @@ struct pt_regs {
struct svc_pt_regs {
struct pt_regs regs;
u32 dacr;
- u32 addr_limit;
};
#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
@@ -164,5 +163,38 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
})
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->ARM_r0 = rc;
+}
+
+/*
+ * Update ITSTATE after normal execution of an IT block instruction.
+ *
+ * The 8 IT state bits are split into two parts in CPSR:
+ * ITSTATE<1:0> are in CPSR<26:25>
+ * ITSTATE<7:2> are in CPSR<15:10>
+ */
+static inline unsigned long it_advance(unsigned long cpsr)
+{
+ if ((cpsr & 0x06000400) == 0) {
+ /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
+ cpsr &= ~PSR_IT_MASK;
+ } else {
+ /* We need to shift left ITSTATE<4:0> */
+ const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
+ unsigned long it = cpsr & mask;
+ it <<= 1;
+ it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
+ it &= mask;
+ cpsr &= ~mask;
+ cpsr |= it;
+ }
+ return cpsr;
+}
+
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
#endif /* __ASSEMBLY__ */
#endif
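
The split CPSR layout documented above folds back into the linear 8-bit ITSTATE with two shifts; a hypothetical helper for illustration, not a kernel API:

static unsigned int itstate_of(unsigned long cpsr)
{
	return ((cpsr >> 25) & 0x3) |		/* ITSTATE<1:0> from CPSR<26:25> */
	       (((cpsr >> 10) & 0x3f) << 2);	/* ITSTATE<7:2> from CPSR<15:10> */
}
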
diff --git a/arch/arm/include/asm/seccomp.h b/arch/arm/include/asm/seccomp.h
new file mode 100644
index 000000000000..e9ad0f37d2ba
--- /dev/null
+++ b/arch/arm/include/asm/seccomp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm-generic/seccomp.h>
+
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_ARM
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "arm"
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h
index 4ceb4f757d4d..700b8bcdf9bd 100644
--- a/arch/arm/include/asm/sections.h
+++ b/arch/arm/include/asm/sections.h
@@ -10,8 +10,6 @@ extern char __idmap_text_start[];
extern char __idmap_text_end[];
extern char __entry_text_start[];
extern char __entry_text_end[];
-extern char __hyp_idmap_text_start[];
-extern char __hyp_idmap_text_end[];
static inline bool in_entry_text(unsigned long addr)
{
@@ -22,9 +20,7 @@ static inline bool in_entry_text(unsigned long addr)
static inline bool in_idmap_text(unsigned long addr)
{
void *a = (void *)addr;
- return memory_contains(__idmap_text_start, __idmap_text_end, a, 1) ||
- memory_contains(__hyp_idmap_text_start, __hyp_idmap_text_end,
- a, 1);
+ return memory_contains(__idmap_text_start, __idmap_text_end, a, 1);
}
#endif /* _ASM_ARM_SECTIONS_H */
diff --git a/arch/arm/include/asm/semihost.h b/arch/arm/include/asm/semihost.h
new file mode 100644
index 000000000000..f365787e7c23
--- /dev/null
+++ b/arch/arm/include/asm/semihost.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Adapted for ARM and earlycon:
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Rob Herring <robh@kernel.org>
+ */
+
+#ifndef _ARM_SEMIHOST_H_
+#define _ARM_SEMIHOST_H_
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEMIHOST_SWI "0xab"
+#else
+#define SEMIHOST_SWI "0x123456"
+#endif
+
+struct uart_port;
+
+static inline void smh_putc(struct uart_port *port, unsigned char c)
+{
+ asm volatile("mov r1, %0\n"
+ "mov r0, #3\n"
+ "svc " SEMIHOST_SWI "\n"
+ : : "r" (&c) : "r0", "r1", "memory");
+}
+
+#endif /* _ARM_SEMIHOST_H_ */
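
Since the semihosting call above (operation 3 in r0, SYS_WRITEC) emits one character whose address is in r1, a string writer is just a loop over smh_putc(). A hypothetical helper built on the header above:

static inline void smh_puts(struct uart_port *port, const char *s)
{
	while (*s)
		smh_putc(port, *s++);
}
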
diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h
index a1ceff4295d3..0211b9c5b14d 100644
--- a/arch/arm/include/asm/set_memory.h
+++ b/arch/arm/include/asm/set_memory.h
@@ -11,6 +11,7 @@ int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_valid(unsigned long addr, int numpages, int enable);
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
@@ -18,12 +19,4 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void set_kernel_text_rw(void);
-void set_kernel_text_ro(void);
-#else
-static inline void set_kernel_text_rw(void) { }
-static inline void set_kernel_text_ro(void) { }
-#endif
-
#endif
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 67d20712cb48..cc106f946c69 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -5,21 +5,22 @@
* Copyright (C) 1997-1999 Russell King
*
* Structure passed to kernel to tell it about the
- * hardware it's running on. See Documentation/arm/setup.rst
+ * hardware it's running on. See Documentation/arch/arm/setup.rst
* for more info.
*/
#ifndef __ASMARM_SETUP_H
#define __ASMARM_SETUP_H
+#include <linux/screen_info.h>
#include <uapi/asm/setup.h>
-#define __tag __used __attribute__((__section__(".taglist.init")))
+#define __tag __used __section(".taglist.init")
#define __tagtable(tag, fn) \
static const struct tagtable __tagtable_##fn __tag = { tag, fn }
extern int arm_add_memory(u64 start, u64 size);
-extern void early_print(const char *str, ...);
+extern __printf(1, 2) void early_print(const char *str, ...);
extern void dump_machine_table(void);
#ifdef CONFIG_ATAGS_PROC
@@ -28,4 +29,15 @@ extern void save_atags(const struct tag *tags);
static inline void save_atags(const struct tag *tags) { }
#endif
+struct machine_desc;
+void init_default_cache_policy(unsigned long);
+void paging_init(const struct machine_desc *desc);
+void early_mm_init(const struct machine_desc *);
+void adjust_lowmem_bounds(void);
+void setup_dma_zone(const struct machine_desc *desc);
+
+#ifdef CONFIG_VGA_CONSOLE
+extern struct screen_info vgacon_screen_info;
+#endif
+
#endif
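Note: the __tagtable() registration pattern itself is unchanged by the __section() cleanup; the memory ATAG parser in arch/arm/kernel/setup.c is the canonical example:

	static int __init parse_tag_mem32(const struct tag *tag)
	{
		return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	}

	__tagtable(ATAG_MEM, parse_tag_mem32);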
diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
index 65530a042009..8b84092d1518 100644
--- a/arch/arm/include/asm/signal.h
+++ b/arch/arm/include/asm/signal.h
@@ -17,7 +17,14 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
+#define __ARCH_UAPI_SA_FLAGS (SA_THIRTYTWO | SA_RESTORER)
+
#define __ARCH_HAS_SA_RESTORER
#include <asm/sigcontext.h>
+
+void do_rseq_syscall(struct pt_regs *regs);
+int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
+ int syscall);
+
#endif
diff --git a/arch/arm/include/asm/simd.h b/arch/arm/include/asm/simd.h
new file mode 100644
index 000000000000..82191dbd7e78
--- /dev/null
+++ b/arch/arm/include/asm/simd.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/hardirq.h>
+
+static __must_check inline bool may_use_simd(void)
+{
+ return IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && !in_hardirq();
+}
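Note: may_use_simd() gates kernel-mode NEON, since the NEON register file is only saved and restored around user context and must not be touched from hard IRQ context. The usual caller pattern, as in the ARM crypto glue code, with the transform bodies elided:

	#include <asm/neon.h>
	#include <asm/simd.h>

	static void do_transform(u8 *dst, const u8 *src, unsigned int len)
	{
		if (may_use_simd()) {
			kernel_neon_begin();
			/* ... NEON fast path ... */
			kernel_neon_end();
		} else {
			/* ... scalar fallback, safe in any context ... */
		}
	}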
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a91f21e3c5b5..8c05a7f374d8 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -25,11 +25,6 @@ struct seq_file;
extern void show_ipi_list(struct seq_file *, int);
/*
- * Called from assembly code, this handles an IPI.
- */
-asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
-
-/*
* Called from C code, this handles an IPI.
*/
void handle_IPI(int ipinr, struct pt_regs *regs);
@@ -39,17 +34,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs);
*/
extern void smp_init_cpus(void);
-
/*
- * Provide a function to raise an IPI cross call on CPUs in callmap.
+ * Register IPI interrupts with the arch SMP code
*/
-extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
/*
* Called from platform specific assembly code, this is the
* secondary CPU entry point.
*/
-asmlinkage void secondary_start_kernel(void);
+asmlinkage void secondary_start_kernel(struct task_struct *task);
/*
@@ -62,6 +56,7 @@ struct secondary_data {
};
unsigned long swapper_pg_dir;
void *stack;
+ struct task_struct *task;
};
extern struct secondary_data secondary_data;
extern void secondary_startup(void);
@@ -69,7 +64,7 @@ extern void secondary_startup_arm(void);
extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
+static inline void __cpu_die(unsigned int cpu) { }
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -113,7 +108,7 @@ struct of_cpu_method {
#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpu_method __cpu_method_of_table_##name \
- __used __section(__cpu_method_of_table) \
+ __used __section("__cpu_method_of_table") \
= { .method = _method, .ops = _ops }
/*
* set platform specific SMP operations
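Note: with set_smp_cross_call() gone, IPIs are handed to the arch code as an ordinary IRQ range via set_smp_ipi_range(); platform enable methods still register through CPU_METHOD_OF_DECLARE, now with a quoted section name. A hypothetical registration (vendor string and callback invented for illustration):

	#include <linux/init.h>
	#include <asm/smp.h>

	static int my_boot_secondary(unsigned int cpu, struct task_struct *idle)
	{
		/* platform-specific: release the secondary core from reset */
		return 0;
	}

	static const struct smp_operations my_smp_ops __initconst = {
		.smp_boot_secondary	= my_boot_secondary,
	};
	CPU_METHOD_OF_DECLARE(my_smp, "vendor,my-enable-method", &my_smp_ops);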
diff --git a/arch/arm/include/asm/sparsemem.h b/arch/arm/include/asm/sparsemem.h
index d362233856a5..421e3415338a 100644
--- a/arch/arm/include/asm/sparsemem.h
+++ b/arch/arm/include/asm/sparsemem.h
@@ -2,7 +2,7 @@
#ifndef ASMARM_SPARSEMEM_H
#define ASMARM_SPARSEMEM_H
-#include <asm/memory.h>
+#include <asm/page.h>
/*
* Two definitions are required for sparsemem:
diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h
new file mode 100644
index 000000000000..d9c28b3b6b62
--- /dev/null
+++ b/arch/arm/include/asm/spectre.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+enum {
+ SPECTRE_UNAFFECTED,
+ SPECTRE_MITIGATED,
+ SPECTRE_VULNERABLE,
+};
+
+enum {
+ __SPECTRE_V2_METHOD_BPIALL,
+ __SPECTRE_V2_METHOD_ICIALLU,
+ __SPECTRE_V2_METHOD_SMC,
+ __SPECTRE_V2_METHOD_HVC,
+ __SPECTRE_V2_METHOD_LOOP8,
+};
+
+enum {
+ SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
+ SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
+ SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
+ SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
+ SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
+};
+
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+void spectre_v2_update_state(unsigned int state, unsigned int methods);
+#else
+static inline void spectre_v2_update_state(unsigned int state,
+ unsigned int methods)
+{}
+#endif
+
+int spectre_bhb_update_vectors(unsigned int method);
+
+void cpu_v7_ca8_ibe(void);
+void cpu_v7_ca15_ibe(void);
+void cpu_v7_bugs_init(void);
+
+#endif
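Note: spectre_v2_update_state() takes one of the SPECTRE_* states plus a mask of the SPECTRE_V2_METHOD_* bits actually deployed. A rough sketch of a per-CPU workaround reporting in, simplified from the shape of arch/arm/mm/proc-v7-bugs.c:

	static void report_bpiall_mitigation(void)
	{
		/* ... install the BPIALL-based predictor flush here ... */

		spectre_v2_update_state(SPECTRE_MITIGATED,
					SPECTRE_V2_METHOD_BPIALL);
	}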
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 8f009e788ad4..f610a773f2be 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -22,7 +22,7 @@
* assembler to insert a extra (16-bit) IT instruction, depending on the
* presence or absence of neighbouring conditional instructions.
*
- * To avoid this unpredictableness, an approprite IT is inserted explicitly:
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 5976958647fe..0c14b36ef101 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -2,7 +2,7 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly"
#endif
diff --git a/arch/arm/include/asm/stackprotector.h b/arch/arm/include/asm/stackprotector.h
index 72a20c3a0a90..0bd4979759f1 100644
--- a/arch/arm/include/asm/stackprotector.h
+++ b/arch/arm/include/asm/stackprotector.h
@@ -15,9 +15,6 @@
#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H 1
-#include <linux/random.h>
-#include <linux/version.h>
-
#include <asm/thread_info.h>
extern unsigned long __stack_chk_guard;
@@ -30,17 +27,11 @@ extern unsigned long __stack_chk_guard;
*/
static __always_inline void boot_init_stack_canary(void)
{
- unsigned long canary;
-
- /* Try to get a semi random initial value. */
- get_random_bytes(&canary, sizeof(canary));
- canary ^= LINUX_VERSION_CODE;
+ unsigned long canary = get_random_canary();
current->stack_canary = canary;
#ifndef CONFIG_STACKPROTECTOR_PER_TASK
__stack_chk_guard = current->stack_canary;
-#else
- current_thread_info()->stack_canary = current->stack_canary;
#endif
}
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index 2d76a2e29f05..360f0d2406bf 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -3,6 +3,7 @@
#define __ASM_STACKTRACE_H
#include <asm/ptrace.h>
+#include <linux/llist.h>
struct stackframe {
/*
@@ -13,6 +14,16 @@ struct stackframe {
unsigned long sp;
unsigned long lr;
unsigned long pc;
+
+ /* address of the LR value on the stack */
+ unsigned long *lr_addr;
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+ struct task_struct *tsk;
+#endif
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
+ bool ex_frame;
+#endif
};
static __always_inline
@@ -22,10 +33,21 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
frame->sp = regs->ARM_sp;
frame->lr = regs->ARM_lr;
frame->pc = regs->ARM_pc;
+#ifdef CONFIG_KRETPROBES
+ frame->kr_cur = NULL;
+ frame->tsk = current;
+#endif
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
+ frame->ex_frame = in_entry_text(frame->pc);
+#endif
}
extern int unwind_frame(struct stackframe *frame);
extern void walk_stackframe(struct stackframe *frame,
- int (*fn)(struct stackframe *, void *), void *data);
+ bool (*fn)(void *, unsigned long), void *data);
+extern void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ unsigned long top);
+extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
#endif /* __ASM_STACKTRACE_H */
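Note: walk_stackframe() now takes the generic consume-entry callback shape, where returning false stops the walk. A toy consumer that fills a fixed-size buffer (names hypothetical):

	#include <linux/types.h>

	struct trace_buf {
		unsigned long	*pcs;
		unsigned int	nr, max;
	};

	static bool save_trace(void *data, unsigned long pc)
	{
		struct trace_buf *tb = data;

		tb->pcs[tb->nr++] = pc;
		return tb->nr < tb->max;	/* false terminates the walk */
	}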
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
deleted file mode 100644
index aaceec7855ec..000000000000
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 - ARM Ltd
- *
- * stage2 page table helpers
- */
-
-#ifndef __ARM_S2_PGTABLE_H_
-#define __ARM_S2_PGTABLE_H_
-
-/*
- * kvm_mmu_cache_min_pages() is the number of pages required
- * to install a stage-2 translation. We pre-allocate the entry
- * level table at VM creation. Since we have a 3 level page-table,
- * we need only two pages to add a new mapping.
- */
-#define kvm_mmu_cache_min_pages(kvm) 2
-
-#define stage2_pgd_none(kvm, pgd) pgd_none(pgd)
-#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd)
-#define stage2_pgd_present(kvm, pgd) pgd_present(pgd)
-#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud)
-#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address)
-#define stage2_pud_free(kvm, pud) do { } while (0)
-
-#define stage2_pud_none(kvm, pud) pud_none(pud)
-#define stage2_pud_clear(kvm, pud) pud_clear(pud)
-#define stage2_pud_present(kvm, pud) pud_present(pud)
-#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd)
-#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address)
-#define stage2_pmd_free(kvm, pmd) free_page((unsigned long)pmd)
-
-#define stage2_pud_huge(kvm, pud) pud_huge(pud)
-
-/* Open coded p*d_addr_end that can deal with 64bit addresses */
-static inline phys_addr_t
-stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
-
- return (boundary - 1 < end - 1) ? boundary : end;
-}
-
-#define stage2_pud_addr_end(kvm, addr, end) (end)
-
-static inline phys_addr_t
-stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;
-
- return (boundary - 1 < end - 1) ? boundary : end;
-}
-
-#define stage2_pgd_index(kvm, addr) pgd_index(addr)
-
-#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
-#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
-#define stage2_pud_table_empty(kvm, pudp) false
-
-static inline bool kvm_stage2_has_pud(struct kvm *kvm)
-{
- return false;
-}
-
-#define S2_PMD_MASK PMD_MASK
-#define S2_PMD_SIZE PMD_SIZE
-#define S2_PUD_MASK PUD_MASK
-#define S2_PUD_SIZE PUD_SIZE
-
-static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
-{
- return true;
-}
-
-#endif /* __ARM_S2_PGTABLE_H_ */
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index 111a1d8a41dd..6c607c68f3ad 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -5,6 +5,9 @@
/*
* We don't do inline string functions, since the
* optimised inline asm versions are not small.
+ *
+ * The __underscore versions of some functions are for KASan to be able
+ * to replace them with instrumented versions.
*/
#define __HAVE_ARCH_STRRCHR
@@ -15,15 +18,18 @@ extern char * strchr(const char * s, int c);
#define __HAVE_ARCH_MEMCPY
extern void * memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMMOVE
extern void * memmove(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMCHR
extern void * memchr(const void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMSET
extern void * memset(void *, int, __kernel_size_t);
+extern void *__memset(void *s, int c, __kernel_size_t n);
#define __HAVE_ARCH_MEMSET32
extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
@@ -39,4 +45,24 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
return __memset64(p, v, n * 8, v >> 32);
}
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * must use non-instrumented versions of the mem*
+ * functions named __memcpy() etc. All such kernel code has
+ * been tagged with KASAN_SANITIZE_file.o = n, which means
+ * that the address sanitization argument isn't passed to the
+ * compiler, and __SANITIZE_ADDRESS__ is not set. As a result
+ * these defines kick in.
+ */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
+
#endif
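Note: the KASan block is transparent to callers; in a file built with KASAN_SANITIZE_foo.o := n, __SANITIZE_ADDRESS__ is unset and a plain memcpy() quietly compiles to the uninstrumented __memcpy():

	#include <linux/string.h>

	static void copy_raw(void *dst, const void *src, size_t n)
	{
		memcpy(dst, src, n);	/* becomes __memcpy(dst, src, n) here */
	}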
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
index 506314265c6f..be81b9ca2ea1 100644
--- a/arch/arm/include/asm/suspend.h
+++ b/arch/arm/include/asm/suspend.h
@@ -13,5 +13,6 @@ extern void cpu_resume(void);
extern void cpu_resume_no_hyp(void);
extern void cpu_resume_arm(void);
extern int cpu_suspend(unsigned long, int (*)(unsigned long));
+extern void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr);
#endif
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index d3e937dcee4d..9372348516ce 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -3,6 +3,7 @@
#define __ASM_ARM_SWITCH_TO_H
#include <linux/thread_info.h>
+#include <asm/smp_plat.h>
/*
* For v7 SMP cores running a preemptible kernel we may be pre-empted
@@ -10,7 +11,7 @@
* to ensure that the maintenance completes in case we migrate to another
* CPU.
*/
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define __complete_pending_tlbi() dsb(ish)
#else
#define __complete_pending_tlbi()
@@ -26,6 +27,8 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
__complete_pending_tlbi(); \
+ if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || is_smp()) \
+ __this_cpu_write(__entry_task, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 39ff217136d1..f46b3c570f92 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -14,14 +14,35 @@
* ops which are SMP safe even on a UP kernel.
*/
+/*
+ * Unordered
+ */
+
#define sync_set_bit(nr, p) _set_bit(nr, p)
#define sync_clear_bit(nr, p) _clear_bit(nr, p)
#define sync_change_bit(nr, p) _change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define sync_cmpxchg cmpxchg
+/*
+ * Fully ordered
+ */
+
+int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_set_bit(nr, p) _sync_test_and_set_bit(nr, p)
+
+int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p)
+
+int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_change_bit(nr, p) _sync_test_and_change_bit(nr, p)
+
+#define arch_sync_cmpxchg(ptr, old, new) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __smp_mb__before_atomic(); \
+ __ret = arch_cmpxchg_relaxed((ptr), (old), (new)); \
+ __smp_mb__after_atomic(); \
+ __ret; \
+})
#endif
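Note: the sync_ RMW ops exist for memory shared with another agent (Xen being the classic user), where fully ordered, SMP-safe atomics are needed even on UP kernels. A hypothetical slot allocator over such a shared bitmap:

	#include <asm/sync_bitops.h>

	static int claim_shared_slot(unsigned long *shared_map, int nr_slots)
	{
		int i;

		for (i = 0; i < nr_slots; i++)
			if (!sync_test_and_set_bit(i, shared_map))
				return i;	/* full barriers on both sides */
		return -1;
	}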
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index fd02761ba06c..fe4326d938c1 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -22,7 +22,24 @@ extern const unsigned long sys_call_table[];
static inline int syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
- return task_thread_info(task)->syscall;
+ if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))
+ return task_thread_info(task)->abi_syscall;
+
+ if (task_thread_info(task)->abi_syscall == -1)
+ return -1;
+
+ return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK;
+}
+
+static inline bool __in_oabi_syscall(struct task_struct *task)
+{
+ return IS_ENABLED(CONFIG_OABI_COMPAT) &&
+ (task_thread_info(task)->abi_syscall & __NR_OABI_SYSCALL_BASE);
+}
+
+static inline bool in_oabi_syscall(void)
+{
+ return __in_oabi_syscall(current);
}
static inline void syscall_rollback(struct task_struct *task,
@@ -63,16 +80,6 @@ static inline void syscall_get_arguments(struct task_struct *task,
memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
}
-static inline void syscall_set_arguments(struct task_struct *task,
- struct pt_regs *regs,
- const unsigned long *args)
-{
- regs->ARM_ORIG_r0 = args[0];
- args++;
-
- memcpy(&regs->ARM_r0 + 1, args, 5 * sizeof(args[0]));
-}
-
static inline int syscall_get_arch(struct task_struct *task)
{
/* ARM tasks don't change audit architectures on the fly. */
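Note: since abi_syscall now encodes the ABI via __NR_OABI_SYSCALL_BASE, in_oabi_syscall() lets shared compat code pick the right user-space layout at run time. A sketch, with put_oabi_event() standing in for the real conversion helper:

	#include <linux/eventpoll.h>
	#include <linux/uaccess.h>

	static long copy_event_to_user(struct epoll_event *ev, void __user *uptr)
	{
		if (in_oabi_syscall())
			return put_oabi_event(ev, uptr);	/* hypothetical */
		return copy_to_user(uptr, ev, sizeof(*ev)) ? -EFAULT : 0;
	}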
diff --git a/arch/arm/include/asm/syscalls.h b/arch/arm/include/asm/syscalls.h
new file mode 100644
index 000000000000..5912e7cffa6a
--- /dev/null
+++ b/arch/arm/include/asm/syscalls.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_SYSCALLS_H
+#define __ASM_SYSCALLS_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+struct pt_regs;
+asmlinkage int sys_sigreturn(struct pt_regs *regs);
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
+asmlinkage long sys_arm_fadvise64_64(int fd, int advice,
+ loff_t offset, loff_t len);
+
+struct oldabi_stat64;
+asmlinkage long sys_oabi_stat64(const char __user * filename,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_lstat64(const char __user * filename,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_fstat64(unsigned long fd,
+ struct oldabi_stat64 __user * statbuf);
+asmlinkage long sys_oabi_fstatat64(int dfd,
+ const char __user *filename,
+ struct oldabi_stat64 __user *statbuf,
+ int flag);
+asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
+ unsigned long arg);
+struct oabi_epoll_event;
+asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
+ struct oabi_epoll_event __user *event);
+struct oabi_sembuf;
+struct old_timespec32;
+asmlinkage long sys_oabi_semtimedop(int semid,
+ struct oabi_sembuf __user *tsops,
+ unsigned nsops,
+ const struct old_timespec32 __user *timeout);
+asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops,
+ unsigned nsops);
+asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
+ void __user *ptr, long fifth);
+struct sockaddr;
+asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen);
+asmlinkage long sys_oabi_connect(int fd, struct sockaddr __user *addr, int addrlen);
+asmlinkage long sys_oabi_sendto(int fd, void __user *buff,
+ size_t len, unsigned flags,
+ struct sockaddr __user *addr,
+ int addrlen);
+struct user_msghdr;
+asmlinkage long sys_oabi_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args);
+
+#endif
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 66f6a3ae68d2..98b37340376b 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -13,7 +13,6 @@
extern void cpu_init(void);
void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
extern void (*arm_pm_idle)(void);
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h
index b845b10fe29a..e1f7dca86a22 100644
--- a/arch/arm/include/asm/tcm.h
+++ b/arch/arm/include/asm/tcm.h
@@ -9,24 +9,29 @@
#ifndef __ASMARM_TCM_H
#define __ASMARM_TCM_H
-#ifndef CONFIG_HAVE_TCM
-#error "You should not be including tcm.h unless you have a TCM!"
-#endif
+#ifdef CONFIG_HAVE_TCM
#include <linux/compiler.h>
/* Tag variables with this */
-#define __tcmdata __section(.tcm.data)
+#define __tcmdata __section(".tcm.data")
/* Tag constants with this */
-#define __tcmconst __section(.tcm.rodata)
+#define __tcmconst __section(".tcm.rodata")
/* Tag functions inside TCM called from outside TCM with this */
-#define __tcmfunc __attribute__((long_call)) __section(.tcm.text) noinline
+#define __tcmfunc __attribute__((long_call)) __section(".tcm.text") noinline
/* Tag function inside TCM called from inside TCM with this */
-#define __tcmlocalfunc __section(.tcm.text)
+#define __tcmlocalfunc __section(".tcm.text")
void *tcm_alloc(size_t len);
void tcm_free(void *addr, size_t len);
bool tcm_dtcm_present(void);
bool tcm_itcm_present(void);
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+static inline void tcm_init(void)
+{
+}
+#endif
#endif
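Note: making the header compile without CONFIG_HAVE_TCM lets generic init code call tcm_init() unconditionally; the placement tags themselves remain TCM-only. Their classic use, per the ARM TCM documentation:

	#include <asm/tcm.h>

	static int irq_hitcount __tcmdata;		/* placed in .tcm.data */

	int __tcmfunc hot_irq_handler(int irq)		/* runs from ITCM */
	{
		return ++irq_hitcount;
	}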
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 0d0d5178e2c3..943ffcf069d2 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -13,17 +13,34 @@
#include <asm/fpstate.h>
#include <asm/page.h>
+#ifdef CONFIG_KASAN
+/*
+ * KASan uses a lot of extra stack space so the thread size order needs to
+ * be increased.
+ */
+#define THREAD_SIZE_ORDER 2
+#else
#define THREAD_SIZE_ORDER 1
+#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP (THREAD_SIZE - 8)
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
+#define OVERFLOW_STACK_SIZE SZ_4K
+
#ifndef __ASSEMBLY__
struct task_struct;
-#include <asm/types.h>
+DECLARE_PER_CPU(struct task_struct *, __entry_task);
-typedef unsigned long mm_segment_t;
+#include <asm/types.h>
+#include <asm/traps.h>
struct cpu_context_save {
__u32 r4;
@@ -46,20 +63,11 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
- mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
__u32 cpu_domain; /* cpu domain */
-#ifdef CONFIG_STACKPROTECTOR_PER_TASK
- unsigned long stack_canary;
-#endif
struct cpu_context_save cpu_context; /* cpu context */
- __u32 syscall; /* syscall number */
- __u8 used_cp[16]; /* thread used copro */
+ __u32 abi_syscall; /* ABI type and syscall nr */
unsigned long tp_value[2]; /* TLS registers */
-#ifdef CONFIG_CRUNCH
- struct crunch_state crunchstate;
-#endif
union fp_state fpstate __attribute__((aligned(8)));
union vfp_state vfpstate;
#ifdef CONFIG_ARM_THUMBEE
@@ -69,26 +77,13 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
- .task = &tsk, \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
}
-/*
- * how to get the current stack pointer in C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
-/*
- * how to get the thread information struct from C
- */
-static inline struct thread_info *current_thread_info(void) __attribute_const__;
-
-static inline struct thread_info *current_thread_info(void)
+static inline struct task_struct *thread_task(struct thread_info* ti)
{
- return (struct thread_info *)
- (current_stack_pointer & ~(THREAD_SIZE - 1));
+ return (struct task_struct *)ti;
}
#define thread_saved_pc(tsk) \
@@ -104,17 +99,27 @@ static inline struct thread_info *current_thread_info(void)
((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
#endif
-extern void crunch_task_disable(struct thread_info *);
-extern void crunch_task_copy(struct thread_info *, void *);
-extern void crunch_task_restore(struct thread_info *, void *);
-extern void crunch_task_release(struct thread_info *);
-
extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
extern void iwmmxt_task_restore(struct thread_info *, void *);
extern void iwmmxt_task_release(struct thread_info *);
extern void iwmmxt_task_switch(struct thread_info *);
+extern int iwmmxt_undef_handler(struct pt_regs *, u32);
+
+static inline void register_iwmmxt_undef_handler(void)
+{
+ static struct undef_hook iwmmxt_undef_hook = {
+ .instr_mask = 0x0c000e00,
+ .instr_val = 0x0c000000,
+ .cpsr_mask = MODE_MASK | PSR_T_BIT,
+ .cpsr_val = USR_MODE,
+ .fn = iwmmxt_undef_handler,
+ };
+
+ register_undef_hook(&iwmmxt_undef_hook);
+}
+
extern void vfp_sync_hwstate(struct thread_info *);
extern void vfp_flush_hwstate(struct thread_info *);
@@ -131,20 +136,23 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
* thread information flags:
* TIF_USEDFPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
+ *
+ * Any bit in the range of 0..15 will cause do_work_pending() to be invoked.
*/
#define TIF_SIGPENDING 0 /* signal pending */
#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_UPROBE 3 /* breakpointed or singlestepping */
-#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
-#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
-#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */
-#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK 20
+#define TIF_RESTORE_SIGMASK 19
+#define TIF_SYSCALL_TRACE 20 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 21 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 22 /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP 23 /* seccomp syscall filtering active */
+
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -154,6 +162,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
/* Checks for any syscall work in entry-common.S */
@@ -164,7 +173,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
* Change these and you break ASM code in entry-common.S
*/
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NOTIFY_SIGNAL)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
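Note: the TIF renumbering is deliberate; every flag do_work_pending() reacts to stays in bits 0..15, which entry-common.S can test as a cheap immediate, while the syscall-tracing flags move above bit 15. Illustrative compile-time assertions in that spirit:

	#include <linux/build_bug.h>

	static inline void check_tif_layout(void)
	{
		BUILD_BUG_ON(_TIF_WORK_MASK & ~0xffffUL);	/* low half only */
		BUILD_BUG_ON((_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |
			      _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) & 0xffffUL);
	}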
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index 7c3b3671d6c2..6d1337c169cd 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -11,5 +11,6 @@
typedef unsigned long cycles_t;
#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
+#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
#endif
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 669474add486..f40d06ad5d2a 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -27,7 +27,6 @@
#else /* !CONFIG_MMU */
#include <linux/swap.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
static inline void __tlb_remove_table(void *_table)
@@ -37,14 +36,12 @@ static inline void __tlb_remove_table(void *_table)
#include <asm-generic/tlb.h>
-#ifndef CONFIG_HAVE_RCU_TABLE_FREE
-#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry)
-#endif
-
static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
- pgtable_pte_page_dtor(pte);
+ struct ptdesc *ptdesc = page_ptdesc(pte);
+
+ pagetable_pte_dtor(ptdesc);
#ifndef CONFIG_ARM_LPAE
/*
@@ -55,16 +52,17 @@ __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
__tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif
- tlb_remove_table(tlb, pte);
+ tlb_remove_ptdesc(tlb, ptdesc);
}
static inline void
__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
- struct page *page = virt_to_page(pmdp);
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
- tlb_remove_table(tlb, page);
+ pagetable_pmd_dtor(ptdesc);
+ tlb_remove_ptdesc(tlb, ptdesc);
#endif
}
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 24cbfc112dfa..38c6e4a2a0b6 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -253,7 +253,7 @@ extern struct cpu_tlb_fns cpu_tlb;
* space.
* - mm - mm_struct describing address space
*
- * flush_tlb_range(mm,start,end)
+ * flush_tlb_range(vma,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
@@ -261,18 +261,11 @@ extern struct cpu_tlb_fns cpu_tlb;
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
- * flush_tlb_page(vaddr,vma)
+ * flush_tlb_page(vma, uaddr)
*
* Invalidate the specified page in the specified address range.
+ * - vma - vm_area_struct describing address range
* - vaddr - virtual address (may not be aligned)
- * - vma - vma_struct describing address range
- *
- * flush_kern_tlb_page(kaddr)
- *
- * Invalidate the TLB entry for the specified page. The address
- * will be in the kernels virtual memory space. Current uses
- * only require the D-TLB to be invalidated.
- * - kaddr - Kernel virtual memory address
*/
/*
@@ -626,18 +619,22 @@ extern void flush_bp_all(void);
* If PG_dcache_clean is not set for the page, we need to ensure that any
* cache entries for the kernels virtual memory range are written
* back to the page. On ARMv6 and later, the cache coherency is handled via
- * the set_pte_at() function.
+ * the set_ptes() function.
*/
#if __LINUX_ARM_ARCH__ < 6
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep);
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr);
#else
-static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+ unsigned int nr)
{
}
#endif
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
#endif
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 5a66c3b13c92..3dcd0f71a0da 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -12,44 +12,59 @@
.macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
- mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
- mcr p15, 0, \tpuser, c13, c0, 2 @ and the user r/w register
+ @ TLS register update is deferred until return to user space
+ mcr p15, 0, \tpuser, c13, c0, 2 @ set the user r/w register
str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
.endm
.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
- ldr \tmp1, =elf_hwcap
- ldr \tmp1, [\tmp1, #0]
+#ifdef CONFIG_SMP
+ALT_SMP(nop)
+ALT_UP_B(.L0_\@)
+ .subsection 1
+#endif
+.L0_\@:
+ ldr_va \tmp1, elf_hwcap
mov \tmp2, #0xffff0fff
tst \tmp1, #HWCAP_TLS @ hardware TLS available?
streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
- mrcne p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
- mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
- mcrne p15, 0, \tpuser, c13, c0, 2 @ set user r/w register
- strne \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
+ beq .L2_\@
+ mcr p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
+#ifdef CONFIG_SMP
+ b .L1_\@
+ .previous
+#endif
+.L1_\@: switch_tls_v6k \base, \tp, \tpuser, \tmp1, \tmp2
+.L2_\@:
.endm
.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
mov \tmp1, #0xffff0fff
str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0
.endm
+#else
+#include <asm/smp_plat.h>
#endif
#ifdef CONFIG_TLS_REG_EMUL
#define tls_emu 1
#define has_tls_reg 1
+#define defer_tls_reg_update 0
#define switch_tls switch_tls_none
#elif defined(CONFIG_CPU_V6)
#define tls_emu 0
#define has_tls_reg (elf_hwcap & HWCAP_TLS)
+#define defer_tls_reg_update is_smp()
#define switch_tls switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu 0
#define has_tls_reg 1
+#define defer_tls_reg_update 1
#define switch_tls switch_tls_v6k
#else
#define tls_emu 0
#define has_tls_reg 0
+#define defer_tls_reg_update 0
#define switch_tls switch_tls_software
#endif
@@ -78,10 +93,10 @@ static inline void set_tls(unsigned long val)
barrier();
if (!tls_emu) {
- if (has_tls_reg) {
+ if (has_tls_reg && !defer_tls_reg_update) {
asm("mcr p15, 0, %0, c13, c0, 3"
: : "r" (val));
- } else {
+ } else if (!has_tls_reg) {
#ifdef CONFIG_KUSER_HELPERS
/*
* User space must never try to access this
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 8a0fae94d45e..853c4f81ba4a 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -7,8 +7,14 @@
#include <linux/cpumask.h>
#include <linux/arch_topology.h>
+/* big.LITTLE switcher is incompatible with frequency invariance */
+#ifndef CONFIG_BL_SWITCHER
/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_set_freq_scale topology_set_freq_scale
#define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant
+#define arch_scale_freq_ref topology_get_freq_ref
+#endif
/* Replace task scheduler's default cpu-invariant accounting */
#define arch_scale_cpu_capacity topology_get_cpu_scale
@@ -16,6 +22,10 @@
/* Enable topology flag updates */
#define arch_update_cpu_topology topology_update_cpu_topology
+/* Replace task scheduler's default thermal pressure API */
+#define arch_scale_thermal_pressure topology_get_thermal_pressure
+#define arch_update_thermal_pressure topology_update_thermal_pressure
+
#else
static inline void init_cpu_topology(void) { }
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 172b08ff3760..2621b9fb9b19 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -2,6 +2,7 @@
#ifndef _ASMARM_TRAP_H
#define _ASMARM_TRAP_H
+#include <linux/linkage.h>
#include <linux/list.h>
struct pt_regs;
@@ -28,10 +29,20 @@ static inline int __in_irqentry_text(unsigned long ptr)
ptr < (unsigned long)&__irqentry_text_end;
}
-extern void __init early_trap_init(void *);
-extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
+extern void early_trap_init(void *);
+extern void dump_backtrace_entry(unsigned long where, unsigned long from,
+ unsigned long frame, const char *loglvl);
extern void ptrace_break(struct pt_regs *regs);
extern void *vectors_page;
+asmlinkage void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl);
+asmlinkage void do_undefinstr(struct pt_regs *regs);
+asmlinkage void handle_fiq_as_nmi(struct pt_regs *regs);
+asmlinkage void bad_mode(struct pt_regs *regs, int reason);
+asmlinkage int arm_syscall(int no, struct pt_regs *regs);
+asmlinkage void baddataabort(int code, unsigned long instr, struct pt_regs *regs);
+asmlinkage void __div0(void);
+asmlinkage void handle_bad_stack(struct pt_regs *regs);
+
#endif
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
new file mode 100644
index 000000000000..65da32e1f1c1
--- /dev/null
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_UACCESS_ASM_H__
+#define __ASM_UACCESS_ASM_H__
+
+#include <asm/asm-offsets.h>
+#include <asm/domain.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+ .macro csdb
+#ifdef CONFIG_THUMB2_KERNEL
+ .inst.w 0xf3af8014
+#else
+ .inst 0xe320f014
+#endif
+ .endm
+
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcscc \tmp, \tmp, \limit
+ bcs \bad
+#ifdef CONFIG_CPU_SPECTRE
+ movcs \addr, #0
+ csdb
+#endif
+#endif
+ .endm
+
+ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+ sub \tmp, \limit, #1
+ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
+ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
+ subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
+ movlo \addr, #0 @ if (tmp < 0) addr = NULL
+ csdb
+#endif
+ .endm
+
+ .macro uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_ENABLE
+ mcr p15, 0, \tmp, c3, c0, 0
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
+#define DACR(x...) x
+#else
+#define DACR(x...)
+#endif
+
+ /*
+ * Save the address limit on entry to a privileged exception.
+ *
+ * If we are using the DACR for kernel access by the user accessors
+ * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
+ * back to client mode, whether or not \disable is set.
+ *
+ * If we are using SW PAN, set the DACR user domain to no access
+ * if \disable is set.
+ */
+ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
+ DACR( mrc p15, 0, \tmp0, c3, c0, 0)
+ DACR( str \tmp0, [sp, #SVC_DACR])
+ .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
+ /* kernel=client, user=no access */
+ mov \tmp2, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
+ /* kernel=client */
+ bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
+ orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .endif
+ .endm
+
+ /* Restore the user access state previously saved by uaccess_entry */
+ .macro uaccess_exit, tsk, tmp0, tmp1
+ DACR( ldr \tmp0, [sp, #SVC_DACR])
+ DACR( mcr p15, 0, \tmp0, c3, c0, 0)
+ .endm
+
+#undef DACR
+
+#endif /* __ASM_UACCESS_ASM_H__ */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 98c6b91be4a8..9556d04387f7 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -8,9 +8,11 @@
/*
* User space memory access functions
*/
+#include <linux/kernel.h>
#include <linux/string.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/domain.h>
+#include <asm/unaligned.h>
#include <asm/unified.h>
#include <asm/compiler.h>
@@ -52,43 +54,8 @@ static __always_inline void uaccess_restore(unsigned int flags)
extern int __get_user_bad(void);
extern int __put_user_bad(void);
-/*
- * Note that this is actually 0x1,0000,0000
- */
-#define KERNEL_DS 0x00000000
-
#ifdef CONFIG_MMU
-#define USER_DS TASK_SIZE
-#define get_fs() (current_thread_info()->addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
-{
- current_thread_info()->addr_limit = fs;
-
- /*
- * Prevent a mispredicted conditional call to set_fs from forwarding
- * the wrong address limit to access_ok under speculation.
- */
- dsb(nsh);
- isb();
-
- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
-}
-
-#define segment_eq(a, b) ((a) == (b))
-
-/* We use 33-bit arithmetic here... */
-#define __range_ok(addr, size) ({ \
- unsigned long flag, roksum; \
- __chk_user_ptr(addr); \
- __asm__(".syntax unified\n" \
- "adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
- : "=&r" (flag), "=&r" (roksum) \
- : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
- : "cc"); \
- flag; })
-
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
@@ -116,7 +83,7 @@ static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
" subshs %1, %1, %2\n"
" movlo %0, #0\n"
: "+r" (safe_ptr), "=&r" (tmp)
- : "r" (size), "r" (current_thread_info()->addr_limit)
+ : "r" (size), "r" (TASK_SIZE)
: "cc");
csdb();
@@ -143,16 +110,6 @@ extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);
-#define __GUP_CLOBBER_1 "lr", "cc"
-#ifdef CONFIG_CPU_USE_DOMAINS
-#define __GUP_CLOBBER_2 "ip", "lr", "cc"
-#else
-#define __GUP_CLOBBER_2 "lr", "cc"
-#endif
-#define __GUP_CLOBBER_4 "lr", "cc"
-#define __GUP_CLOBBER_32t_8 "lr", "cc"
-#define __GUP_CLOBBER_8 "lr", "cc"
-
#define __get_user_x(__r2, __p, __e, __l, __s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%1", "r2") \
@@ -160,7 +117,7 @@ extern int __get_user_64t_4(void *);
"bl __get_user_" #__s \
: "=&r" (__e), "=r" (__r2) \
: "0" (__p), "r" (__l) \
- : __GUP_CLOBBER_##__s)
+ : "ip", "lr", "cc")
/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
@@ -182,7 +139,7 @@ extern int __get_user_64t_4(void *);
"bl __get_user_64t_" #__s \
: "=&r" (__e), "=r" (__r2) \
: "0" (__p), "r" (__l) \
- : __GUP_CLOBBER_##__s)
+ : "ip", "lr", "cc")
#else
#define __get_user_x_64t __get_user_x
#endif
@@ -190,12 +147,13 @@ extern int __get_user_64t_4(void *);
#define __get_user_check(x, p) \
({ \
- unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ unsigned long __limit = TASK_SIZE - 1; \
register typeof(*(p)) __user *__p asm("r0") = (p); \
register __inttype(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
+ int __tmp_e; \
switch (sizeof(*(__p))) { \
case 1: \
if (sizeof((x)) >= 8) \
@@ -223,9 +181,10 @@ extern int __get_user_64t_4(void *);
break; \
default: __e = __get_user_bad(); break; \
} \
+ __tmp_e = __e; \
uaccess_restore(__ua_flags); \
x = (typeof(*(p))) __r2; \
- __e; \
+ __tmp_e; \
})
#define get_user(x, p) \
@@ -241,7 +200,7 @@ extern int __put_user_8(void *, unsigned long long);
#define __put_user_check(__pu_val, __ptr, __err, __s) \
({ \
- unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ unsigned long __limit = TASK_SIZE - 1; \
register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
register const void __user *__p asm("r0") = __ptr; \
register unsigned long __l asm("r1") = __limit; \
@@ -258,29 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
#else /* CONFIG_MMU */
-/*
- * uClinux has only one addr space, so has simplified address limits.
- */
-#define USER_DS KERNEL_DS
-
-#define segment_eq(a, b) (1)
-#define __addr_ok(addr) ((void)(addr), 1)
-#define __range_ok(addr, size) ((void)(addr), 0)
-#define get_fs() (KERNEL_DS)
-
-static inline void set_fs(mm_segment_t fs)
-{
-}
-
#define get_user(x, p) __get_user(x, p)
#define __put_user_check __put_user_nocheck
#endif /* CONFIG_MMU */
-#define access_ok(addr, size) (__range_ok(addr, size) == 0)
-
-#define user_addr_max() \
- (uaccess_kernel() ? ~0UL : get_fs())
+#include <asm-generic/access_ok.h>
#ifdef CONFIG_CPU_SPECTRE
/*
@@ -304,11 +246,11 @@ static inline void set_fs(mm_segment_t fs)
#define __get_user(x, ptr) \
({ \
long __gu_err = 0; \
- __get_user_err((x), (ptr), __gu_err); \
+ __get_user_err((x), (ptr), __gu_err, TUSER()); \
__gu_err; \
})
-#define __get_user_err(x, ptr, err) \
+#define __get_user_err(x, ptr, err, __t) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
@@ -317,18 +259,19 @@ do { \
might_fault(); \
__ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
- case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
- case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
- case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
+ case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
+ case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
+ case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
default: (__gu_val) = __get_user_bad(); \
} \
uaccess_restore(__ua_flags); \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
+#endif
#define __get_user_asm(x, addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(instr) " %1, [%2], #0\n" \
+ "1: " instr " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -344,40 +287,38 @@ do { \
: "r" (addr), "i" (-EFAULT) \
: "cc")
-#define __get_user_asm_byte(x, addr, err) \
- __get_user_asm(x, addr, err, ldrb)
+#define __get_user_asm_byte(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldrb" __t)
#if __LINUX_ARM_ARCH__ >= 6
-#define __get_user_asm_half(x, addr, err) \
- __get_user_asm(x, addr, err, ldrh)
+#define __get_user_asm_half(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldrh" __t)
#else
#ifndef __ARMEB__
-#define __get_user_asm_half(x, __gu_addr, err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
unsigned long __b1, __b2; \
- __get_user_asm_byte(__b1, __gu_addr, err); \
- __get_user_asm_byte(__b2, __gu_addr + 1, err); \
+ __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+ __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
(x) = __b1 | (__b2 << 8); \
})
#else
-#define __get_user_asm_half(x, __gu_addr, err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
unsigned long __b1, __b2; \
- __get_user_asm_byte(__b1, __gu_addr, err); \
- __get_user_asm_byte(__b2, __gu_addr + 1, err); \
+ __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+ __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
(x) = (__b1 << 8) | __b2; \
})
#endif
#endif /* __LINUX_ARM_ARCH__ >= 6 */
-#define __get_user_asm_word(x, addr, err) \
- __get_user_asm(x, addr, err, ldr)
-#endif
-
+#define __get_user_asm_word(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldr" __t)
#define __put_user_switch(x, ptr, __err, __fn) \
do { \
@@ -421,7 +362,7 @@ do { \
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
do { \
unsigned long __pu_addr = (unsigned long)__pu_ptr; \
- __put_user_nocheck_##__size(x, __pu_addr, __err); \
+ __put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
} while (0)
#define __put_user_nocheck_1 __put_user_asm_byte
@@ -429,9 +370,11 @@ do { \
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword
+#endif /* !CONFIG_CPU_SPECTRE */
+
#define __put_user_asm(x, __pu_addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(instr) " %1, [%2], #0\n" \
+ "1: " instr " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -446,36 +389,36 @@ do { \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
-#define __put_user_asm_byte(x, __pu_addr, err) \
- __put_user_asm(x, __pu_addr, err, strb)
+#define __put_user_asm_byte(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "strb" __t)
#if __LINUX_ARM_ARCH__ >= 6
-#define __put_user_asm_half(x, __pu_addr, err) \
- __put_user_asm(x, __pu_addr, err, strh)
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "strh" __t)
#else
#ifndef __ARMEB__
-#define __put_user_asm_half(x, __pu_addr, err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
unsigned long __temp = (__force unsigned long)(x); \
- __put_user_asm_byte(__temp, __pu_addr, err); \
- __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
+ __put_user_asm_byte(__temp, __pu_addr, err, __t); \
+ __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
})
#else
-#define __put_user_asm_half(x, __pu_addr, err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
unsigned long __temp = (__force unsigned long)(x); \
- __put_user_asm_byte(__temp >> 8, __pu_addr, err); \
- __put_user_asm_byte(__temp, __pu_addr + 1, err); \
+ __put_user_asm_byte(__temp >> 8, __pu_addr, err, __t); \
+ __put_user_asm_byte(__temp, __pu_addr + 1, err, __t); \
})
#endif
#endif /* __LINUX_ARM_ARCH__ >= 6 */
-#define __put_user_asm_word(x, __pu_addr, err) \
- __put_user_asm(x, __pu_addr, err, str)
+#define __put_user_asm_word(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "str" __t)
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
@@ -485,12 +428,12 @@ do { \
#define __reg_oper1 "%R2"
#endif
-#define __put_user_asm_dword(x, __pu_addr, err) \
+#define __put_user_asm_dword(x, __pu_addr, err, __t) \
__asm__ __volatile__( \
- ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
- ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
- THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \
- THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
+ ARM( "1: str" __t " " __reg_oper1 ", [%1], #4\n" ) \
+ ARM( "2: str" __t " " __reg_oper0 ", [%1]\n" ) \
+ THUMB( "1: str" __t " " __reg_oper1 ", [%1]\n" ) \
+ THUMB( "2: str" __t " " __reg_oper0 ", [%1, #4]\n" ) \
"3:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -506,7 +449,52 @@ do { \
: "r" (x), "i" (-EFAULT) \
: "cc")
-#endif /* !CONFIG_CPU_SPECTRE */
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ const type *__pk_ptr = (src); \
+ unsigned long __src = (unsigned long)(__pk_ptr); \
+ type __val; \
+ int __err = 0; \
+ switch (sizeof(type)) { \
+ case 1: __get_user_asm_byte(__val, __src, __err, ""); break; \
+ case 2: __get_user_asm_half(__val, __src, __err, ""); break; \
+ case 4: __get_user_asm_word(__val, __src, __err, ""); break; \
+ case 8: { \
+ u32 *__v32 = (u32*)&__val; \
+ __get_user_asm_word(__v32[0], __src, __err, ""); \
+ if (__err) \
+ break; \
+ __get_user_asm_word(__v32[1], __src+4, __err, ""); \
+ break; \
+ } \
+ default: __err = __get_user_bad(); break; \
+ } \
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) \
+ put_unaligned(__val, (type *)(dst)); \
+ else \
+ *(type *)(dst) = __val; /* aligned by caller */ \
+ if (__err) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ const type *__pk_ptr = (dst); \
+ unsigned long __dst = (unsigned long)__pk_ptr; \
+ int __err = 0; \
+ type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
+ ? get_unaligned((type *)(src)) \
+ : *(type *)(src); /* aligned by caller */ \
+ switch (sizeof(type)) { \
+ case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \
+ case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \
+ case 4: __put_user_asm_word(__val, __dst, __err, ""); break; \
+ case 8: __put_user_asm_dword(__val, __dst, __err, ""); break; \
+ default: __err = __put_user_bad(); break; \
+ } \
+ if (__err) \
+ goto err_label; \
+} while (0)
#ifdef CONFIG_MMU
extern unsigned long __must_check
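Note: __get_kernel_nofault()/__put_kernel_nofault() are the arch backends behind the generic get_kernel_nofault() and copy_from_kernel_nofault() APIs; a faulting access is absorbed by the fixup table instead of oopsing. Typical caller shape:

	#include <linux/uaccess.h>

	static bool peek_kernel_word(const u32 *addr, u32 *out)
	{
		u32 val;

		if (get_kernel_nofault(val, addr))
			return false;		/* fault was caught, no oops */
		*out = val;
		return true;
	}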
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index 5c5e62cb304b..4048c92d9c2b 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -43,17 +43,6 @@ struct ucontext {
*/
#define DUMMY_MAGIC 0xb0d9ed01
-#ifdef CONFIG_CRUNCH
-#define CRUNCH_MAGIC 0x5065cf03
-#define CRUNCH_STORAGE_SIZE (CRUNCH_SIZE + 8)
-
-struct crunch_sigframe {
- unsigned long magic;
- unsigned long size;
- struct crunch_state storage;
-} __attribute__((__aligned__(8)));
-#endif
-
#ifdef CONFIG_IWMMXT
/* iwmmxt_area is 0x98 bytes long, preceded by 8 bytes of signature */
#define IWMMXT_MAGIC 0x12ef842a
@@ -92,9 +81,6 @@ struct vfp_sigframe
* one of these.
*/
struct aux_sigframe {
-#ifdef CONFIG_CRUNCH
- struct crunch_sigframe crunch;
-#endif
#ifdef CONFIG_IWMMXT
struct iwmmxt_sigframe iwmmxt;
#endif
diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
deleted file mode 100644
index ab905ffcf193..000000000000
--- a/arch/arm/include/asm/unaligned.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __ASM_ARM_UNALIGNED_H
-#define __ASM_ARM_UNALIGNED_H
-
-/*
- * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
- * but we don't want to use linux/unaligned/access_ok.h since that can lead
- * to traps on unaligned stm/ldm or strd/ldrd.
- */
-#include <asm/byteorder.h>
-
-#if defined(__LITTLE_ENDIAN)
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
-
-#endif /* __ASM_ARM_UNALIGNED_H */
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index 1e2c3eb04353..ce9689118dbb 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -24,10 +24,6 @@ __asm__(".syntax unified");
#ifdef CONFIG_THUMB2_KERNEL
-#if __GNUC__ < 4
-#error Thumb-2 kernel requires gcc >= 4
-#endif
-
/* The CPSR bit describing the instruction set (Thumb) */
#define PSR_ISETSTATE PSR_T_BIT
diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h
index 6e282c33126b..d60b09a5acfc 100644
--- a/arch/arm/include/asm/unwind.h
+++ b/arch/arm/include/asm/unwind.h
@@ -24,6 +24,7 @@ struct unwind_idx {
struct unwind_table {
struct list_head list;
+ struct list_head mod_list;
const struct unwind_idx *start;
const struct unwind_idx *origin;
const struct unwind_idx *stop;
@@ -36,7 +37,12 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
unsigned long text_addr,
unsigned long text_size);
extern void unwind_table_del(struct unwind_table *tab);
-extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
+extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
+
+void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr1(void);
+void __aeabi_unwind_cpp_pr2(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index c799a3c49342..167d44b550f4 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -77,10 +77,6 @@ struct user{
struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
/* the FP registers. */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
/*
* User specific VFP registers. If only VFPv2 is present, registers 16 to 31
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index 2cb00d15831b..4512f7e1918f 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -13,6 +13,7 @@
#define V7M_SCB_ICSR_PENDSVSET (1 << 28)
#define V7M_SCB_ICSR_PENDSVCLR (1 << 27)
#define V7M_SCB_ICSR_RETTOBASE (1 << 11)
+#define V7M_SCB_ICSR_VECTACTIVE 0x000001ff
#define V7M_SCB_VTOR 0x08
@@ -38,7 +39,7 @@
#define V7M_SCB_SHCSR_MEMFAULTENA (1 << 16)
#define V7M_xPSR_FRAMEPTRALIGN 0x00000200
-#define V7M_xPSR_EXCEPTIONNO 0x000001ff
+#define V7M_xPSR_EXCEPTIONNO V7M_SCB_ICSR_VECTACTIVE
/*
* When branching to an address that has bits [31:28] == 0xf an exception return
diff --git a/arch/arm/include/asm/vdso/clocksource.h b/arch/arm/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..50c0b19fb755
--- /dev/null
+++ b/arch/arm/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif /* __ASM_VDSOCLOCKSOURCE_H */
diff --git a/arch/arm/include/asm/vdso/cp15.h b/arch/arm/include/asm/vdso/cp15.h
new file mode 100644
index 000000000000..bed16fa1865e
--- /dev/null
+++ b/arch/arm/include/asm/vdso/cp15.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_CP15_H
+#define __ASM_VDSO_CP15_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_CPU_CP15
+
+#include <linux/stringify.h>
+
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
+ "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm) \
+ "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({ \
+ t __val; \
+ asm volatile(r " " c : "=r" (__val)); \
+ __val; \
+})
+#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+
+#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
+#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+
+#define CNTVCT __ACCESS_CP15_64(1, c14)
+
+#endif /* CONFIG_CPU_CP15 */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_CP15_H */
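
Each accessor macro bundles the read/write mnemonics, the coprocessor
operand string and the value type, so read_sysreg()/write_sysreg() expand
to a single instruction. Sketches of typical uses:

    u64 cycles = read_sysreg(CNTVCT);  /* mrrc p15, 1, %Q0, %R0, c14 */
    write_sysreg(0, ICIALLU);          /* mcr p15, 0, %0, c7, c5, 0  */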
diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h
index 0ad2429c324f..2134cbd5469f 100644
--- a/arch/arm/include/asm/vdso/gettimeofday.h
+++ b/arch/arm/include/asm/vdso/gettimeofday.h
@@ -8,8 +8,9 @@
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
-#include <asm/cp15.h>
+#include <asm/errno.h>
#include <asm/unistd.h>
+#include <asm/vdso/cp15.h>
#include <uapi/linux/time.h>
#define VDSO_HAS_CLOCK_GETRES 1
@@ -52,6 +53,24 @@ static __always_inline long clock_gettime_fallback(
return ret;
}
+static __always_inline long clock_gettime32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+	register long ret asm("r0");
+ register long nr asm("r7") = __NR_clock_gettime;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
static __always_inline int clock_getres_fallback(
clockid_t _clkid,
struct __kernel_timespec *_ts)
@@ -70,20 +89,51 @@ static __always_inline int clock_getres_fallback(
return ret;
}
-static __always_inline u64 __arch_get_hw_counter(int clock_mode)
+static __always_inline int clock_getres32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+	register long ret asm("r0");
+ register long nr asm("r7") = __NR_clock_getres;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static inline bool arm_vdso_hres_capable(void)
+{
+ return IS_ENABLED(CONFIG_ARM_ARCH_TIMER);
+}
+#define __arch_vdso_hres_capable arm_vdso_hres_capable
+
+static __always_inline u64 __arch_get_hw_counter(int clock_mode,
+ const struct vdso_data *vd)
{
#ifdef CONFIG_ARM_ARCH_TIMER
u64 cycle_now;
- if (!clock_mode)
- return -EINVAL;
+ /*
+	 * The core has already checked the mode, so getting here means this
+	 * call raced against a concurrent update. Return something; the core
+	 * will do another round, see the mode change and fall back to the
+	 * syscall.
+ */
+ if (clock_mode == VDSO_CLOCKMODE_NONE)
+ return 0;
isb();
cycle_now = read_sysreg(CNTVCT);
return cycle_now;
#else
- return -EINVAL; /* use fallback */
+ /* Make GCC happy. This is compiled out anyway */
+ return 0;
#endif
}
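
The fallbacks pin their operands to the EABI convention: arguments in
r0/r1, syscall number in r7, result back in r0. The same discipline in a
stand-alone sketch, with a hypothetical wrapper name:

    /* Sketch: raw one-argument EABI syscall, mirroring the pattern above. */
    static inline long raw_syscall1(long nr, long arg)
    {
            register long r0 asm("r0") = arg;
            register long r7 asm("r7") = nr;

            asm volatile("swi #0" : "+r" (r0) : "r" (r7) : "memory");
            return r0;
    }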
diff --git a/arch/arm/include/asm/vdso/processor.h b/arch/arm/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..45efb3ff511c
--- /dev/null
+++ b/arch/arm/include/asm/vdso/processor.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
+#define cpu_relax() \
+ do { \
+ smp_mb(); \
+ __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
+ } while (0)
+#else
+#define cpu_relax() barrier()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
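
cpu_relax() is intended for busy-wait loops. A minimal sketch of the
idiom (flag being a hypothetical variable written by another CPU):

    while (!READ_ONCE(flag))
            cpu_relax();    /* barrier(), plus smp_mb() and nops on ARMv6 */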
diff --git a/arch/arm/include/asm/vdso/vsyscall.h b/arch/arm/include/asm/vdso/vsyscall.h
index c4166f317071..47e41ae8ccd0 100644
--- a/arch/arm/include/asm/vdso/vsyscall.h
+++ b/arch/arm/include/asm/vdso/vsyscall.h
@@ -11,18 +11,6 @@
extern struct vdso_data *vdso_data;
extern bool cntvct_ok;
-static __always_inline
-bool tk_is_cntvct(const struct timekeeper *tk)
-{
- if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
- return false;
-
- if (!tk->tkr_mono.clock->archdata.vdso_direct)
- return false;
-
- return true;
-}
-
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
@@ -34,29 +22,6 @@ struct vdso_data *__arm_get_k_vdso_data(void)
#define __arch_get_k_vdso_data __arm_get_k_vdso_data
static __always_inline
-int __arm_update_vdso_data(void)
-{
- return !cntvct_ok;
-}
-#define __arch_update_vdso_data __arm_update_vdso_data
-
-static __always_inline
-int __arm_get_clock_mode(struct timekeeper *tk)
-{
- u32 __tk_is_cntvct = tk_is_cntvct(tk);
-
- return __tk_is_cntvct;
-}
-#define __arch_get_clock_mode __arm_get_clock_mode
-
-static __always_inline
-int __arm_use_vsyscall(struct vdso_data *vdata)
-{
- return vdata[CS_HRES_COARSE].clock_mode;
-}
-#define __arch_use_vsyscall __arm_use_vsyscall
-
-static __always_inline
void __arm_sync_vdso_data(struct vdso_data *vdata)
{
flush_dcache_page(virt_to_page(vdata));
diff --git a/arch/arm/include/asm/vermagic.h b/arch/arm/include/asm/vermagic.h
new file mode 100644
index 000000000000..62ce94e26a63
--- /dev/null
+++ b/arch/arm/include/asm/vermagic.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#include <linux/stringify.h>
+
+/*
+ * Add the ARM architecture version to the version magic string
+ */
+#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+
+/* Add __virt_to_phys patching state as well */
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+#else
+#define MODULE_ARCH_VERMAGIC_P2V ""
+#endif
+
+/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
+#ifdef CONFIG_THUMB2_KERNEL
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
+#else
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+ MODULE_ARCH_VERMAGIC_ARMVSN \
+ MODULE_ARCH_VERMAGIC_ARMTHUMB \
+ MODULE_ARCH_VERMAGIC_P2V
+
+#endif /* _ASM_VERMAGIC_H */
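
As an illustration, on an ARMv7 Thumb-2 kernel with phys/virt patching
enabled the three components concatenate into:

    /* MODULE_ARCH_VERMAGIC == "ARMv7 " "thumb2 " "p2v8 "
     *                      == "ARMv7 thumb2 p2v8 " */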
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index 7157d2a30a49..157ea3426158 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -9,6 +9,7 @@
#ifndef __ASM_VFP_H
#define __ASM_VFP_H
+#ifndef CONFIG_AS_VFP_VMRS_FPINST
#define FPSID cr0
#define FPSCR cr1
#define MVFR1 cr6
@@ -16,6 +17,7 @@
#define FPEXC cr8
#define FPINST cr9
#define FPINST2 cr10
+#endif
/* FPSID bits */
#define FPSID_IMPLEMENTER_BIT (24)
@@ -85,6 +87,12 @@
#define MVFR0_DP_BIT (8)
#define MVFR0_DP_MASK (0xf << MVFR0_DP_BIT)
+/* MVFR1 bits */
+#define MVFR1_ASIMDHP_BIT (20)
+#define MVFR1_ASIMDHP_MASK (0xf << MVFR1_ASIMDHP_BIT)
+#define MVFR1_FPHP_BIT (24)
+#define MVFR1_FPHP_MASK (0xf << MVFR1_FPHP_BIT)
+
/* Bit patterns for decoding the packaged operation descriptors */
#define VFPOPDESC_LENGTH_BIT (9)
#define VFPOPDESC_LENGTH_MASK (0x07 << VFPOPDESC_LENGTH_BIT)
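
The new MVFR1 fields advertise half-precision (FP16) support. A sketch of
decoding them, assuming mvfr1 holds a value read via VFPFMRX/vmrs:

    /* Sketch: a nonzero field value means the FP16 facility is present. */
    static inline bool vfp_has_fp16(u32 mvfr1)
    {
            return (mvfr1 & MVFR1_FPHP_MASK) >> MVFR1_FPHP_BIT;
    }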
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index 628c336e8e3b..ba0d4cb5377e 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -8,7 +8,16 @@
#include <asm/vfp.h>
-@ Macros to allow building with old toolkits (with no VFP support)
+#ifdef CONFIG_AS_VFP_VMRS_FPINST
+ .macro VFPFMRX, rd, sysreg, cond
+ vmrs\cond \rd, \sysreg
+ .endm
+
+ .macro VFPFMXR, sysreg, rd, cond
+ vmsr\cond \sysreg, \rd
+ .endm
+#else
+ @ Macros to allow building with old toolkits (with no VFP support)
.macro VFPFMRX, rd, sysreg, cond
MRC\cond p10, 7, \rd, \sysreg, cr0, 0 @ FMRX \rd, \sysreg
.endm
@@ -16,26 +25,29 @@
.macro VFPFMXR, sysreg, rd, cond
MCR\cond p10, 7, \rd, \sysreg, cr0, 0 @ FMXR \sysreg, \rd
.endm
+#endif
@ read all the working registers back into the VFP
.macro VFPFLDMIA, base, tmp
+ .fpu vfpv2
#if __LINUX_ARM_ARCH__ < 6
- LDC p11, cr0, [\base],#33*4 @ FLDMIAX \base!, {d0-d15}
+ fldmiax \base!, {d0-d15}
#else
- LDC p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d0-d15}
+ vldmia \base!, {d0-d15}
#endif
#ifdef CONFIG_VFPv3
+ .fpu vfpv3
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32
- ldclne p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ vldmiane \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers?
- ldcleq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ vldmiaeq \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#endif
#endif
@@ -44,22 +56,23 @@
@ write all the working registers out of the VFP
.macro VFPFSTMIA, base, tmp
#if __LINUX_ARM_ARCH__ < 6
- STC p11, cr0, [\base],#33*4 @ FSTMIAX \base!, {d0-d15}
+ fstmiax \base!, {d0-d15}
#else
- STC p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d0-d15}
+ vstmia \base!, {d0-d15}
#endif
#ifdef CONFIG_VFPv3
+ .fpu vfpv3
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32
- stclne p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ vstmiane \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers?
- stcleq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ vstmiaeq \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#endif
#endif
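
The same new-assembler/old-assembler split applies when these registers
are accessed from C inline asm. A sketch reading FPINST both ways (the
two encodings assemble to the identical instruction; the vmrs form may
additionally need an appropriate -mfpu/.fpu setting to build):

    u32 fpinst;
    #ifdef CONFIG_AS_VFP_VMRS_FPINST
            asm volatile("vmrs %0, fpinst" : "=r" (fpinst));
    #else   /* hand-encoded FMRX: FPINST is cr9 in coprocessor space p10 */
            asm volatile("mrc p10, 7, %0, cr9, cr0, 0" : "=r" (fpinst));
    #endif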
diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h
index 7c0bee57855a..6c430ec371df 100644
--- a/arch/arm/include/asm/vga.h
+++ b/arch/arm/include/asm/vga.h
@@ -5,6 +5,7 @@
#include <linux/io.h>
extern unsigned long vga_base;
+extern struct screen_info vgacon_screen_info;
#define VGA_MAP_MEM(x,s) (vga_base + (x))
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 17c26ccd126d..dd9697b2bde8 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -39,8 +39,6 @@ static inline void sync_boot_mode(void)
sync_cache_r(&__boot_cpu_mode);
}
-void __hyp_set_vectors(unsigned long phys_vector_base);
-void __hyp_reset_vectors(void);
#else
#define __boot_cpu_mode (SVC_MODE)
#define sync_boot_mode()
@@ -67,18 +65,6 @@ static inline bool is_kernel_in_hyp_mode(void)
return false;
}
-static inline bool has_vhe(void)
-{
- return false;
-}
-
-/* The section containing the hypervisor idmap text */
-extern char __hyp_idmap_text_start[];
-extern char __hyp_idmap_text_end[];
-
-/* The section containing the hypervisor text */
-extern char __hyp_text_start[];
-extern char __hyp_text_end[];
#endif
#else
@@ -87,9 +73,6 @@ extern char __hyp_text_end[];
#define HVC_SET_VECTORS 0
#define HVC_SOFT_RESTART 1
-#define HVC_RESET_VECTORS 2
-
-#define HVC_STUB_HCALL_NR 3
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/vmalloc.h b/arch/arm/include/asm/vmalloc.h
new file mode 100644
index 000000000000..a9b3718b8600
--- /dev/null
+++ b/arch/arm/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_ARM_VMALLOC_H
+#define _ASM_ARM_VMALLOC_H
+
+#endif /* _ASM_ARM_VMALLOC_H */
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
new file mode 100644
index 000000000000..4c8632d5c432
--- /dev/null
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm-generic/vmlinux.lds.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define ARM_CPU_DISCARD(x)
+#define ARM_CPU_KEEP(x) x
+#else
+#define ARM_CPU_DISCARD(x) x
+#define ARM_CPU_KEEP(x)
+#endif
+
+#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
+#define ARM_EXIT_KEEP(x) x
+#define ARM_EXIT_DISCARD(x)
+#else
+#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x) x
+#endif
+
+#ifdef CONFIG_MMU
+#define ARM_MMU_KEEP(x) x
+#define ARM_MMU_DISCARD(x)
+#else
+#define ARM_MMU_KEEP(x)
+#define ARM_MMU_DISCARD(x) x
+#endif
+
+/*
+ * ld.lld does not support NOCROSSREFS:
+ * https://github.com/ClangBuiltLinux/linux/issues/1609
+ */
+#ifdef CONFIG_LD_IS_LLD
+#define NOCROSSREFS
+#endif
+
+/* Set start/end symbol names to the LMA for the section */
+#define ARM_LMA(sym, section) \
+ sym##_start = LOADADDR(section); \
+ sym##_end = LOADADDR(section) + SIZEOF(section)
+
+#define PROC_INFO \
+ . = ALIGN(4); \
+ __proc_info_begin = .; \
+ *(.proc.info.init) \
+ __proc_info_end = .;
+
+#define IDMAP_TEXT \
+ ALIGN_FUNCTION(); \
+ __idmap_text_start = .; \
+ *(.idmap.text) \
+ __idmap_text_end = .; \
+
+#define ARM_DISCARD \
+ *(.ARM.exidx.exit.text) \
+ *(.ARM.extab.exit.text) \
+ *(.ARM.exidx.text.exit) \
+ *(.ARM.extab.text.exit) \
+ ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
+ ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
+ ARM_EXIT_DISCARD(EXIT_TEXT) \
+ ARM_EXIT_DISCARD(EXIT_DATA) \
+ EXIT_CALL \
+ ARM_MMU_DISCARD(*(.text.fixup)) \
+ ARM_MMU_DISCARD(*(__ex_table)) \
+ COMMON_DISCARDS
+
+/*
+ * Sections that should stay zero-sized; it is safer to check this
+ * explicitly than to discard them blindly.
+ */
+#define ARM_ASSERTS \
+ .plt : { \
+ *(.iplt) *(.rel.iplt) *(.iplt) *(.igot.plt) \
+ } \
+ ASSERT(SIZEOF(.plt) == 0, \
+ "Unexpected run-time procedure linkages detected!")
+
+#define ARM_DETAILS \
+ ELF_DETAILS \
+ .ARM.attributes 0 : { *(.ARM.attributes) }
+
+#define ARM_STUBS_TEXT \
+ *(.gnu.warning) \
+ *(.glue_7) \
+ *(.glue_7t) \
+ *(.vfp11_veneer) \
+ *(.v4_bx)
+
+#define ARM_TEXT \
+ IDMAP_TEXT \
+ __entry_text_start = .; \
+ *(.entry.text) \
+ __entry_text_end = .; \
+ IRQENTRY_TEXT \
+ SOFTIRQENTRY_TEXT \
+ TEXT_TEXT \
+ SCHED_TEXT \
+ LOCK_TEXT \
+ KPROBES_TEXT \
+ ARM_STUBS_TEXT \
+ . = ALIGN(4); \
+ *(.got) /* Global offset table */ \
+ ARM_CPU_KEEP(PROC_INFO)
+
+/* Stack unwinding tables */
+#define ARM_UNWIND_SECTIONS \
+ . = ALIGN(8); \
+ .ARM.unwind_idx : { \
+ __start_unwind_idx = .; \
+ *(.ARM.exidx*) \
+ __stop_unwind_idx = .; \
+ } \
+ .ARM.unwind_tab : { \
+ __start_unwind_tab = .; \
+ *(.ARM.extab*) \
+ __stop_unwind_tab = .; \
+ }
+
+/*
+ * The vectors and stubs are relocatable code; all that
+ * matters is their relative offsets.
+ */
+#define ARM_VECTORS \
+ __vectors_lma = .; \
+ OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \
+ .vectors { \
+ *(.vectors) \
+ } \
+ .vectors.bhb.loop8 { \
+ *(.vectors.bhb.loop8) \
+ } \
+ .vectors.bhb.bpiall { \
+ *(.vectors.bhb.bpiall) \
+ } \
+ } \
+ ARM_LMA(__vectors, .vectors); \
+ ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8); \
+ ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall); \
+ . = __vectors_lma + SIZEOF(.vectors) + \
+ SIZEOF(.vectors.bhb.loop8) + \
+ SIZEOF(.vectors.bhb.bpiall); \
+ \
+ __stubs_lma = .; \
+ .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) { \
+ *(.stubs) \
+ } \
+ ARM_LMA(__stubs, .stubs); \
+ . = __stubs_lma + SIZEOF(.stubs); \
+ \
+ PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
+
+#define ARM_TCM \
+ __itcm_start = ALIGN(4); \
+ .text_itcm ITCM_OFFSET : AT(__itcm_start - LOAD_OFFSET) { \
+ __sitcm_text = .; \
+ *(.tcm.text) \
+ *(.tcm.rodata) \
+ . = ALIGN(4); \
+ __eitcm_text = .; \
+ } \
+ . = __itcm_start + SIZEOF(.text_itcm); \
+ \
+ __dtcm_start = .; \
+ .data_dtcm DTCM_OFFSET : AT(__dtcm_start - LOAD_OFFSET) { \
+ __sdtcm_data = .; \
+ *(.tcm.data) \
+ . = ALIGN(4); \
+ __edtcm_data = .; \
+ } \
+ . = __dtcm_start + SIZEOF(.data_dtcm);
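
As a concrete expansion, ARM_LMA(__vectors, .vectors) emits the two
symbol assignments the copy code uses to find the vectors' load address:

    __vectors_start = LOADADDR(.vectors);
    __vectors_end   = LOADADDR(.vectors) + SIZEOF(.vectors);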
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 31bbc803cecb..dc7f6e91aafa 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1 +1,6 @@
#include <xen/arm/page.h>
+
+static inline bool xen_kernel_unmapped_at_usr(void)
+{
+ return false;
+}
diff --git a/arch/arm/include/asm/xen/swiotlb-xen.h b/arch/arm/include/asm/xen/swiotlb-xen.h
new file mode 100644
index 000000000000..455ade5d5320
--- /dev/null
+++ b/arch/arm/include/asm/xen/swiotlb-xen.h
@@ -0,0 +1 @@
+#include <xen/arm/swiotlb-xen.h>
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/xen-ops.h
index 27e984977402..7ebb7eb0bd93 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/xen-ops.h
@@ -1,2 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <xen/arm/page-coherent.h>
+#include <xen/arm/xen-ops.h>
diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h
index aefddec79286..934b549905f5 100644
--- a/arch/arm/include/asm/xor.h
+++ b/arch/arm/include/asm/xor.h
@@ -44,13 +44,14 @@
: "0" (dst), "r" (a1), "r" (a2), "r" (a3), "r" (a4))
static void
-xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_arm4regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
unsigned int lines = bytes / sizeof(unsigned long) / 4;
register unsigned int a1 __asm__("r4");
register unsigned int a2 __asm__("r5");
register unsigned int a3 __asm__("r6");
- register unsigned int a4 __asm__("r7");
+ register unsigned int a4 __asm__("r10");
register unsigned int b1 __asm__("r8");
register unsigned int b2 __asm__("r9");
register unsigned int b3 __asm__("ip");
@@ -64,14 +65,15 @@ xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_arm4regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
unsigned int lines = bytes / sizeof(unsigned long) / 4;
register unsigned int a1 __asm__("r4");
register unsigned int a2 __asm__("r5");
register unsigned int a3 __asm__("r6");
- register unsigned int a4 __asm__("r7");
+ register unsigned int a4 __asm__("r10");
register unsigned int b1 __asm__("r8");
register unsigned int b2 __asm__("r9");
register unsigned int b3 __asm__("ip");
@@ -86,8 +88,10 @@ xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_arm4regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
unsigned int lines = bytes / sizeof(unsigned long) / 2;
register unsigned int a1 __asm__("r8");
@@ -105,8 +109,11 @@ xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_arm4regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_arm4regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
unsigned int lines = bytes / sizeof(unsigned long) / 2;
register unsigned int a1 __asm__("r8");
@@ -146,7 +153,8 @@ static struct xor_block_template xor_block_arm4regs = {
extern struct xor_block_template const xor_block_neon_inner;
static void
-xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
if (in_interrupt()) {
xor_arm4regs_2(bytes, p1, p2);
@@ -158,8 +166,9 @@ xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
if (in_interrupt()) {
xor_arm4regs_3(bytes, p1, p2, p3);
@@ -171,8 +180,10 @@ xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
if (in_interrupt()) {
xor_arm4regs_4(bytes, p1, p2, p3, p4);
@@ -184,8 +195,11 @@ xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
if (in_interrupt()) {
xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);