Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/Kbuild | 2
-rw-r--r--  arch/parisc/Kconfig | 117
-rw-r--r--  arch/parisc/Kconfig.debug | 22
-rw-r--r--  arch/parisc/Makefile | 22
-rw-r--r--  arch/parisc/boot/Makefile | 2
-rw-r--r--  arch/parisc/boot/compressed/Makefile | 5
-rw-r--r--  arch/parisc/boot/compressed/misc.c | 12
-rw-r--r--  arch/parisc/configs/generic-32bit_defconfig | 66
-rw-r--r--  arch/parisc/configs/generic-64bit_defconfig | 59
-rw-r--r--  arch/parisc/include/asm/Kbuild | 1
-rw-r--r--  arch/parisc/include/asm/agp.h | 21
-rw-r--r--  arch/parisc/include/asm/alternative.h | 13
-rw-r--r--  arch/parisc/include/asm/assembly.h | 28
-rw-r--r--  arch/parisc/include/asm/atomic.h | 27
-rw-r--r--  arch/parisc/include/asm/barrier.h | 4
-rw-r--r--  arch/parisc/include/asm/bitops.h | 6
-rw-r--r--  arch/parisc/include/asm/bug.h | 46
-rw-r--r--  arch/parisc/include/asm/bugs.h | 20
-rw-r--r--  arch/parisc/include/asm/cache.h | 16
-rw-r--r--  arch/parisc/include/asm/cacheflush.h | 34
-rw-r--r--  arch/parisc/include/asm/cachetype.h | 9
-rw-r--r--  arch/parisc/include/asm/checksum.h | 10
-rw-r--r--  arch/parisc/include/asm/cmpxchg.h | 26
-rw-r--r--  arch/parisc/include/asm/current.h | 4
-rw-r--r--  arch/parisc/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/parisc/include/asm/dma.h | 2
-rw-r--r--  arch/parisc/include/asm/dwarf.h | 4
-rw-r--r--  arch/parisc/include/asm/elf.h | 13
-rw-r--r--  arch/parisc/include/asm/extable.h | 64
-rw-r--r--  arch/parisc/include/asm/fb.h | 24
-rw-r--r--  arch/parisc/include/asm/fixmap.h | 4
-rw-r--r--  arch/parisc/include/asm/floppy.h | 11
-rw-r--r--  arch/parisc/include/asm/ftrace.h | 8
-rw-r--r--  arch/parisc/include/asm/grfioctl.h | 38
-rw-r--r--  arch/parisc/include/asm/hugetlb.h | 19
-rw-r--r--  arch/parisc/include/asm/ide.h | 54
-rw-r--r--  arch/parisc/include/asm/io.h | 60
-rw-r--r--  arch/parisc/include/asm/irqflags.h | 5
-rw-r--r--  arch/parisc/include/asm/jump_label.h | 16
-rw-r--r--  arch/parisc/include/asm/kexec.h | 4
-rw-r--r--  arch/parisc/include/asm/kgdb.h | 4
-rw-r--r--  arch/parisc/include/asm/kprobes.h | 3
-rw-r--r--  arch/parisc/include/asm/ldcw.h | 39
-rw-r--r--  arch/parisc/include/asm/led.h | 16
-rw-r--r--  arch/parisc/include/asm/linkage.h | 4
-rw-r--r--  arch/parisc/include/asm/machdep.h | 17
-rw-r--r--  arch/parisc/include/asm/mckinley.h | 8
-rw-r--r--  arch/parisc/include/asm/mman.h | 29
-rw-r--r--  arch/parisc/include/asm/page.h | 24
-rw-r--r--  arch/parisc/include/asm/parisc-device.h | 4
-rw-r--r--  arch/parisc/include/asm/pdc.h | 14
-rw-r--r--  arch/parisc/include/asm/pdcpat.h | 4
-rw-r--r--  arch/parisc/include/asm/perf_event.h | 8
-rw-r--r--  arch/parisc/include/asm/pgalloc.h | 39
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 126
-rw-r--r--  arch/parisc/include/asm/prefetch.h | 4
-rw-r--r--  arch/parisc/include/asm/processor.h | 43
-rw-r--r--  arch/parisc/include/asm/psw.h | 4
-rw-r--r--  arch/parisc/include/asm/ropes.h | 9
-rw-r--r--  arch/parisc/include/asm/runway.h | 3
-rw-r--r--  arch/parisc/include/asm/shmparam.h | 15
-rw-r--r--  arch/parisc/include/asm/signal.h | 16
-rw-r--r--  arch/parisc/include/asm/smp.h | 4
-rw-r--r--  arch/parisc/include/asm/special_insns.h | 34
-rw-r--r--  arch/parisc/include/asm/spinlock.h | 37
-rw-r--r--  arch/parisc/include/asm/spinlock_types.h | 17
-rw-r--r--  arch/parisc/include/asm/syscall.h | 19
-rw-r--r--  arch/parisc/include/asm/text-patching.h (renamed from arch/parisc/include/asm/patch.h) | 0
-rw-r--r--  arch/parisc/include/asm/thread_info.h | 4
-rw-r--r--  arch/parisc/include/asm/traps.h | 2
-rw-r--r--  arch/parisc/include/asm/uaccess.h | 68
-rw-r--r--  arch/parisc/include/asm/unaligned.h | 11
-rw-r--r--  arch/parisc/include/asm/unistd.h | 59
-rw-r--r--  arch/parisc/include/asm/vdso.h | 8
-rw-r--r--  arch/parisc/include/asm/video.h | 16
-rw-r--r--  arch/parisc/include/uapi/asm/cachectl.h | 12
-rw-r--r--  arch/parisc/include/uapi/asm/errno.h | 2
-rw-r--r--  arch/parisc/include/uapi/asm/ioctls.h | 8
-rw-r--r--  arch/parisc/include/uapi/asm/mman.h | 3
-rw-r--r--  arch/parisc/include/uapi/asm/pdc.h | 32
-rw-r--r--  arch/parisc/include/uapi/asm/perf_regs.h | 63
-rw-r--r--  arch/parisc/include/uapi/asm/signal.h | 14
-rw-r--r--  arch/parisc/include/uapi/asm/socket.h | 18
-rwxr-xr-x  arch/parisc/install.sh | 2
-rw-r--r--  arch/parisc/kernel/Makefile | 8
-rw-r--r--  arch/parisc/kernel/alternative.c | 2
-rw-r--r--  arch/parisc/kernel/asm-offsets.c | 5
-rw-r--r--  arch/parisc/kernel/audit.c | 9
-rw-r--r--  arch/parisc/kernel/cache.c | 544
-rw-r--r--  arch/parisc/kernel/compat_audit.c | 16
-rw-r--r--  arch/parisc/kernel/drivers.c | 66
-rw-r--r--  arch/parisc/kernel/entry.S | 134
-rw-r--r--  arch/parisc/kernel/firmware.c | 178
-rw-r--r--  arch/parisc/kernel/ftrace.c | 11
-rw-r--r--  arch/parisc/kernel/head.S | 21
-rw-r--r--  arch/parisc/kernel/irq.c | 11
-rw-r--r--  arch/parisc/kernel/jump_label.c | 2
-rw-r--r--  arch/parisc/kernel/kexec.c | 2
-rw-r--r--  arch/parisc/kernel/kexec_file.c | 8
-rw-r--r--  arch/parisc/kernel/kgdb.c | 2
-rw-r--r--  arch/parisc/kernel/kprobes.c | 2
-rw-r--r--  arch/parisc/kernel/module.c | 65
-rw-r--r--  arch/parisc/kernel/pa7300lc.c | 51
-rw-r--r--  arch/parisc/kernel/pacache.S | 2
-rw-r--r--  arch/parisc/kernel/parisc_ksyms.c | 12
-rw-r--r--  arch/parisc/kernel/patch.c | 2
-rw-r--r--  arch/parisc/kernel/pci-dma.c | 34
-rw-r--r--  arch/parisc/kernel/pdc_chassis.c | 23
-rw-r--r--  arch/parisc/kernel/pdt.c | 8
-rw-r--r--  arch/parisc/kernel/perf.c | 11
-rw-r--r--  arch/parisc/kernel/perf_event.c | 27
-rw-r--r--  arch/parisc/kernel/perf_regs.c | 61
-rw-r--r--  arch/parisc/kernel/process.c | 41
-rw-r--r--  arch/parisc/kernel/processor.c | 32
-rw-r--r--  arch/parisc/kernel/ptrace.c | 29
-rw-r--r--  arch/parisc/kernel/real2.S | 5
-rw-r--r--  arch/parisc/kernel/setup.c | 121
-rw-r--r--  arch/parisc/kernel/signal.c | 12
-rw-r--r--  arch/parisc/kernel/smp.c | 30
-rw-r--r--  arch/parisc/kernel/sys_parisc.c | 222
-rw-r--r--  arch/parisc/kernel/sys_parisc32.c | 9
-rw-r--r--  arch/parisc/kernel/syscall.S | 67
-rw-r--r--  arch/parisc/kernel/syscalls/syscall.tbl | 29
-rw-r--r--  arch/parisc/kernel/time.c | 261
-rw-r--r--  arch/parisc/kernel/traps.c | 38
-rw-r--r--  arch/parisc/kernel/unaligned.c | 128
-rw-r--r--  arch/parisc/kernel/unaligned.h | 3
-rw-r--r--  arch/parisc/kernel/unwind.c | 27
-rw-r--r--  arch/parisc/kernel/vdso32/Makefile | 27
-rw-r--r--  arch/parisc/kernel/vdso32/vdso32.lds.S | 3
-rw-r--r--  arch/parisc/kernel/vdso32/vdso32_generic.c | 32
-rw-r--r--  arch/parisc/kernel/vdso64/Makefile | 29
-rw-r--r--  arch/parisc/kernel/vdso64/vdso64.lds.S | 2
-rw-r--r--  arch/parisc/kernel/vdso64/vdso64_generic.c | 24
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S | 5
-rw-r--r--  arch/parisc/lib/bitops.c | 52
-rw-r--r--  arch/parisc/lib/checksum.c | 13
-rw-r--r--  arch/parisc/lib/io.c | 166
-rw-r--r--  arch/parisc/lib/memcpy.c | 18
-rw-r--r--  arch/parisc/lib/ucmpdi2.c | 3
-rw-r--r--  arch/parisc/math-emu/Makefile | 3
-rw-r--r--  arch/parisc/math-emu/dfsqrt.c | 4
-rw-r--r--  arch/parisc/math-emu/driver.c | 22
-rw-r--r--  arch/parisc/math-emu/fcnvff.c | 8
-rw-r--r--  arch/parisc/math-emu/fcnvfu.c | 16
-rw-r--r--  arch/parisc/math-emu/fcnvfut.c | 16
-rw-r--r--  arch/parisc/math-emu/fcnvfx.c | 16
-rw-r--r--  arch/parisc/math-emu/fcnvfxt.c | 16
-rw-r--r--  arch/parisc/math-emu/fcnvuf.c | 16
-rw-r--r--  arch/parisc/math-emu/fcnvxf.c | 16
-rw-r--r--  arch/parisc/math-emu/frnd.c | 8
-rw-r--r--  arch/parisc/math-emu/sfsqrt.c | 4
-rw-r--r--  arch/parisc/mm/fault.c | 95
-rw-r--r--  arch/parisc/mm/fixmap.c | 3
-rw-r--r--  arch/parisc/mm/hugetlbpage.c | 40
-rw-r--r--  arch/parisc/mm/init.c | 164
-rw-r--r--  arch/parisc/mm/ioremap.c | 62
-rw-r--r--  arch/parisc/net/Makefile | 9
-rw-r--r--  arch/parisc/net/bpf_jit.h | 479
-rw-r--r--  arch/parisc/net/bpf_jit_comp32.c | 1615
-rw-r--r--  arch/parisc/net/bpf_jit_comp64.c | 1209
-rw-r--r--  arch/parisc/net/bpf_jit_core.c | 207
-rw-r--r--  arch/parisc/video/Makefile | 3
-rw-r--r--  arch/parisc/video/video-sti.c | 27
164 files changed, 6260 insertions, 2309 deletions
diff --git a/arch/parisc/Kbuild b/arch/parisc/Kbuild
index a6d3b280ba0c..749b195f2894 100644
--- a/arch/parisc/Kbuild
+++ b/arch/parisc/Kbuild
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += mm/ kernel/ math-emu/
+obj-y += mm/ kernel/ math-emu/ net/
# for cleaning
subdir- += boot
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index a98940e64243..47fd9662d800 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -8,25 +8,32 @@ config PARISC
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_SYSCALL_TRACEPOINTS
select ARCH_WANT_FRAME_POINTERS
+ select ARCH_HAS_CPU_CACHE_ALIASING
+ select ARCH_HAS_DMA_ALLOC if PA11
+ select ARCH_HAS_DMA_OPS
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
- select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_UBSAN
select ARCH_HAS_PTE_SPECIAL
select ARCH_NO_SG_CHAIN
+ select ARCH_SPLIT_ARG64 if !64BIT
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_STACKWALK
+ select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VM_PGTABLE
select HAVE_RELIABLE_STACKTRACE
- select DMA_OPS
select RTC_CLASS
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
select BUG
- select BUILDTIME_TABLE_SORT
+ select HAVE_KERNEL_UNCOMPRESSED
select HAVE_PCI
select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select PERF_USE_VMALLOC
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@@ -36,30 +43,40 @@ config PARISC
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_IRQ_PROBE
select GENERIC_PCI_IOMAP
+ select GENERIC_IOREMAP
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
select GENERIC_ARCH_TOPOLOGY if SMP
+ select ARCH_SUPPORTS_SCHED_MC if SMP && PA8X00
select GENERIC_CPU_DEVICES if !SMP
select GENERIC_LIB_DEVMEM_IS_ALLOWED
select SYSCTL_ARCH_UNALIGN_ALLOW
+ select SYSCTL_ARCH_UNALIGN_NO_WARN
select SYSCTL_EXCEPTION_TRACE
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
select TTY # Needed for pdc_cons.c
+ select HAS_IOPORT if PCI || EISA
select HAVE_DEBUG_STACKOVERFLOW
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+ select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+ select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HASH
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KFENCE
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
+ select HAVE_EBPF_JIT
+ select ARCH_WANT_DEFAULT_BPF_JIT
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
select GENERIC_SCHED_CLOCK
select GENERIC_IRQ_MIGRATION if SMP
select HAVE_UNSTABLE_SCHED_CLOCK if SMP
select LEGACY_TIMER_TICK
select CPU_NO_EFFICIENT_FFS
select THREAD_INFO_IN_TASK
select NEED_DMA_MAP_STATE
@@ -68,13 +85,13 @@ config PARISC
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_DYNAMIC_FTRACE if $(cc-option,-fpatchable-function-entry=1,1)
select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
select HAVE_KPROBES_ON_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
select TRACE_IRQFLAGS_SUPPORT
select HAVE_FUNCTION_DESCRIPTORS if 64BIT
+ select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
@@ -105,9 +122,12 @@ config ARCH_HAS_ILOG2_U64
default n
config GENERIC_BUG
- bool
- default y
+ def_bool y
depends on BUG
+ select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
+
+config GENERIC_BUG_RELATIVE_POINTERS
+ bool
config GENERIC_HWEIGHT
bool
@@ -122,6 +142,20 @@ config TIME_LOW_RES
depends on SMP
default y
+config ARCH_MMAP_RND_BITS_MIN
+ default 18 if 64BIT
+ default 8
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+ default 8
+
+config ARCH_MMAP_RND_BITS_MAX
+ default 18 if 64BIT
+ default 13
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+ default 13
+
# unless you want to implement ACPI on PA-RISC ... ;-)
config PM
bool
@@ -129,6 +163,10 @@ config PM
config STACKTRACE_SUPPORT
def_bool y
+config LOCKDEP_SUPPORT
+ bool
+ default y
+
config ISA_DMA_API
bool
@@ -207,9 +245,9 @@ config PARISC_HUGE_KERNEL
def_bool y if !MODULES || UBSAN || FTRACE || COMPILE_TEST
config MLONGCALLS
- def_bool y if PARISC_HUGE_KERNEL
bool "Enable the -mlong-calls compiler option for big kernels" if !PARISC_HUGE_KERNEL
depends on PA8X00
+ default PARISC_HUGE_KERNEL
help
If you configure the kernel to include many drivers built-in instead
as modules, the kernel executable may become too big, so that the
@@ -224,9 +262,9 @@ config MLONGCALLS
Enabling this option will probably slow down your kernel.
config 64BIT
- def_bool y if "$(ARCH)" = "parisc64"
bool "64-bit kernel" if "$(ARCH)" = "parisc"
depends on PA8X00
+ default "$(ARCH)" = "parisc64"
help
Enable this if you want to support 64bit kernel on PA-RISC platform.
@@ -243,6 +281,7 @@ choice
config PARISC_PAGE_SIZE_4KB
bool "4KB"
+ select HAVE_PAGE_SIZE_4KB
help
This lets you select the page size of the kernel. For best
performance, a page size of 16KB is recommended. For best
@@ -258,10 +297,12 @@ config PARISC_PAGE_SIZE_4KB
config PARISC_PAGE_SIZE_16KB
bool "16KB"
+ select HAVE_PAGE_SIZE_16KB
depends on PA8X00 && BROKEN && !KFENCE
config PARISC_PAGE_SIZE_64KB
bool "64KB"
+ select HAVE_PAGE_SIZE_64KB
depends on PA8X00 && BROKEN && !KFENCE
endchoice
@@ -282,14 +323,6 @@ config SMP
If you don't know what to do here, say N.
-config SCHED_MC
- bool "Multi-core scheduler support"
- depends on GENERIC_ARCH_TOPOLOGY && PA8X00
- help
- Multi-core scheduler support improves the CPU scheduler's decision
- making when dealing with multi-core CPU chips at a cost of slightly
- increased overhead in some places. If unsure say N here.
-
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
default y
@@ -298,16 +331,6 @@ config IRQSTACKS
for handling hard and soft interrupts. This can help avoid
overflowing the process kernel stacks.
-config TLB_PTLOCK
- bool "Use page table locks in TLB fault handler"
- depends on SMP
- default n
- help
- Select this option to enable page table locking in the TLB
- fault handler. This ensures that page table entries are
- updated consistently on SMP machines at the expense of some
- loss in performance.
-
config HOTPLUG_CPU
bool
default y if SMP
@@ -340,32 +363,20 @@ config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
- default "4" if 64BIT
+ default "8" if 64BIT
default "16"
-config KEXEC
- bool "Kexec system call"
- select KEXEC_CORE
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
- but it is independent of the system firmware. And like a reboot
- you can start any kernel with it, not just Linux.
-
- It is an ongoing process to be certain the hardware in a machine
- shutdown, so do not be surprised if this code does not
- initially work for you.
-
-config KEXEC_FILE
- bool "kexec file based system call"
- select KEXEC_CORE
- select KEXEC_ELF
- help
- This enables the kexec_file_load() System call. This is
- file based and takes file descriptors as system call argument
- for kernel and initramfs as opposed to list of segments as
- accepted by previous system call.
-
endmenu
+config ARCH_SUPPORTS_KEXEC
+ def_bool y
+
+config ARCH_SUPPORTS_KEXEC_FILE
+ def_bool y
+
+config ARCH_SELECTS_KEXEC_FILE
+ def_bool y
+ depends on KEXEC_FILE
+ select KEXEC_ELF
+
source "drivers/parisc/Kconfig"
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index f66554cd5c45..f4f164eb12df 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -1 +1,23 @@
# SPDX-License-Identifier: GPL-2.0
+#
+config LIGHTWEIGHT_SPINLOCK_CHECK
+ bool "Enable lightweight spinlock checks"
+ depends on DEBUG_KERNEL && SMP && !DEBUG_SPINLOCK
+ default y
+ help
+ Add checks with low performance impact to the spinlock functions
+ to catch memory overwrites at runtime. For more advanced
+ spinlock debugging you should choose the DEBUG_SPINLOCK option
+ which will detect uninitialized spinlocks too.
+ If unsure say Y here.
+
+config TLB_PTLOCK
+ bool "Use page table locks in TLB fault handler"
+ depends on DEBUG_KERNEL && SMP
+ default n
+ help
+ Select this option to enable page table locking in the TLB
+ fault handler. This ensures that page table entries are
+ updated consistently on SMP machines at the expense of some
+ loss in performance.
+
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index a2d8600521f9..48ae3c79557a 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -11,7 +11,7 @@
# Copyright (C) 1994 by Linus Torvalds
# Portions Copyright (C) 1999 The Puffin Group
#
-# Modified for PA-RISC Linux by Paul Lahaie, Alex deVries,
+# Modified for PA-RISC Linux by Paul Lahaie, Alex deVries,
# Mike Shaver, Helge Deller and Martin K. Petersen
#
@@ -39,7 +39,9 @@ endif
export LD_BFD
-# Set default 32 bits cross compilers for vdso
+# Set default 32 bits cross compilers for vdso.
+# This means that for 64BIT, both the 64-bit tools and the 32-bit tools
+# need to be in the path.
CC_ARCHES_32 = hppa hppa2.0 hppa1.1
CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
CROSS32_COMPILE := $(call cc-cross-prefix, \
@@ -50,12 +52,12 @@ export CROSS32CC
# Set default cross compiler for kernel build
ifdef cross_compiling
- ifeq ($(CROSS_COMPILE),)
+ ifeq ($(CROSS_COMPILE),)
CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
CROSS_COMPILE := $(call cc-cross-prefix, \
$(foreach a,$(CC_ARCHES), \
$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
- endif
+ endif
endif
ifdef CONFIG_DYNAMIC_FTRACE
@@ -119,6 +121,8 @@ export LIBGCC
libs-y += arch/parisc/lib/ $(LIBGCC)
+drivers-$(CONFIG_VIDEO) += arch/parisc/video/
+
boot := arch/parisc/boot
PALO := $(shell if (which palo 2>&1); then : ; \
@@ -137,7 +141,7 @@ palo lifimage: vmlinuz
fi
@if test ! -f "$(PALOCONF)"; then \
cp $(srctree)/arch/parisc/defpalo.conf $(objtree)/palo.conf; \
- echo 'A generic palo config file ($(objree)/palo.conf) has been created for you.'; \
+ echo 'A generic palo config file ($(objtree)/palo.conf) has been created for you.'; \
echo 'You should check it and re-run "make palo".'; \
echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \
false; \
@@ -175,12 +179,8 @@ vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 include/generated/vdso32-offsets.h
endif
-PHONY += vdso_install
-
-vdso_install:
- $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso $@
- $(if $(CONFIG_COMPAT_VDSO), \
- $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 $@)
+vdso-install-y += arch/parisc/kernel/vdso32/vdso32.so
+vdso-install-$(CONFIG_64BIT) += arch/parisc/kernel/vdso64/vdso64.so
install: KBUILD_IMAGE := vmlinux
zinstall: KBUILD_IMAGE := vmlinuz
diff --git a/arch/parisc/boot/Makefile b/arch/parisc/boot/Makefile
index b873ee4720ca..657f967240ee 100644
--- a/arch/parisc/boot/Makefile
+++ b/arch/parisc/boot/Makefile
@@ -10,7 +10,7 @@ subdir- := compressed
$(obj)/image: vmlinux FORCE
$(call if_changed,objcopy)
-$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
+$(obj)/bzImage: $(if $(CONFIG_KERNEL_UNCOMPRESSED),$(objtree)/vmlinux,$(obj)/compressed/vmlinux) FORCE
$(call if_changed,objcopy)
$(obj)/compressed/vmlinux: FORCE
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
index a294a1b58ee7..f8481e4e9d21 100644
--- a/arch/parisc/boot/compressed/Makefile
+++ b/arch/parisc/boot/compressed/Makefile
@@ -5,10 +5,6 @@
# create a compressed self-extracting vmlinux image from the original vmlinux
#
-KCOV_INSTRUMENT := n
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
-
OBJECTS := head.o real2.o firmware.o misc.o piggy.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
@@ -22,6 +18,7 @@ KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os
ifndef CONFIG_64BIT
KBUILD_CFLAGS += -mfast-indirect-calls
endif
+KBUILD_CFLAGS += -std=gnu11 -fms-extensions
LDFLAGS_vmlinux := -X -e startup --as-needed -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(addprefix $(obj)/, $(OBJECTS)) $(LIBGCC) FORCE
diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c
index 7ee49f5881d1..9c83bd06ef15 100644
--- a/arch/parisc/boot/compressed/misc.c
+++ b/arch/parisc/boot/compressed/misc.c
@@ -6,7 +6,7 @@
#include <linux/uaccess.h>
#include <linux/elf.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/page.h>
#include "sizes.h"
@@ -117,7 +117,7 @@ char *strchr(const char *s, int c)
return NULL;
}
-int puts(const char *s)
+static int puts(const char *s)
{
const char *nuline = s;
@@ -172,7 +172,7 @@ static int print_num(unsigned long num, int base)
return 0;
}
-int printf(const char *fmt, ...)
+static int printf(const char *fmt, ...)
{
va_list args;
int i = 0;
@@ -204,13 +204,13 @@ void abort(void)
}
#undef malloc
-void *malloc(size_t size)
+static void *malloc(size_t size)
{
return malloc_gzip(size);
}
#undef free
-void free(void *ptr)
+static void free(void *ptr)
{
return free_gzip(ptr);
}
@@ -278,7 +278,7 @@ static void parse_elf(void *output)
free(phdrs);
}
-unsigned long decompress_kernel(unsigned int started_wide,
+asmlinkage unsigned long __visible decompress_kernel(unsigned int started_wide,
unsigned int command_line,
const unsigned int rd_start,
const unsigned int rd_end)
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 8ce0ae370680..5444ce6405f3 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -12,7 +12,6 @@ CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_PERF_EVENTS=y
-CONFIG_SLAB=y
CONFIG_PA7100LC=y
CONFIG_SMP=y
CONFIG_HZ_100=y
@@ -83,7 +82,6 @@ CONFIG_TUN=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
CONFIG_NET_TULIP=y
@@ -98,6 +96,7 @@ CONFIG_LASI_82596=y
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
@@ -120,7 +119,6 @@ CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=m
CONFIG_LEGACY_PTY_COUNT=64
CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=8
CONFIG_SERIAL_8250_EXTENDED=y
@@ -130,17 +128,53 @@ CONFIG_PRINTER=m
CONFIG_PPDEV=m
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
-# CONFIG_HWMON is not set
+CONFIG_HWMON=m
+CONFIG_DRM=m
+CONFIG_DRM_DISPLAY_DP_AUX_CEC=y
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_NOUVEAU=m
+# CONFIG_DRM_NOUVEAU_BACKLIGHT is not set
+# CONFIG_DRM_NOUVEAU_CH7006 is not set
+# CONFIG_DRM_NOUVEAU_SIL164 is not set
+CONFIG_DRM_VGEM=m
+CONFIG_DRM_UDL=m
+CONFIG_DRM_MGAG200=m
CONFIG_FB=y
CONFIG_FB_FOREIGN_ENDIAN=y
-CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_PM2=m
+CONFIG_FB_PM2_FIFO_DISCONNECT=y
+CONFIG_FB_NVIDIA=m
+CONFIG_FB_NVIDIA_I2C=y
+# CONFIG_FB_NVIDIA_BACKLIGHT is not set
+CONFIG_FB_RIVA=m
+CONFIG_FB_RIVA_I2C=y
+# CONFIG_FB_RIVA_BACKLIGHT is not set
CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_MILLENIUM=y
+CONFIG_FB_MATROX_MYSTIQUE=y
CONFIG_FB_MATROX_G=y
+CONFIG_FB_MATROX_I2C=m
+CONFIG_FB_MATROX_MAVEN=m
+CONFIG_FB_ATY128=m
+# CONFIG_FB_ATY128_BACKLIGHT is not set
+CONFIG_FB_ATY=m
+CONFIG_FB_ATY_CT=y
+CONFIG_FB_ATY_GX=y
+# CONFIG_FB_ATY_BACKLIGHT is not set
+CONFIG_FB_S3=m
+CONFIG_FB_SAVAGE=m
+CONFIG_FB_SAVAGE_I2C=y
+CONFIG_FB_SAVAGE_ACCEL=y
+CONFIG_FB_SIS=m
+CONFIG_FB_SIS_300=y
+CONFIG_FB_SIS_315=y
CONFIG_FB_VOODOO1=m
+CONFIG_FB_TRIDENT=m
+CONFIG_FB_SMSCUFX=m
+CONFIG_FB_UDL=m
CONFIG_DUMMY_CONSOLE_COLUMNS=128
CONFIG_DUMMY_CONSOLE_ROWS=48
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -197,12 +231,12 @@ CONFIG_AUXDISPLAY=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_VFAT_FS=y
@@ -216,21 +250,19 @@ CONFIG_CIFS=m
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
# CONFIG_CIFS_DEBUG is not set
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRC_CCITT=m
-CONFIG_CRC_T10DIF=y
CONFIG_FONTS=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 57501b0aed92..ce91f9d1fdbf 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -20,9 +20,6 @@ CONFIG_USER_NS=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_PA8X00=y
-CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_HPPB=y
CONFIG_IOMMU_CCIO=y
@@ -37,6 +34,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPAT_BRK is not set
# CONFIG_COMPACTION is not set
CONFIG_MEMORY_FAILURE=y
CONFIG_NET=y
@@ -103,7 +101,6 @@ CONFIG_TUN=y
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
CONFIG_NET_TULIP=y
@@ -121,6 +118,7 @@ CONFIG_E1000=y
# CONFIG_NET_VENDOR_OKI is not set
CONFIG_QLA3XXX=m
CONFIG_QLCNIC=m
+# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
@@ -160,7 +158,6 @@ CONFIG_SERIO_SERPORT=m
CONFIG_SERIO_RAW=m
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=8
CONFIG_SERIAL_8250_RUNTIME_UARTS=8
@@ -186,7 +183,6 @@ CONFIG_WATCHDOG=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_SSB=m
CONFIG_SSB_DRIVER_PCICORE=y
-CONFIG_HTC_PASIC3=m
CONFIG_LPC_SCH=m
CONFIG_MFD_SM501=m
CONFIG_REGULATOR=y
@@ -197,13 +193,45 @@ CONFIG_AGP=y
CONFIG_AGP_PARISC=y
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
+CONFIG_DRM_NOUVEAU=m
+# CONFIG_DRM_NOUVEAU_BACKLIGHT is not set
+# CONFIG_DRM_NOUVEAU_CH7006 is not set
+# CONFIG_DRM_NOUVEAU_SIL164 is not set
+CONFIG_DRM_MGAG200=m
CONFIG_FB=y
-CONFIG_FB_MATROX=y
+CONFIG_FB_PM2=m
+CONFIG_FB_PM2_FIFO_DISCONNECT=y
+CONFIG_FB_NVIDIA=m
+CONFIG_FB_NVIDIA_I2C=y
+# CONFIG_FB_NVIDIA_BACKLIGHT is not set
+CONFIG_FB_RIVA=m
+CONFIG_FB_RIVA_I2C=y
+# CONFIG_FB_RIVA_BACKLIGHT is not set
+CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_MILLENIUM=y
CONFIG_FB_MATROX_MYSTIQUE=y
CONFIG_FB_MATROX_G=y
-CONFIG_FB_MATROX_I2C=y
-CONFIG_FB_MATROX_MAVEN=y
+CONFIG_FB_MATROX_I2C=m
+CONFIG_FB_MATROX_MAVEN=m
CONFIG_FB_RADEON=y
+# CONFIG_FB_RADEON_BACKLIGHT is not set
+CONFIG_FB_ATY128=m
+# CONFIG_FB_ATY128_BACKLIGHT is not set
+CONFIG_FB_ATY=m
+CONFIG_FB_ATY_CT=y
+CONFIG_FB_ATY_GX=y
+# CONFIG_FB_ATY_BACKLIGHT is not set
+CONFIG_FB_S3=m
+CONFIG_FB_SAVAGE=m
+CONFIG_FB_SAVAGE_I2C=y
+CONFIG_FB_SAVAGE_ACCEL=y
+CONFIG_FB_SIS=m
+CONFIG_FB_SIS_300=y
+CONFIG_FB_SIS_315=y
+CONFIG_FB_VOODOO1=m
+CONFIG_FB_TRIDENT=m
+CONFIG_FB_SMSCUFX=m
+CONFIG_FB_UDL=m
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_HIDRAW=y
@@ -219,18 +247,17 @@ CONFIG_UIO_AEC=m
CONFIG_UIO_SERCOS3=m
CONFIG_UIO_PCI_GENERIC=m
CONFIG_STAGING=y
-CONFIG_QLGE=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_BTRFS_FS=m
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_ISO9660_FS=y
@@ -240,7 +267,6 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_XATTR=y
CONFIG_CONFIGFS_FS=y
-CONFIG_SYSV_FS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V4=m
CONFIG_NFS_V4_1=y
@@ -256,17 +282,14 @@ CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_2=m
CONFIG_NLS_UTF8=m
-CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=m
-CONFIG_LIBCRC32C=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index e6e7f74c8ac9..4fb596d94c89 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
+generic-y += agp.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += user.h
diff --git a/arch/parisc/include/asm/agp.h b/arch/parisc/include/asm/agp.h
deleted file mode 100644
index 14ae54cfd368..000000000000
--- a/arch/parisc/include/asm/agp.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_PARISC_AGP_H
-#define _ASM_PARISC_AGP_H
-
-/*
- * PARISC specific AGP definitions.
- * Copyright (c) 2006 Kyle McMartin <kyle@parisc-linux.org>
- *
- */
-
-#define map_page_into_agp(page) do { } while (0)
-#define unmap_page_from_agp(page) do { } while (0)
-#define flush_agp_cache() mb()
-
-/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-#define alloc_gatt_pages(order) \
- ((char *)__get_free_pages(GFP_KERNEL, (order)))
-#define free_gatt_pages(table, order) \
- free_pages((unsigned long)(table), (order))
-
-#endif /* _ASM_PARISC_AGP_H */
diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h
index 1ed45fd085d3..1601ae4b888d 100644
--- a/arch/parisc/include/asm/alternative.h
+++ b/arch/parisc/include/asm/alternative.h
@@ -13,7 +13,7 @@
#define INSN_PxTLB 0x02 /* modify pdtlb, pitlb */
#define INSN_NOP 0x08000240 /* nop */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/init.h>
#include <linux/types.h>
@@ -34,7 +34,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
/* Alternative SMP implementation. */
#define ALTERNATIVE(cond, replacement) "!0:" \
- ".section .altinstructions, \"aw\" !" \
+ ".section .altinstructions, \"a\" !" \
+ ".align 4 !" \
".word (0b-4-.) !" \
".hword 1, " __stringify(cond) " !" \
".word " __stringify(replacement) " !" \
@@ -44,7 +45,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
/* to replace one single instructions by a new instruction */
#define ALTERNATIVE(from, to, cond, replacement)\
- .section .altinstructions, "aw" ! \
+ .section .altinstructions, "a" ! \
+ .align 4 ! \
.word (from - .) ! \
.hword (to - from)/4, cond ! \
.word replacement ! \
@@ -52,12 +54,13 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
/* to replace multiple instructions by new code */
#define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\
- .section .altinstructions, "aw" ! \
+ .section .altinstructions, "a" ! \
+ .align 4 ! \
.word (from - .) ! \
.hword -num_instructions, cond ! \
.word (new_instr_ptr - .) ! \
.previous
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_PARISC_ALTERNATIVE_H */
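
A quick note on the consumer side, since the hunks above only change how entries are emitted: the new ".align 4" keeps the .word entries naturally aligned now that .altinstructions is no longer writable ("a" instead of "aw"). The sketch below (not part of this patch) is modeled on asm_io_sync() in asm/cache.h and uses the real ALT_COND_NO_SMP and INSN_NOP constants from this header; the function name is illustrative. apply_alternatives() walks the section at boot and, when the condition matches, rewrites the "sync" into a NOP.

	/* Sketch of a typical ALTERNATIVE() user; on a UP kernel the
	 * patched-in NOP removes the barrier overhead entirely. */
	static inline void example_io_sync(void)
	{
		asm volatile("sync"
			ALTERNATIVE(ALT_COND_NO_SMP, INSN_NOP)
			::: "memory");
	}
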
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 0f0d4a496fef..c20261604f09 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -53,7 +53,7 @@
#define SR_TEMP2 2
#define SR_USER 3
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#ifdef CONFIG_64BIT
#define LDREG ldd
@@ -90,10 +90,6 @@
#include <asm/asmregs.h>
#include <asm/psw.h>
- sp = 30
- gp = 27
- ipsw = 22
-
/*
* We provide two versions of each macro to convert from physical
* to virtual and vice versa. The "_r1" versions take one argument
@@ -101,26 +97,28 @@
* version takes two arguments: a src and destination register.
* However, the source and destination registers can not be
* the same register.
+ *
+ * We use add,l to avoid clobbering the C/B bits in the PSW.
*/
.macro tophys grvirt, grphys
- ldil L%(__PAGE_OFFSET), \grphys
- sub \grvirt, \grphys, \grphys
+ ldil L%(-__PAGE_OFFSET), \grphys
+ addl \grvirt, \grphys, \grphys
.endm
-
+
.macro tovirt grphys, grvirt
ldil L%(__PAGE_OFFSET), \grvirt
- add \grphys, \grvirt, \grvirt
+ addl \grphys, \grvirt, \grvirt
.endm
.macro tophys_r1 gr
- ldil L%(__PAGE_OFFSET), %r1
- sub \gr, %r1, \gr
+ ldil L%(-__PAGE_OFFSET), %r1
+ addl \gr, %r1, \gr
.endm
-
+
.macro tovirt_r1 gr
ldil L%(__PAGE_OFFSET), %r1
- add \gr, %r1, \gr
+ addl \gr, %r1, \gr
.endm
.macro delay value
@@ -578,9 +576,11 @@
*/
#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \
.section __ex_table,"aw" ! \
+ .align 4 ! \
.word (fault_addr - .), (except_addr - .) ! \
+ or %r0,%r0,%r0 ! \
.previous
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index dd5a299ada69..d4f023887ff8 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -73,10 +73,6 @@ static __inline__ int arch_atomic_read(const atomic_t *v)
return READ_ONCE((v)->counter);
}
-/* exported interface */
-#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
#define ATOMIC_OP(op, c_op) \
static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
{ \
@@ -122,6 +118,11 @@ static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
@@ -131,6 +132,10 @@ ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
@@ -185,6 +190,11 @@ static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
@@ -194,6 +204,10 @@ ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
@@ -218,11 +232,6 @@ arch_atomic64_read(const atomic64_t *v)
return READ_ONCE((v)->counter);
}
-/* exported interface */
-#define arch_atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
#endif /* !CONFIG_64BIT */
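
For context on why arch_atomic_cmpxchg()/arch_atomic_xchg() can simply be deleted (the generic atomic headers now build them from arch_cmpxchg()/arch_xchg()): PA-RISC has no atomic read-modify-write instruction apart from ldcw, so the ATOMIC_OP() macros above expand to plain C under a hashed spinlock. Roughly, as already present in this header:

	/* Approximate expansion of ATOMIC_OP(add, +=): the lock is picked
	 * from an array of spinlocks by hashing the atomic_t's address
	 * (see _atomic_spin_lock_irqsave() earlier in this file). */
	static __inline__ void arch_atomic_add(int i, atomic_t *v)
	{
		unsigned long flags;

		_atomic_spin_lock_irqsave(v, flags);
		v->counter += i;
		_atomic_spin_unlock_irqrestore(v, flags);
	}
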
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
index c705decf2bed..519b1903c5ed 100644
--- a/arch/parisc/include/asm/barrier.h
+++ b/arch/parisc/include/asm/barrier.h
@@ -4,7 +4,7 @@
#include <asm/alternative.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* The synchronize caches instruction executes as a nop on systems in
which all memory references are performed in order. */
@@ -93,5 +93,5 @@ do { \
})
#include <asm-generic/barrier.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_BARRIER_H */
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 0ec9cfc5131f..bd1280a8a5ec 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -123,7 +123,7 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
* cycles for each mispredicted branch.
*/
-static __inline__ unsigned long __ffs(unsigned long x)
+static __inline__ __attribute_const__ unsigned long __ffs(unsigned long x)
{
unsigned long ret;
@@ -161,7 +161,7 @@ static __inline__ unsigned long __ffs(unsigned long x)
* This is defined the same way as the libc and compiler builtin
* ffs routines, therefore differs in spirit from the above ffz (man ffs).
*/
-static __inline__ int ffs(int x)
+static __inline__ __attribute_const__ int ffs(int x)
{
return x ? (__ffs((unsigned long)x) + 1) : 0;
}
@@ -171,7 +171,7 @@ static __inline__ int ffs(int x)
* fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __inline__ int fls(unsigned int x)
+static __inline__ __attribute_const__ int fls(unsigned int x)
{
int ret;
if (!x)
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 4b6d60b94124..5cf35489ad80 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -2,8 +2,6 @@
#ifndef _PARISC_BUG_H
#define _PARISC_BUG_H
-#include <linux/kernel.h> /* for BUGFLAG_TAINT */
-
/*
* Tell the user there is some problem.
* The offending file and line are encoded in the __bug_table section.
@@ -17,24 +15,27 @@
#define PARISC_BUG_BREAK_ASM "break 0x1f, 0x1fff"
#define PARISC_BUG_BREAK_INSN 0x03ffe01f /* PARISC_BUG_BREAK_ASM */
-#if defined(CONFIG_64BIT)
-#define ASM_WORD_INSN ".dword\t"
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+# define __BUG_REL(val) ".word " __stringify(val) " - ."
#else
-#define ASM_WORD_INSN ".word\t"
+# define __BUG_REL(val) ".word " __stringify(val)
#endif
+
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define BUG() \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
- "\t.pushsection __bug_table,\"aw\"\n" \
- "2:\t" ASM_WORD_INSN "1b, %c0\n" \
- "\t.short %c1, %c2\n" \
- "\t.org 2b+%c3\n" \
+ "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.align 4\n" \
+ "2:\t" __BUG_REL(1b) "\n" \
+ "\t" __BUG_REL(%c0) "\n" \
+ "\t.short %1, %2\n" \
+ "\t.blockz %3-2*4-2*2\n" \
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
- "i" (0), "i" (sizeof(struct bug_entry)) ); \
+ "i" (0), "i" (sizeof(struct bug_entry)) ); \
unreachable(); \
} while(0)
@@ -47,28 +48,31 @@
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
-#define __WARN_FLAGS(flags) \
+#define __WARN_FLAGS(cond_str, flags) \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
- "\t.pushsection __bug_table,\"aw\"\n" \
- "2:\t" ASM_WORD_INSN "1b, %c0\n" \
- "\t.short %c1, %c2\n" \
- "\t.org 2b+%c3\n" \
+ "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.align 4\n" \
+ "2:\t" __BUG_REL(1b) "\n" \
+ "\t" __BUG_REL(%c0) "\n" \
+ "\t.short %1, %2\n" \
+ "\t.blockz %3-2*4-2*2\n" \
"\t.popsection" \
- : : "i" (__FILE__), "i" (__LINE__), \
+ : : "i" (WARN_CONDITION_STR(cond_str) __FILE__), "i" (__LINE__), \
"i" (BUGFLAG_WARNING|(flags)), \
"i" (sizeof(struct bug_entry)) ); \
} while(0)
#else
-#define __WARN_FLAGS(flags) \
+#define __WARN_FLAGS(cond_str, flags) \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
- "\t.pushsection __bug_table,\"aw\"\n" \
- "2:\t" ASM_WORD_INSN "1b\n" \
- "\t.short %c0\n" \
- "\t.org 2b+%c1\n" \
+ "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.align 4\n" \
+ "2:\t" __BUG_REL(1b) "\n" \
+ "\t.short %0\n" \
+ "\t.blockz %1-4-2\n" \
"\t.popsection" \
: : "i" (BUGFLAG_WARNING|(flags)), \
"i" (sizeof(struct bug_entry)) ); \
diff --git a/arch/parisc/include/asm/bugs.h b/arch/parisc/include/asm/bugs.h
deleted file mode 100644
index 0a7f9db6bd1c..000000000000
--- a/arch/parisc/include/asm/bugs.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * include/asm-parisc/bugs.h
- *
- * Copyright (C) 1999 Mike Shaver
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-#include <asm/processor.h>
-
-static inline void check_bugs(void)
-{
-// identify_cpu(&boot_cpu_data);
-}
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index e23d06b51a20..3f8d3be6ef24 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -16,11 +16,20 @@
#define L1_CACHE_BYTES 16
#define L1_CACHE_SHIFT 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#ifdef CONFIG_PA20
+#define ARCH_DMA_MINALIGN 128
+#else
+#define ARCH_DMA_MINALIGN 32
+#endif
+#define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */
+
+#define arch_slab_minalign() ((unsigned)dcache_stride)
+#define cache_line_size() dcache_stride
+#define dma_get_cache_alignment cache_line_size
#define __read_mostly __section(".data..read_mostly")
@@ -37,6 +46,7 @@ extern int split_tlb;
extern int dcache_stride;
extern int icache_stride;
extern struct pdc_cache_info cache_info;
+extern struct pdc_btlb_info btlb_info;
void parisc_setup_cache_timing(void);
#define pdtlb(sr, addr) asm volatile("pdtlb 0(%%sr%0,%1)" \
@@ -56,7 +66,7 @@ void parisc_setup_cache_timing(void);
ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")
#define asm_syncdma() asm volatile("syncdma" :::"memory")
-#endif /* ! __ASSEMBLY__ */
+#endif /* ! __ASSEMBLER__ */
/* Classes of processor wrt: disabling space register hashing */
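
Splitting ARCH_KMALLOC_MINALIGN (16) from the cache-line-sized ARCH_DMA_MINALIGN re-enables the small kmalloc caches while DMA buffers keep full cache-line alignment. The 16-byte floor exists because ldcw, the only atomic primitive, requires a 16-byte-aligned operand; a sketch of the constraint (the struct is illustrative, the value mirrors __PA_LDCW_ALIGNMENT in asm/ldcw.h):

	struct example_obj {
		arch_spinlock_t lock;	/* ldcw target: 16-byte alignment */
		int data;
	};
	/* kmalloc(sizeof(struct example_obj), GFP_KERNEL) stays safe only
	 * because ARCH_KMALLOC_MINALIGN >= 16 on parisc. */
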
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 0bdee6724132..8394718870e1 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -31,28 +31,36 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
-void flush_kernel_dcache_page_addr(const void *addr);
-
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
+/* The only way to flush a vmap range is to flush whole cache */
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);
-#define flush_cache_vmap(start, end) flush_cache_all()
-#define flush_cache_vunmap(start, end) flush_cache_all()
+void flush_cache_vmap(unsigned long start, unsigned long end);
+#define flush_cache_vmap_early(start, end) do { } while (0)
+void flush_cache_vunmap(unsigned long start, unsigned long end);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-void flush_dcache_page(struct page *page);
+static inline void flush_dcache_page(struct page *page)
+{
+ flush_dcache_folio(page_folio(page));
+}
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_lock_irqsave(mapping, flags) \
+ xa_lock_irqsave(&mapping->i_pages, flags)
+#define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \
+ xa_unlock_irqrestore(&mapping->i_pages, flags)
-#define flush_icache_page(vma,page) do { \
- flush_kernel_dcache_page_addr(page_address(page)); \
- flush_kernel_icache_page(page_address(page)); \
-} while (0)
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+ unsigned int nr);
+#define flush_icache_pages flush_icache_pages
#define flush_icache_range(s,e) do { \
flush_kernel_dcache_range_asm(s,e); \
@@ -68,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
-/* defined in pacache.S exported in cache.c used by flush_anon_page */
-void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
#define ARCH_HAS_FLUSH_ON_KUNMAP
-static inline void kunmap_flush_on_unmap(const void *addr)
-{
- flush_kernel_dcache_page_addr(addr);
-}
+void kunmap_flush_on_unmap(const void *addr);
#endif /* _PARISC_CACHEFLUSH_H */
diff --git a/arch/parisc/include/asm/cachetype.h b/arch/parisc/include/asm/cachetype.h
new file mode 100644
index 000000000000..e0868a1d3c47
--- /dev/null
+++ b/arch/parisc/include/asm/cachetype.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PARISC_CACHETYPE_H
+#define __ASM_PARISC_CACHETYPE_H
+
+#include <linux/types.h>
+
+#define cpu_dcache_is_aliasing() true
+
+#endif
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index 3c43baca7b39..2aceebcd695c 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -40,7 +40,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
" addc %0, %5, %0\n"
" addc %0, %3, %0\n"
"1: ldws,ma 4(%1), %3\n"
-" addib,< 0, %2, 1b\n"
+" addib,> -1, %2, 1b\n"
" addc %0, %3, %0\n"
"\n"
" extru %0, 31, 16, %4\n"
@@ -126,6 +126,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
** Try to keep 4 registers with "live" values ahead of the ALU.
*/
+" depdi 0, 31, 32, %0\n"/* clear upper half of incoming checksum */
" ldd,ma 8(%1), %4\n" /* get 1st saddr word */
" ldd,ma 8(%2), %5\n" /* get 1st daddr word */
" add %4, %0, %0\n"
@@ -137,8 +138,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
" add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */
" extrd,u %0, 31, 32, %4\n"/* copy upper half down */
" depdi 0, 31, 32, %0\n"/* clear upper half */
-" add %4, %0, %0\n" /* fold into 32-bits */
-" addc 0, %0, %0\n" /* add carry */
+" add,dc %4, %0, %0\n" /* fold into 32-bits, plus carry */
+" addc 0, %0, %0\n" /* add final carry */
#else
@@ -163,7 +164,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
" ldw,ma 4(%2), %7\n" /* 4th daddr */
" addc %6, %0, %0\n"
" addc %7, %0, %0\n"
-" addc %3, %0, %0\n" /* fold in proto+len, catch carry */
+" addc %3, %0, %0\n" /* fold in proto+len */
+" addc 0, %0, %0\n" /* add carry */
#endif
: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index 5f274be10567..bf0a0f1189eb 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -22,7 +22,7 @@ extern unsigned long __xchg64(unsigned long, volatile unsigned long *);
/* optimizer better get rid of switch since size is a constant */
static inline unsigned long
-__xchg(unsigned long x, volatile void *ptr, int size)
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
switch (size) {
#ifdef CONFIG_64BIT
@@ -49,33 +49,31 @@ __xchg(unsigned long x, volatile void *ptr, int size)
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
__ret = (__typeof__(*(ptr))) \
- __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
+ __arch_xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
__ret; \
})
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
-/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
- unsigned int new_);
-extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
+/* __cmpxchg_u... defined in arch/parisc/lib/bitops.c */
extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
+extern u16 __cmpxchg_u16(volatile u16 *ptr, u16 old, u16 new_);
+extern u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
- switch (size) {
+ return
#ifdef CONFIG_64BIT
- case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
+ size == 8 ? __cmpxchg_u64(ptr, old, new_) :
#endif
- case 4: return __cmpxchg_u32((unsigned int *)ptr,
- (unsigned int)old, (unsigned int)new_);
- case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
+ size == 4 ? __cmpxchg_u32(ptr, old, new_) :
+ size == 2 ? __cmpxchg_u16(ptr, old, new_) :
+ size == 1 ? __cmpxchg_u8(ptr, old, new_) :
+ (__cmpxchg_called_with_bad_pointer(), old);
}
#define arch_cmpxchg(ptr, o, n) \
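
Besides renaming __xchg() to __arch_xchg() to stay clear of the generic wrappers, the functional change here is the new 2-byte case: a 16-bit cmpxchg() previously fell through to __cmpxchg_called_with_bad_pointer() and failed at link time. A usage sketch (names are illustrative):

	static u16 example_flag;

	static bool example_try_claim(void)
	{
		/* compiles to a __cmpxchg_u16() call now that size == 2
		 * is handled by the dispatch above */
		return cmpxchg(&example_flag, 0, 1) == 0;
	}
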
diff --git a/arch/parisc/include/asm/current.h b/arch/parisc/include/asm/current.h
index dc7aea07c3f3..2814529a4c28 100644
--- a/arch/parisc/include/asm/current.h
+++ b/arch/parisc/include/asm/current.h
@@ -2,7 +2,7 @@
#ifndef _ASM_PARISC_CURRENT_H
#define _ASM_PARISC_CURRENT_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct task_struct;
static __always_inline struct task_struct *get_current(void)
@@ -16,6 +16,6 @@ static __always_inline struct task_struct *get_current(void)
#define current get_current()
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_PARISC_CURRENT_H */
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index d5bd94247371..635665004fe6 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -21,7 +21,7 @@
extern const struct dma_map_ops *hppa_dma_ops;
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+static inline const struct dma_map_ops *get_arch_dma_ops(void)
{
return hppa_dma_ops;
}
diff --git a/arch/parisc/include/asm/dma.h b/arch/parisc/include/asm/dma.h
index 9e8c101de902..582fb5d1a5d5 100644
--- a/arch/parisc/include/asm/dma.h
+++ b/arch/parisc/include/asm/dma.h
@@ -14,6 +14,8 @@
#define dma_outb outb
#define dma_inb inb
+extern unsigned long pcxl_dma_start;
+
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
diff --git a/arch/parisc/include/asm/dwarf.h b/arch/parisc/include/asm/dwarf.h
index f4512db86a19..526f4a79262c 100644
--- a/arch/parisc/include/asm/dwarf.h
+++ b/arch/parisc/include/asm/dwarf.h
@@ -6,7 +6,7 @@
#ifndef _ASM_PARISC_DWARF_H
#define _ASM_PARISC_DWARF_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
@@ -15,6 +15,6 @@
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_UNDEFINED .cfi_undefined
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_PARISC_DWARF_H */
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index cc426d365892..2d73d3c3cd37 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -163,8 +163,7 @@ typedef struct elf32_fdesc {
/* Format for the Elf64 Function descriptor */
typedef struct elf64_fdesc {
- __u64 dummy[2]; /* FIXME: nothing uses these, why waste
- * the space */
+ __u64 dummy[2]; /* used by 64-bit eBPF and tracing functions */
__u64 addr;
__u64 gp;
} Elf64_Fdesc;
@@ -350,15 +349,7 @@ struct pt_regs; /* forward declaration... */
#define ELF_HWCAP 0
-/* Masks for stack and mmap randomization */
-#define BRK_RND_MASK (is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
-#define MMAP_RND_MASK (is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
-#define STACK_RND_MASK MMAP_RND_MASK
-
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *);
-#define arch_randomize_brk arch_randomize_brk
-
+#define STACK_RND_MASK 0x7ff /* 8MB of VA */
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
diff --git a/arch/parisc/include/asm/extable.h b/arch/parisc/include/asm/extable.h
new file mode 100644
index 000000000000..4ea23e3d79dc
--- /dev/null
+++ b/arch/parisc/include/asm/extable.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PARISC_EXTABLE_H
+#define __PARISC_EXTABLE_H
+
+#include <asm/ptrace.h>
+#include <linux/compiler.h>
+
+/*
+ * The exception table consists of three addresses:
+ *
+ * - A relative address to the instruction that is allowed to fault.
+ * - A relative address at which the program should continue (fixup routine)
+ * - An asm statement which specifies which CPU register will
+ * receive -EFAULT when an exception happens if the lowest bit in
+ * the fixup address is set.
+ *
+ * Note: The register specified in the err_opcode instruction will be
+ * modified at runtime if a fault happens. Register %r0 will be ignored.
+ *
+ * Since relative addresses are used, 32bit values are sufficient even on
+ * 64bit kernel.
+ */
+
+struct pt_regs;
+int fixup_exception(struct pt_regs *regs);
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+struct exception_table_entry {
+ int insn; /* relative address of insn that is allowed to fault. */
+ int fixup; /* relative address of fixup routine */
+ int err_opcode; /* sample opcode with register which holds error code */
+};
+
+#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr, opcode )\
+ ".section __ex_table,\"aw\"\n" \
+ ".align 4\n" \
+ ".word (" #fault_addr " - .), (" #except_addr " - .)\n" \
+ opcode "\n" \
+ ".previous\n"
+
+/*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with lowest bit set) for which the fault handler in fixup_exception() will
+ * load -EFAULT on fault into the register specified by the err_opcode instruction,
+ * and zeroes the target register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_VAR(__err_var) \
+ int __err_var = 0
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr, register )\
+ ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1, "or %%r0,%%r0," register)
+
+static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
+ struct exception_table_entry *b,
+ struct exception_table_entry tmp,
+ int delta)
+{
+ a->fixup = b->fixup + delta;
+ b->fixup = tmp.fixup - delta;
+ a->err_opcode = b->err_opcode;
+ b->err_opcode = tmp.err_opcode;
+}
+#define swap_ex_entry_fixup swap_ex_entry_fixup
+
+#endif
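
The third field records a sample "or %r0,%r0,%reg" instruction so that fixup_exception() knows which register should receive -EFAULT when the entry's low fixup bit is set. A consumer-side sketch, modeled on (and simplified from) __get_user_asm() in asm/uaccess.h; the function and variable names are illustrative:

	static long example_load_user_word(const int __user *uaddr)
	{
		ASM_EXCEPTIONTABLE_VAR(err);	/* int err = 0; */
		long val;

		asm("1:\tldw 0(%2),%0\n"	/* the load that may fault */
		    "9:\n"			/* resume point after a fault */
		    ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1")
		    : "=r" (val), "+r" (err)
		    : "r" (uaddr));

		return err ? err : val;
	}
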
diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h
deleted file mode 100644
index 55d29c4f716e..000000000000
--- a/arch/parisc/include/asm/fb.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-
-#include <linux/fb.h>
-#include <linux/fs.h>
-#include <asm/page.h>
-
-static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
- unsigned long off)
-{
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-}
-
-#if defined(CONFIG_FB_STI)
-int fb_is_primary_device(struct fb_info *info);
-#else
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-#endif
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h
index 5cd80ce1163a..9cafa449c4a7 100644
--- a/arch/parisc/include/asm/fixmap.h
+++ b/arch/parisc/include/asm/fixmap.h
@@ -39,7 +39,7 @@
#define KERNEL_MAP_START (GATEWAY_PAGE_SIZE)
#define KERNEL_MAP_END (FIXMAP_START)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
enum fixed_addresses {
@@ -59,6 +59,6 @@ extern void *parisc_vmalloc_start;
void set_fixmap(enum fixed_addresses idx, phys_addr_t phys);
void clear_fixmap(enum fixed_addresses idx);
-#endif /*__ASSEMBLY__*/
+#endif /*__ASSEMBLER__*/
#endif /*_ASM_FIXMAP_H*/
diff --git a/arch/parisc/include/asm/floppy.h b/arch/parisc/include/asm/floppy.h
index b318a7df52f6..f15b69fea901 100644
--- a/arch/parisc/include/asm/floppy.h
+++ b/arch/parisc/include/asm/floppy.h
@@ -8,9 +8,9 @@
#ifndef __ASM_PARISC_FLOPPY_H
#define __ASM_PARISC_FLOPPY_H
+#include <linux/sizes.h>
#include <linux/vmalloc.h>
-
/*
* The DMA channel used by the floppy controller cannot access data at
* addresses >= 16MB
@@ -20,15 +20,12 @@
* floppy accesses go through the track buffer.
*/
#define _CROSS_64KB(a,s,vdma) \
-(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
-
-#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
-
+ (!(vdma) && \
+ ((unsigned long)(a) / SZ_64K != ((unsigned long)(a) + (s) - 1) / SZ_64K))
#define SW fd_routine[use_virtual_dma&1]
#define CSW fd_routine[can_use_virtual_dma & 1]
-
#define fd_inb(base, reg) readb((base) + (reg))
#define fd_outb(value, base, reg) writeb(value, (base) + (reg))
@@ -206,7 +203,7 @@ static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
{
#ifdef FLOPPY_SANITY_CHECK
- if (CROSS_64KB(addr, size)) {
+ if (_CROSS_64KB(addr, size, use_virtual_dma & 1)) {
printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
return -1;
}
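
With the open-coded CROSS_64KB() wrapper gone, callers pass the vdma flag directly. A quick arithmetic check of the reworked macro, with illustrative values:

	/* a 2 KiB transfer starting 1 KiB below a 64 KiB boundary crosses it */
	void *buf = (void *)(SZ_64K - SZ_1K);
	bool crosses = _CROSS_64KB(buf, SZ_2K, 0);	/* -> true */
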
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index a7cf0d05ccf4..8b89d2b642eb 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -2,7 +2,7 @@
#ifndef _ASM_PARISC_FTRACE_H
#define _ASM_PARISC_FTRACE_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern void mcount(void);
#define MCOUNT_ADDR ((unsigned long)mcount)
@@ -12,6 +12,10 @@ extern void mcount(void);
extern unsigned long sys_call_table[];
extern unsigned long return_address(unsigned int);
+struct ftrace_regs;
+extern void ftrace_function_trampoline(unsigned long parent,
+ unsigned long self_addr, unsigned long org_sp_gr3,
+ struct ftrace_regs *fregs);
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_caller(void);
@@ -25,6 +29,6 @@ unsigned long ftrace_call_adjust(unsigned long addr);
#define ftrace_return_address(n) return_address(n)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_PARISC_FTRACE_H */
diff --git a/arch/parisc/include/asm/grfioctl.h b/arch/parisc/include/asm/grfioctl.h
index a740844a1581..597201530d20 100644
--- a/arch/parisc/include/asm/grfioctl.h
+++ b/arch/parisc/include/asm/grfioctl.h
@@ -59,42 +59,4 @@
#define CRT_ID_LEGO 0x35ACDA30 /* Lego FX5, FX10 ... */
#define CRT_ID_PINNACLE 0x35ACDA16 /* Pinnacle FXe */
-/* structure for ioctl(GCDESCRIBE) */
-
-#define gaddr_t unsigned long /* FIXME: PA2.0 (64bit) portable ? */
-
-struct grf_fbinfo {
- unsigned int id; /* upper 32 bits of graphics id */
- unsigned int mapsize; /* mapped size of framebuffer */
- unsigned int dwidth, dlength;/* x and y sizes */
- unsigned int width, length; /* total x and total y size */
- unsigned int xlen; /* x pitch size */
- unsigned int bpp, bppu; /* bits per pixel and used bpp */
- unsigned int npl, nplbytes; /* # of planes and bytes per plane */
- char name[32]; /* name of the device (from ROM) */
- unsigned int attr; /* attributes */
- gaddr_t fbbase, regbase;/* framebuffer and register base addr */
- gaddr_t regions[6]; /* region bases */
-};
-
-#define GCID _IOR('G', 0, int)
-#define GCON _IO('G', 1)
-#define GCOFF _IO('G', 2)
-#define GCAON _IO('G', 3)
-#define GCAOFF _IO('G', 4)
-#define GCMAP _IOWR('G', 5, int)
-#define GCUNMAP _IOWR('G', 6, int)
-#define GCMAP_HPUX _IO('G', 5)
-#define GCUNMAP_HPUX _IO('G', 6)
-#define GCLOCK _IO('G', 7)
-#define GCUNLOCK _IO('G', 8)
-#define GCLOCK_MINIMUM _IO('G', 9)
-#define GCUNLOCK_MINIMUM _IO('G', 10)
-#define GCSTATIC_CMAP _IO('G', 11)
-#define GCVARIABLE_CMAP _IO('G', 12)
-#define GCTERM _IOWR('G',20,int) /* multi-headed Tomcat */
-#define GCDESCRIBE _IOR('G', 21, struct grf_fbinfo)
-#define GCFASTLOCK _IO('G', 26)
-
#endif /* __ASM_PARISC_GRFIOCTL_H */
-
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
index f7f078c2872c..21e9ace17739 100644
--- a/arch/parisc/include/asm/hugetlb.h
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -6,26 +6,11 @@
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
+ pte_t *ptep, pte_t pte, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
-
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
+ pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/parisc/include/asm/ide.h b/arch/parisc/include/asm/ide.h
deleted file mode 100644
index 7aa75b93a1b6..000000000000
--- a/arch/parisc/include/asm/ide.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/include/asm-parisc/ide.h
- *
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-/*
- * This file contains the PARISC architecture specific IDE code.
- */
-
-#ifndef __ASM_PARISC_IDE_H
-#define __ASM_PARISC_IDE_H
-
-/* Generic I/O and MEMIO string operations. */
-
-#define __ide_insw insw
-#define __ide_insl insl
-#define __ide_outsw outsw
-#define __ide_outsl outsl
-
-static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u16 *)addr = __raw_readw(port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u32 *)addr = __raw_readl(port);
- addr += 4;
- }
-}
-
-static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- __raw_writew(*(u16 *)addr, port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_outsl(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- __raw_writel(*(u32 *)addr, port);
- addr += 4;
- }
-}
-
-#endif /* __ASM_PARISC_IDE_H */
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index c05e781be2f5..f01ad3ad60b5 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -125,19 +125,15 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr)
/*
* The standard PCI ioremap interfaces
*/
-void __iomem *ioremap(unsigned long offset, unsigned long size);
-#define ioremap_wc ioremap
-#define ioremap_uc ioremap
-#define pci_iounmap pci_iounmap
+#define ioremap_prot ioremap_prot
+
+#define _PAGE_IOREMAP (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
+ _PAGE_ACCESSED | _PAGE_NO_CACHE)
-extern void iounmap(const volatile void __iomem *addr);
+#define ioremap_wc(addr, size) \
+ ioremap_prot((addr), (size), __pgprot(_PAGE_IOREMAP))
-void memset_io(volatile void __iomem *addr, unsigned char val, int count);
-void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
-void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
-#define memset_io memset_io
-#define memcpy_fromio memcpy_fromio
-#define memcpy_toio memcpy_toio
+#define pci_iounmap pci_iounmap
/* Port-space IO */
@@ -228,46 +224,58 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL)))
#ifdef CONFIG_64BIT
-#define ioread64 ioread64
-#define ioread64be ioread64be
-#define iowrite64 iowrite64
-#define iowrite64be iowrite64be
extern u64 ioread64(const void __iomem *addr);
extern u64 ioread64be(const void __iomem *addr);
+#define ioread64 ioread64
+#define ioread64be ioread64be
+
extern void iowrite64(u64 val, void __iomem *addr);
extern void iowrite64be(u64 val, void __iomem *addr);
+#define iowrite64 iowrite64
+#define iowrite64be iowrite64be
#endif
-#include <asm-generic/iomap.h>
-/*
- * These get provided from <asm-generic/iomap.h> since parisc does not
- * select GENERIC_IOMAP.
- */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *);
#define ioport_map ioport_map
#define ioport_unmap ioport_unmap
+
+extern unsigned int ioread8(const void __iomem *);
+extern unsigned int ioread16(const void __iomem *);
+extern unsigned int ioread16be(const void __iomem *);
+extern unsigned int ioread32(const void __iomem *);
+extern unsigned int ioread32be(const void __iomem *);
#define ioread8 ioread8
#define ioread16 ioread16
#define ioread32 ioread32
#define ioread16be ioread16be
#define ioread32be ioread32be
+
+extern void iowrite8(u8, void __iomem *);
+extern void iowrite16(u16, void __iomem *);
+extern void iowrite16be(u16, void __iomem *);
+extern void iowrite32(u32, void __iomem *);
+extern void iowrite32be(u32, void __iomem *);
#define iowrite8 iowrite8
#define iowrite16 iowrite16
#define iowrite32 iowrite32
#define iowrite16be iowrite16be
#define iowrite32be iowrite32be
+
+extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);
#define ioread8_rep ioread8_rep
#define ioread16_rep ioread16_rep
#define ioread32_rep ioread32_rep
+
+extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
#define iowrite8_rep iowrite8_rep
#define iowrite16_rep iowrite16_rep
#define iowrite32_rep iowrite32_rep
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
extern int devmem_is_allowed(unsigned long pfn);
#include <asm-generic/io.h>
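Editor's note: with ioremap_prot() as the primitive and _PAGE_IOREMAP supplying
the uncached page protection, a driver maps MMIO roughly as below. This is a
hedged sketch; the PCI device, BAR number and register offset are made-up
placeholders, not from the patch:

	void __iomem *regs;

	regs = ioremap_wc(pci_resource_start(pdev, 0),
			  pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;
	writel(1, regs + 0x10);		/* example doorbell register */
	iounmap(regs);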
diff --git a/arch/parisc/include/asm/irqflags.h b/arch/parisc/include/asm/irqflags.h
index 38a19c0bac3a..00fd87724588 100644
--- a/arch/parisc/include/asm/irqflags.h
+++ b/arch/parisc/include/asm/irqflags.h
@@ -31,6 +31,11 @@ static inline unsigned long arch_local_irq_save(void)
static inline void arch_local_irq_restore(unsigned long flags)
{
+ /* warn if IRQs are on although they should be off */
+ if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
+ if (arch_local_save_flags() & PSW_I)
+ asm volatile("break 6,6\n"); /* SPINLOCK_BREAK_INSN */
+
asm volatile("mtsm %0" : : "r" (flags) : "memory");
}
diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h
index af2a598bc0f8..f325ae3c622f 100644
--- a/arch/parisc/include/asm/jump_label.h
+++ b/arch/parisc/include/asm/jump_label.h
@@ -2,7 +2,7 @@
#ifndef _ASM_PARISC_JUMP_LABEL_H
#define _ASM_PARISC_JUMP_LABEL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/stringify.h>
@@ -12,13 +12,15 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
+ ".align %1\n\t"
".word 1b - ., %l[l_yes] - .\n\t"
__stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
".popsection\n\t"
- : : "i" (&((char *)key)[branch]) : : l_yes);
+ : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
+ : : l_yes);
return false;
l_yes:
@@ -27,18 +29,20 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
"b,n %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
+ ".align %1\n\t"
".word 1b - ., %l[l_yes] - .\n\t"
__stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
".popsection\n\t"
- : : "i" (&((char *)key)[branch]) : : l_yes);
+ : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
+ : : l_yes);
return false;
l_yes:
return true;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
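Editor's note: the entries emitted into __jump_table back the generic
static-key API; a typical consumer looks like the sketch below (the key and the
slow-path function are hypothetical names for illustration):

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(my_feature_key);	/* hypothetical */

	static void hot_path(void)
	{
		/* compiles to the patchable nop/branch emitted above */
		if (static_branch_unlikely(&my_feature_key))
			slow_feature_work();		/* hypothetical */
	}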
diff --git a/arch/parisc/include/asm/kexec.h b/arch/parisc/include/asm/kexec.h
index 87e174006995..bf31e2d50df9 100644
--- a/arch/parisc/include/asm/kexec.h
+++ b/arch/parisc/include/asm/kexec.h
@@ -14,7 +14,7 @@
#define KEXEC_ARCH KEXEC_ARCH_PARISC
#define ARCH_HAS_KIMAGE_ARCH
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct kimage_arch {
unsigned long initrd_start;
@@ -28,6 +28,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
/* Dummy implementation for now */
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_PARISC_KEXEC_H */
diff --git a/arch/parisc/include/asm/kgdb.h b/arch/parisc/include/asm/kgdb.h
index f23e7f8f13a5..9ece98bc6d9d 100644
--- a/arch/parisc/include/asm/kgdb.h
+++ b/arch/parisc/include/asm/kgdb.h
@@ -17,9 +17,11 @@
#define NUMREGBYTES sizeof(struct parisc_gdb_regs)
#define BUFMAX 4096
+#define KGDB_MAX_BREAKPOINTS 40
+
#define CACHE_FLUSH_IS_SAFE 1
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
static inline void arch_kgdb_breakpoint(void)
{
diff --git a/arch/parisc/include/asm/kprobes.h b/arch/parisc/include/asm/kprobes.h
index 0a175ac87698..0f42f5c8e3b6 100644
--- a/arch/parisc/include/asm/kprobes.h
+++ b/arch/parisc/include/asm/kprobes.h
@@ -10,9 +10,10 @@
#ifndef _PARISC_KPROBES_H
#define _PARISC_KPROBES_H
+#include <asm-generic/kprobes.h>
+
#ifdef CONFIG_KPROBES
-#include <asm-generic/kprobes.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index 6d28b5514699..47ebc4c91eaf 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -2,39 +2,42 @@
#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H
-#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
and GCC only guarantees 8-byte alignment for stack locals, we can't
be assured of 16-byte alignment for atomic lock data even if we
specify "__attribute ((aligned(16)))" in the type declaration. So,
we use a struct containing an array of four ints for the atomic lock
type and dynamically select the 16-byte aligned int from the array
- for the semaphore. */
+ for the semaphore. */
+
+/* From: "Jim Hull" <jim.hull of hp.com>
+ I've attached a summary of the change, but basically, for PA 2.0, as
+ long as the ",CO" (coherent operation) completer is implemented, then the
+ 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+ they only require "natural" alignment (4-byte for ldcw, 8-byte for
+ ldcd).
+
+ Although the cache control hint is accepted by all PA 2.0 processors,
+ it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
+ require 16-byte alignment. If the address is unaligned, the operation
+ of the instruction is undefined. The ldcw instruction does not generate
+ unaligned data reference traps so misaligned accesses are not detected.
+ This hid the problem for years. So, restore the 16-byte alignment dropped
+ by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
#define __PA_LDCW_ALIGNMENT 16
-#define __PA_LDCW_ALIGN_ORDER 4
#define __ldcw_align(a) ({ \
unsigned long __ret = (unsigned long) &(a)->lock[0]; \
__ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
& ~(__PA_LDCW_ALIGNMENT - 1); \
(volatile unsigned int *) __ret; \
})
-#define __LDCW "ldcw"
-#else /*CONFIG_PA20*/
-/* From: "Jim Hull" <jim.hull of hp.com>
- I've attached a summary of the change, but basically, for PA 2.0, as
- long as the ",CO" (coherent operation) completer is specified, then the
- 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
- they only require "natural" alignment (4-byte for ldcw, 8-byte for
- ldcd). */
-
-#define __PA_LDCW_ALIGNMENT 4
-#define __PA_LDCW_ALIGN_ORDER 2
-#define __ldcw_align(a) (&(a)->slock)
+#ifdef CONFIG_PA20
#define __LDCW "ldcw,co"
-
-#endif /*!CONFIG_PA20*/
+#else
+#define __LDCW "ldcw"
+#endif
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
We don't explicitly expose that "*a" may be written as reload
@@ -52,7 +55,7 @@
})
#ifdef CONFIG_SMP
-# define __lock_aligned __section(".data..lock_aligned")
+# define __lock_aligned __section(".data..lock_aligned") __aligned(16)
#endif
#endif /* __PARISC_LDCW_H */
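Editor's note: the align-up step inside __ldcw_align() is ordinary
round-up-to-16 arithmetic over the four-word array; as a standalone sketch:

	#define LDCW_ALIGN 16UL

	static unsigned long ldcw_align_up(unsigned long addr)
	{
		return (addr + LDCW_ALIGN - 1) & ~(LDCW_ALIGN - 1);
	}
	/* ldcw_align_up(0x1004) == 0x1010; 0x1010 stays 0x1010 */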
diff --git a/arch/parisc/include/asm/led.h b/arch/parisc/include/asm/led.h
index 6de13d08a388..0aea47eff48d 100644
--- a/arch/parisc/include/asm/led.h
+++ b/arch/parisc/include/asm/led.h
@@ -11,8 +11,8 @@
#define LED1 0x02
#define LED0 0x01 /* bottom (or furthest left) LED */
-#define LED_LAN_TX LED0 /* for LAN transmit activity */
-#define LED_LAN_RCV LED1 /* for LAN receive activity */
+#define LED_LAN_RCV LED0 /* for LAN receive activity */
+#define LED_LAN_TX LED1 /* for LAN transmit activity */
#define LED_DISK_IO LED2 /* for disk activity */
#define LED_HEARTBEAT LED3 /* heartbeat */
@@ -25,19 +25,13 @@
#define LED_CMD_REG_NONE 0 /* NULL == no addr for the cmd register */
/* register_led_driver() */
-int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg);
-
-/* registers the LED regions for procfs */
-void __init register_led_regions(void);
+int register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg);
#ifdef CONFIG_CHASSIS_LCD_LED
/* writes a string to the LCD display (if possible on this h/w) */
-int lcd_print(const char *str);
+void lcd_print(const char *str);
#else
-#define lcd_print(str)
+#define lcd_print(str) do { } while (0)
#endif
-/* main LED initialization function (uses PDC) */
-int __init led_init(void);
-
#endif /* LED_H */
diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h
index cd6fe4febead..d4cad492b971 100644
--- a/arch/parisc/include/asm/linkage.h
+++ b/arch/parisc/include/asm/linkage.h
@@ -15,7 +15,7 @@
*/
#define ASM_NL !
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define ENTRY(name) \
ALIGN !\
@@ -35,6 +35,6 @@ name: ASM_NL\
.procend ASM_NL\
ENDPROC(name)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_PARISC_LINKAGE_H */
diff --git a/arch/parisc/include/asm/machdep.h b/arch/parisc/include/asm/machdep.h
deleted file mode 100644
index 215d2c43989d..000000000000
--- a/arch/parisc/include/asm/machdep.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _PARISC_MACHDEP_H
-#define _PARISC_MACHDEP_H
-
-#include <linux/notifier.h>
-
-#define MACH_RESTART 1
-#define MACH_HALT 2
-#define MACH_POWER_ON 3
-#define MACH_POWER_OFF 4
-
-extern struct notifier_block *mach_notifier;
-extern void pa7300lc_init(void);
-
-extern void (*cpu_lpmc)(int, struct pt_regs *);
-
-#endif
diff --git a/arch/parisc/include/asm/mckinley.h b/arch/parisc/include/asm/mckinley.h
deleted file mode 100644
index 1314390b9034..000000000000
--- a/arch/parisc/include/asm/mckinley.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_PARISC_MCKINLEY_H
-#define ASM_PARISC_MCKINLEY_H
-
-/* declared in arch/parisc/kernel/setup.c */
-extern struct proc_dir_entry * proc_mckinley_root;
-
-#endif /*ASM_PARISC_MCKINLEY_H*/
diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h
new file mode 100644
index 000000000000..663f587dc789
--- /dev/null
+++ b/arch/parisc/include/asm/mman.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <linux/fs.h>
+#include <uapi/asm/mman.h>
+
+/* PARISC cannot allow mdwe as it needs writable stacks */
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return false;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+
+static inline unsigned long arch_calc_vm_flag_bits(struct file *file, unsigned long flags)
+{
+ /*
+ * The stack on parisc grows upwards, so if userspace requests memory
+ * for a stack, mark it with VM_GROWSUP so that the stack expansion in
+ * the fault handler will work.
+ */
+ if (flags & MAP_STACK)
+ return VM_GROWSUP;
+
+ return 0;
+}
+#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags)
+
+#endif /* __ASM_MMAN_H__ */
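Editor's note: from user space the effect is visible through MAP_STACK; a
hedged example (size arbitrary):

	#include <sys/mman.h>

	/* On parisc the kernel tags this VMA VM_GROWSUP, so faults just
	 * above the mapping can grow the stack upwards. */
	void *stk = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);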
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 6faaaa3ebe9b..8f4e51071ea1 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -4,20 +4,11 @@
#include <linux/const.h>
-#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-# define PAGE_SHIFT 12
-#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
-# define PAGE_SHIFT 14
-#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
-# define PAGE_SHIFT 16
-#else
-# error "unknown default kernel page size"
-#endif
-#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#include <vdso/page.h>
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/types.h>
#include <asm/cache.h>
@@ -102,7 +93,7 @@ typedef struct __physmem_range {
extern physmem_range_t pmem_ranges[];
extern int npmem_ranges;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* WARNING: The definitions below must match exactly to sizeof(pte_t)
* etc
@@ -148,17 +139,13 @@ extern int npmem_ranges;
#define KERNEL_BINARY_TEXT_START (__PAGE_OFFSET + 0x100000)
/* These macros don't work for 64-bit C code -- don't allow in C at all */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
# define PA(x) ((x)-__PAGE_OFFSET)
# define VA(x) ((x)+__PAGE_OFFSET)
#endif
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#ifndef CONFIG_SPARSEMEM
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#endif
-
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
@@ -179,7 +166,6 @@ extern int npmem_ranges;
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#include <asm-generic/memory_model.h>
diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
index 4de3b391d812..9e74cef4d774 100644
--- a/arch/parisc/include/asm/parisc-device.h
+++ b/arch/parisc/include/asm/parisc-device.h
@@ -41,7 +41,7 @@ struct parisc_driver {
#define to_parisc_device(d) container_of(d, struct parisc_device, dev)
-#define to_parisc_driver(d) container_of(d, struct parisc_driver, drv)
+#define to_parisc_driver(d) container_of_const(d, struct parisc_driver, drv)
#define parisc_parent(d) to_parisc_device(d->dev.parent)
static inline const char *parisc_pathname(struct parisc_device *d)
@@ -61,7 +61,7 @@ parisc_get_drvdata(struct parisc_device *d)
return dev_get_drvdata(&d->dev);
}
-extern struct bus_type parisc_bus_type;
+extern const struct bus_type parisc_bus_type;
int iosapic_serial_irq(struct parisc_device *dev);
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index 40793bef8429..6080a1516b34 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -4,7 +4,7 @@
#include <uapi/asm/pdc.h>
-#if !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLER__)
extern int parisc_narrow_firmware;
@@ -44,10 +44,11 @@ int pdc_model_capabilities(unsigned long *capabilities);
int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no);
int pdc_cache_info(struct pdc_cache_info *cache);
int pdc_spaceid_bits(unsigned long *space_bits);
-#ifndef CONFIG_PA20
int pdc_btlb_info(struct pdc_btlb_info *btlb);
+int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+ unsigned long entry_info, unsigned long slot);
+int pdc_btlb_purge_all(void);
int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path);
-#endif /* !CONFIG_PA20 */
int pdc_pim_toc11(struct pdc_toc_pim_11 *ret);
int pdc_pim_toc20(struct pdc_toc_pim_20 *ret);
int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa);
@@ -80,6 +81,7 @@ int pdc_do_firm_test_reset(unsigned long ftc_bitmap);
int pdc_do_reset(void);
int pdc_soft_power_info(unsigned long *power_reg);
int pdc_soft_power_button(int sw_control);
+int pdc_soft_power_button_panic(int sw_control);
void pdc_io_reset(void);
void pdc_io_reset_devices(void);
int pdc_iodc_getc(void);
@@ -87,8 +89,8 @@ int pdc_iodc_print(const unsigned char *str, unsigned count);
void pdc_emergency_unlock(void);
int pdc_sti_call(unsigned long func, unsigned long flags,
- unsigned long inptr, unsigned long outputr,
- unsigned long glob_cfg);
+ unsigned long inptr, unsigned long outputr,
+ unsigned long glob_cfg, int do_call64);
int __pdc_cpu_rendezvous(void);
void pdc_cpu_rendezvous_lock(void);
@@ -107,5 +109,5 @@ static inline char * os_id_to_string(u16 os_id) {
}
}
-#endif /* !defined(__ASSEMBLY__) */
+#endif /* !defined(__ASSEMBLER__) */
#endif /* _PARISC_PDC_H */
diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h
index 8f160375b865..84ac81b1adde 100644
--- a/arch/parisc/include/asm/pdcpat.h
+++ b/arch/parisc/include/asm/pdcpat.h
@@ -210,7 +210,7 @@
#define PDC_PAT_SYSTEM_INFO 76L
/* PDC_PAT_SYSTEM_INFO uses the same options as PDC_SYSTEM_INFO function. */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#ifdef CONFIG_64BIT
@@ -389,6 +389,6 @@ extern int pdc_pat_mem_get_dimm_phys_location(
struct pdc_pat_mem_phys_mem_location *pret,
unsigned long phys_addr);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* ! __PARISC_PATPDC_H */
diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h
index 1e0fd8ba6c03..8a2925029d15 100644
--- a/arch/parisc/include/asm/perf_event.h
+++ b/arch/parisc/include/asm/perf_event.h
@@ -1,6 +1,12 @@
#ifndef __ASM_PARISC_PERF_EVENT_H
#define __ASM_PARISC_PERF_EVENT_H
-/* Empty, just to avoid compiling error */
+#include <asm/psw.h>
+
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->gr[0] = KERNEL_PSW; \
+ (regs)->iaoq[0] = (__ip); \
+ asm volatile("copy %%sp, %0\n":"=r"((regs)->gr[30])); \
+}
#endif /* __ASM_PARISC_PERF_EVENT_H */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index e3e142b1c5c5..3b84ee93edaa 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -11,27 +11,12 @@
#include <asm/cache.h>
#define __HAVE_ARCH_PMD_ALLOC_ONE
-#define __HAVE_ARCH_PMD_FREE
-#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>
/* Allocate the top level pgd (page directory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *pgd;
-
- pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
- if (unlikely(pgd == NULL))
- return NULL;
-
- memset(pgd, 0, PAGE_SIZE << PGD_TABLE_ORDER);
-
- return pgd;
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
- free_pages((unsigned long)pgd, PGD_TABLE_ORDER);
+ return __pgd_alloc(mm, PGD_TABLE_ORDER);
}
#if CONFIG_PGTABLE_LEVELS == 3
@@ -46,17 +31,19 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd;
-
- pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_TABLE_ORDER);
- if (likely(pmd))
- memset ((void *)pmd, 0, PAGE_SIZE << PMD_TABLE_ORDER);
- return pmd;
-}
+ struct ptdesc *ptdesc;
+ gfp_t gfp = GFP_PGTABLE_USER;
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
- free_pages((unsigned long)pmd, PMD_TABLE_ORDER);
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ ptdesc = pagetable_alloc(gfp, PMD_TABLE_ORDER);
+ if (!ptdesc)
+ return NULL;
+ if (!pagetable_pmd_ctor(mm, ptdesc)) {
+ pagetable_free(ptdesc);
+ return NULL;
+ }
+ return ptdesc_address(ptdesc);
}
#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index ea357430aafe..2c139a4dbf4b 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -12,7 +12,7 @@
#include <asm/fixmap.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* we simulate an x86-style page table for the linux mm code
*/
@@ -73,16 +73,7 @@ extern void __update_cache(pte_t pte);
mb(); \
} while(0)
-#define set_pte_at(mm, addr, pteptr, pteval) \
- do { \
- if (pte_present(pteval) && \
- pte_user(pteval)) \
- __update_cache(pteval); \
- *(pteptr) = (pteval); \
- purge_tlb_entries(mm, addr); \
- } while (0)
-
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -218,6 +209,9 @@ extern void __update_cache(pte_t pte);
#define _PAGE_KERNEL_RWX (_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL (_PAGE_KERNEL_RO | _PAGE_WRITE)
+/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_ACCESSED
+
/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
* are page-aligned, we don't care about the PAGE_OFFSET bits, except
* for a few meta-information bits, so we shift the address to be
@@ -232,7 +226,7 @@ extern void __update_cache(pte_t pte);
#define PxD_FLAG_SHIFT (4)
#define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
@@ -282,7 +276,7 @@ extern unsigned long *empty_zero_page;
#define pte_none(x) (pte_val(x) == 0)
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_user(x) (pte_val(x) & _PAGE_USER)
-#define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0))
+#define pte_clear(mm, addr, xp) set_pte_at((mm), (addr), (xp), __pte(0))
#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@@ -328,7 +322,7 @@ static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; retu
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
+static inline pte_t pte_mkwrite_novma(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
/*
@@ -344,10 +338,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; re
#endif
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
#define __mk_pte(addr,pgprot) \
({ \
pte_t __pte; \
@@ -357,8 +347,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; re
__pte; \
})
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
pte_t pte;
@@ -388,31 +376,84 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
extern void paging_init (void);
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ if (pte_present(pte) && pte_user(pte))
+ __update_cache(pte);
+ for (;;) {
+ *ptep = pte;
+ purge_tlb_entries(mm, addr);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte_val(pte) += 1 << PFN_PTE_SHIFT;
+ addr += PAGE_SIZE;
+ }
+}
+#define set_ptes set_ptes
+#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
+
/* Used for deferring calls to flush_dcache_page() */
#define PG_dcache_dirty PG_arch_1
-#define update_mmu_cache(vms,addr,ptep) __update_cache(*ptep)
-
-/* Encode and de-code a swap entry */
+#define update_mmu_cache_range(vmf, vma, addr, ptep, nr) __update_cache(*ptep)
+#define update_mmu_cache(vma, addr, ptep) __update_cache(*ptep)
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs (32bit):
+ *
+ * 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * <---------------- offset -----------------> P E <ofs> < type ->
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ * _PAGE_PRESENT (P) must be 0.
+ *
+ * For the 64bit version, the offset is extended by 32bit.
+ */
#define __swp_type(x) ((x).val & 0x1f)
-#define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \
- (((x).val >> 8) & ~0x7) )
-#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \
- ((offset & 0x7) << 6) | \
- ((offset & ~0x7) << 8) })
+#define __swp_offset(x) ( (((x).val >> 5) & 0x7) | \
+ (((x).val >> 10) << 3) )
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ ((type) & 0x1f) | \
+ ((offset & 0x7) << 5) | \
+ ((offset >> 3) << 10) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ return READ_ONCE(*ptep);
+}
+#define ptep_get ptep_get
+
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
pte_t pte;
- if (!pte_young(*ptep))
- return 0;
-
- pte = *ptep;
+ pte = ptep_get(ptep);
if (!pte_young(pte)) {
return 0;
}
@@ -420,17 +461,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
return 1;
}
-struct mm_struct;
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
- pte_t old_pte;
-
- old_pte = *ptep;
- set_pte_at(mm, addr, ptep, __pte(0));
-
- return old_pte;
-}
+int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
@@ -438,10 +472,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define pte_same(A,B) (pte_val(A) == pte_val(B))
-struct seq_file;
-extern void arch_report_meminfo(struct seq_file *m);
-
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* TLB page size encoding - see table 3-1 in parisc20.pdf */
@@ -471,7 +502,8 @@ extern void arch_report_meminfo(struct seq_file *m);
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
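Editor's note: a worked example of the 32-bit swap encoding documented in the
pgtable.h hunk above, with type 3 and offset 0x1234 (values arbitrary):

	/* encode: low 3 offset bits land at bits 5..7, the rest from bit 10 */
	unsigned long val = (3 & 0x1f) | ((0x1234 & 0x7) << 5) |
			    ((0x1234 >> 3) << 10);
	/* val == 0x91883 */

	/* decode recovers the fields */
	unsigned int  type = val & 0x1f;			      /* 3 */
	unsigned long off  = ((val >> 5) & 0x7) | ((val >> 10) << 3); /* 0x1234 */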
diff --git a/arch/parisc/include/asm/prefetch.h b/arch/parisc/include/asm/prefetch.h
index 6e63f720024d..748eefb27c68 100644
--- a/arch/parisc/include/asm/prefetch.h
+++ b/arch/parisc/include/asm/prefetch.h
@@ -16,7 +16,7 @@
#ifndef __ASM_PARISC_PREFETCH_H
#define __ASM_PARISC_PREFETCH_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_PREFETCH
#define ARCH_HAS_PREFETCH
@@ -40,6 +40,6 @@ static inline void prefetchw(const void *addr)
#endif /* CONFIG_PA20 */
#endif /* CONFIG_PREFETCH */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_PARISC_PROCESSOR_H */
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index a608970b249a..dd0b5e199559 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -9,8 +9,9 @@
#ifndef __ASM_PARISC_PROCESSOR_H
#define __ASM_PARISC_PROCESSOR_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/threads.h>
+#include <linux/irqreturn.h>
#include <asm/assembly.h>
#include <asm/prefetch.h>
@@ -19,7 +20,7 @@
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/percpu.h>
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -44,8 +45,10 @@
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX DEFAULT_TASK_SIZE
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
+struct rlimit;
+unsigned long mmap_upper_limit(const struct rlimit *rlim_stack);
unsigned long calc_max_stack_size(unsigned long stack_max);
/*
@@ -286,12 +289,42 @@ extern int _parisc_requires_coherency;
#endif
extern int running_on_qemu;
+extern int parisc_narrow_firmware;
extern void __noreturn toc_intr(struct pt_regs *regs);
extern void toc_handler(void);
extern unsigned int toc_handler_size;
extern unsigned int toc_handler_csum;
-
-#endif /* __ASSEMBLY__ */
+extern void do_cpu_irq_mask(struct pt_regs *);
+extern irqreturn_t timer_interrupt(int, void *);
+extern irqreturn_t ipi_interrupt(int, void *);
+extern void parisc_clockevent_init(void);
+extern void handle_interruption(int, struct pt_regs *);
+
+/* called from assembly code: */
+extern void start_parisc(void);
+extern void smp_callin(unsigned long);
+extern void sys_rt_sigreturn(struct pt_regs *, int);
+extern void do_notify_resume(struct pt_regs *, long);
+extern long do_syscall_trace_enter(struct pt_regs *);
+extern void do_syscall_trace_exit(struct pt_regs *);
+
+/* CPU startup and info */
+struct seq_file;
+extern void early_trap_init(void);
+extern void collect_boot_cpu_data(void);
+extern void btlb_init_per_cpu(void);
+extern int show_cpuinfo (struct seq_file *m, void *v);
+
+/* driver code in driver/parisc */
+extern void processor_init(void);
+struct parisc_device;
+struct resource;
+extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
+extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
+extern void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask);
+extern void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp);
+
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_PARISC_PROCESSOR_H */
diff --git a/arch/parisc/include/asm/psw.h b/arch/parisc/include/asm/psw.h
index 46921ffcc407..9140e1ab7e63 100644
--- a/arch/parisc/include/asm/psw.h
+++ b/arch/parisc/include/asm/psw.h
@@ -60,7 +60,7 @@
#define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
#define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* The program status word as bitfields. */
struct pa_psw {
@@ -99,6 +99,6 @@ struct pa_psw {
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
#endif
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/parisc/include/asm/ropes.h b/arch/parisc/include/asm/ropes.h
index 8e51c775c80a..e2d2d7e9bfde 100644
--- a/arch/parisc/include/asm/ropes.h
+++ b/arch/parisc/include/asm/ropes.h
@@ -29,7 +29,7 @@
struct ioc {
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
- u64 *pdir_base; /* physical base address */
+ __le64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT
@@ -86,6 +86,9 @@ struct sba_device {
struct ioc ioc[MAX_IOC];
};
+/* list of SBA's in system, see drivers/parisc/sba_iommu.c */
+extern struct sba_device *sba_list;
+
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804
@@ -110,7 +113,7 @@ static inline int IS_PLUTO(struct parisc_device *d) {
#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
-#define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL
+#define SBA_AGPGART_COOKIE (__force __le64) 0x0000badbadc0ffeeULL
#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
@@ -252,7 +255,7 @@ static inline int agp_mode_mercury(void __iomem *hpa) {
** fixup_irq is to initialize PCI IRQ line support and
** virtualize pcidev->irq value. To be called by pci_fixup_bus().
*/
-extern void *iosapic_register(unsigned long hpa);
+extern void *iosapic_register(unsigned long hpa, void __iomem *vaddr);
extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev);
#define LBA_FUNC_ID 0x0000 /* function id */
diff --git a/arch/parisc/include/asm/runway.h b/arch/parisc/include/asm/runway.h
index 5cf061376ddb..2837f0223d6d 100644
--- a/arch/parisc/include/asm/runway.h
+++ b/arch/parisc/include/asm/runway.h
@@ -2,9 +2,6 @@
#ifndef ASM_PARISC_RUNWAY_H
#define ASM_PARISC_RUNWAY_H
-/* declared in arch/parisc/kernel/setup.c */
-extern struct proc_dir_entry * proc_runway_root;
-
#define RUNWAY_STATUS 0x10
#define RUNWAY_DEBUG 0x40
diff --git a/arch/parisc/include/asm/shmparam.h b/arch/parisc/include/asm/shmparam.h
index 74f74e4d35b7..5a95b0f62b87 100644
--- a/arch/parisc/include/asm/shmparam.h
+++ b/arch/parisc/include/asm/shmparam.h
@@ -2,6 +2,21 @@
#ifndef _ASMPARISC_SHMPARAM_H
#define _ASMPARISC_SHMPARAM_H
+/*
+ * PA-RISC uses virtually indexed & physically tagged (VIPT) caches
+ * which have strict requirements when the same physical address is
+ * accessed through different virtual mappings. Read the section
+ * "Address Aliasing" in the arch docs for more detail:
+ * PA-RISC 1.1 (page 3-6):
+ * https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf
+ * PA-RISC 2.0 (page F-5):
+ * https://parisc.wiki.kernel.org/images-parisc/7/73/Parisc2.0.pdf
+ *
+ * For Linux we allow kernel and userspace to map pages on page size
+ * granularity (SHMLBA) but have to ensure that, if two pages are
+ * mapped to the same physical address, the virtual and physical
+ * addresses modulo SHM_COLOUR are identical.
+ */
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#define SHM_COLOUR 0x00400000 /* shared mappings colouring */
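Editor's note: the aliasing rule above reduces to a congruence check; a sketch:

	/* Two virtual mappings of one physical page are safe on the VIPT
	 * caches only if they agree modulo SHM_COLOUR (4 MiB): */
	static int colour_compatible(unsigned long va1, unsigned long va2)
	{
		return ((va1 ^ va2) & (SHM_COLOUR - 1)) == 0;
	}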
diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h
index 715c96ba2ec8..85c3d7409bbc 100644
--- a/arch/parisc/include/asm/signal.h
+++ b/arch/parisc/include/asm/signal.h
@@ -4,24 +4,12 @@
#include <uapi/asm/signal.h>
-#define _NSIG 64
-/* bits-per-word, where word apparently means 'long' not 'int' */
-#define _NSIG_BPW BITS_PER_LONG
-#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-
-# ifndef __ASSEMBLY__
+# ifndef __ASSEMBLER__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
-typedef unsigned long old_sigset_t; /* at least 32 bits */
-
-typedef struct {
- /* next_signal() assumes this is a long - no choice */
- unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
#include <asm/sigcontext.h>
-#endif /* !__ASSEMBLY */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_PARISC_SIGNAL_H */
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 94d1f21ce99a..0cf1c3a2696a 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -12,7 +12,7 @@ extern int init_per_cpu(int cpuid);
#define PDC_OS_BOOT_RENDEZVOUS 0x10
#define PDC_OS_BOOT_RENDEZVOUS_HI 0x28
-#ifndef ASSEMBLY
+#ifndef __ASSEMBLER__
#include <linux/bitops.h>
#include <linux/threads.h> /* for NR_CPUS */
#include <linux/cpumask.h>
@@ -34,7 +34,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#define raw_smp_processor_id() (current_thread_info()->cpu)
-#endif /* !ASSEMBLY */
+#endif /* !__ASSEMBLER__ */
#else /* CONFIG_SMP */
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
index c822bd0c0e3c..1013eeba31e5 100644
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -8,7 +8,8 @@
"copy %%r0,%0\n" \
"8:\tlpa %%r0(%1),%0\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \
+ "or %%r0,%%r0,%%r0") \
: "=&r" (pa) \
: "r" (va) \
: "memory" \
@@ -22,7 +23,8 @@
"copy %%r0,%0\n" \
"8:\tlpa %%r0(%%sr3,%1),%0\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \
+ "or %%r0,%%r0,%%r0") \
: "=&r" (pa) \
: "r" (va) \
: "memory" \
@@ -30,6 +32,34 @@
pa; \
})
+/**
+ * prober_user() - Probe user read access
+ * @sr: Space register.
+ * @va: Virtual address.
+ *
+ * Return: Non-zero if address is accessible.
+ *
+ * Due to the way _PAGE_READ is handled in TLB entries, we need
+ * a special check to determine whether a user address is accessible.
+ * The ldb instruction does the initial access check. If it is
+ * successful, the probe instruction checks user access rights.
+ */
+#define prober_user(sr, va) ({ \
+ unsigned long read_allowed; \
+ __asm__ __volatile__( \
+ "copy %%r0,%0\n" \
+ "8:\tldb 0(%%sr%1,%2),%%r0\n" \
+ "\tproberi (%%sr%1,%2),%3,%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \
+ "or %%r0,%%r0,%%r0") \
+ : "=&r" (read_allowed) \
+ : "i" (sr), "r" (va), "i" (PRIV_USER) \
+ : "memory" \
+ ); \
+ read_allowed; \
+})
+
#define CR_EIEM 15 /* External Interrupt Enable Mask */
#define CR_CR16 16 /* CR16 Interval Timer */
#define CR_EIRR 23 /* External Interrupt Request Register */
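Editor's note: a hedged sketch of how a fault path might use prober_user();
the wrapper function is invented for illustration:

	/* illustrative only: reject user reads the TLB would wrongly allow */
	static int can_read_user(const void __user *uaddr)
	{
		if (!prober_user(SR_USER, (unsigned long)uaddr))
			return -EFAULT;
		return 0;
	}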
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index a6e5d66a7656..0b326e52255e 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -7,10 +7,24 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>
+static inline void arch_spin_val_check(int lock_val)
+{
+ if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
+ asm volatile( "andcm,= %0,%1,%%r0\n"
+ ".word %2\n"
+ : : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
+ "i" (SPINLOCK_BREAK_INSN));
+}
+
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
- volatile unsigned int *a = __ldcw_align(x);
- return READ_ONCE(*a) == 0;
+ volatile unsigned int *a;
+ int lock_val;
+
+ a = __ldcw_align(x);
+ lock_val = READ_ONCE(*a);
+ arch_spin_val_check(lock_val);
+ return (lock_val == 0);
}
static inline void arch_spin_lock(arch_spinlock_t *x)
@@ -18,9 +32,18 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
volatile unsigned int *a;
a = __ldcw_align(x);
- while (__ldcw(a) == 0)
+ do {
+ int lock_val_old;
+
+ lock_val_old = __ldcw(a);
+ arch_spin_val_check(lock_val_old);
+ if (lock_val_old)
+ return; /* got lock */
+
+ /* wait until we should try to get lock again */
while (*a == 0)
continue;
+ } while (1);
}
static inline void arch_spin_unlock(arch_spinlock_t *x)
@@ -29,15 +52,19 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
a = __ldcw_align(x);
/* Release with ordered store. */
- __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+ __asm__ __volatile__("stw,ma %0,0(%1)"
+ : : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
volatile unsigned int *a;
+ int lock_val;
a = __ldcw_align(x);
- return __ldcw(a) != 0;
+ lock_val = __ldcw(a);
+ arch_spin_val_check(lock_val);
+ return lock_val != 0;
}
/*
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index ca39ee350c3f..8e6889bc23cc 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -2,14 +2,17 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
+#define __ARCH_SPIN_LOCK_UNLOCKED_VAL 0x1a46
+
+#define SPINLOCK_BREAK_INSN 0x0000c006 /* break 6,6 */
+
+#ifndef __ASSEMBLER__
+
typedef struct {
-#ifdef CONFIG_PA20
- volatile unsigned int slock;
-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
-#else
volatile unsigned int lock[4];
-# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
-#endif
+# define __ARCH_SPIN_LOCK_UNLOCKED \
+ { { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
+ __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
} arch_spinlock_t;
@@ -23,6 +26,8 @@ typedef struct {
volatile unsigned int counter;
} arch_rwlock_t;
+#endif /* __ASSEMBLER__ */
+
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
#define __ARCH_RW_LOCK_UNLOCKED { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
.counter = __ARCH_RW_LOCK_UNLOCKED__ }
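Editor's note: in C terms, the lightweight check in arch_spin_val_check() (see
the spinlock.h hunk above) traps whenever the lock word carries a bit outside
the magic unlocked pattern; a sketch of the andcm,= / break sequence:

	static void lock_val_sane(unsigned int v)
	{
		/* neither locked (0) nor 0x1a46: the word was overwritten */
		if (v & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL)
			__builtin_trap();  /* kernel uses SPINLOCK_BREAK_INSN */
	}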
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
index 00b127a5e09b..c11222798ab2 100644
--- a/arch/parisc/include/asm/syscall.h
+++ b/arch/parisc/include/asm/syscall.h
@@ -17,6 +17,13 @@ static inline long syscall_get_nr(struct task_struct *tsk,
return regs->gr[20];
}
+static inline void syscall_set_nr(struct task_struct *tsk,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->gr[20] = nr;
+}
+
static inline void syscall_get_arguments(struct task_struct *tsk,
struct pt_regs *regs,
unsigned long *args)
@@ -29,6 +36,18 @@ static inline void syscall_get_arguments(struct task_struct *tsk,
args[0] = regs->gr[26];
}
+static inline void syscall_set_arguments(struct task_struct *tsk,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ regs->gr[21] = args[5];
+ regs->gr[22] = args[4];
+ regs->gr[23] = args[3];
+ regs->gr[24] = args[2];
+ regs->gr[25] = args[1];
+ regs->gr[26] = args[0];
+}
+
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
diff --git a/arch/parisc/include/asm/patch.h b/arch/parisc/include/asm/text-patching.h
index 400d84c6e504..400d84c6e504 100644
--- a/arch/parisc/include/asm/patch.h
+++ b/arch/parisc/include/asm/text-patching.h
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 1a58795f785c..b283738bb6da 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -2,7 +2,7 @@
#ifndef _ASM_PARISC_THREAD_INFO_H
#define _ASM_PARISC_THREAD_INFO_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/processor.h>
#include <asm/special_insns.h>
@@ -20,7 +20,7 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#endif /* !__ASSEMBLY */
+#endif /* !__ASSEMBLER__ */
/* thread information allocation */
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 0ccdb738a9a3..10c8fb68e404 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -4,7 +4,7 @@
#define PARISC_ITLB_TRAP 6 /* defined by architecture. Do not change. */
-#if !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLER__)
struct pt_regs;
/* traps.c */
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 2bf660eabe42..6c531d2c847e 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -7,6 +7,7 @@
*/
#include <asm/page.h>
#include <asm/cache.h>
+#include <asm/extable.h>
#include <linux/bug.h>
#include <linux/string.h>
@@ -26,36 +27,6 @@
#define STD_USER(sr, x, ptr) __put_user_asm(sr, "std", x, ptr)
#endif
-/*
- * The exception table contains two values: the first is the relative offset to
- * the address of the instruction that is allowed to fault, and the second is
- * the relative offset to the address of the fixup routine. Since relative
- * addresses are used, 32bit values are sufficient even on 64bit kernel.
- */
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-struct exception_table_entry {
- int insn; /* relative address of insn that is allowed to fault. */
- int fixup; /* relative address of fixup routine */
-};
-
-#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
- ".section __ex_table,\"aw\"\n" \
- ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
- ".previous\n"
-
-/*
- * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
- * (with lowest bit set) for which the fault handler in fixup_exception() will
- * load -EFAULT into %r29 for a read or write fault, and zeroes the target
- * register in case of a read fault in get_user().
- */
-#define ASM_EXCEPTIONTABLE_REG 29
-#define ASM_EXCEPTIONTABLE_VAR(__variable) \
- register long __variable __asm__ ("r29") = 0
-#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
- ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
-
#define __get_user_internal(sr, val, ptr) \
({ \
ASM_EXCEPTIONTABLE_VAR(__gu_err); \
@@ -71,9 +42,24 @@ struct exception_table_entry {
__gu_err; \
})
-#define __get_user(val, ptr) \
-({ \
- __get_user_internal(SR_USER, val, ptr); \
+#define __probe_user_internal(sr, error, ptr) \
+({ \
+ __asm__("\tproberi (%%sr%1,%2),%3,%0\n" \
+ "\tcmpiclr,= 1,%0,%0\n" \
+ "\tldi %4,%0\n" \
+ : "=r"(error) \
+ : "i"(sr), "r"(ptr), "i"(PRIV_USER), \
+ "i"(-EFAULT)); \
+})
+
+#define __get_user(val, ptr) \
+({ \
+ register long __gu_err; \
+ \
+ __gu_err = __get_user_internal(SR_USER, val, ptr); \
+ if (likely(!__gu_err)) \
+ __probe_user_internal(SR_USER, __gu_err, ptr); \
+ __gu_err; \
})
#define __get_user_asm(sr, val, ldx, ptr) \
@@ -82,7 +68,7 @@ struct exception_table_entry {
\
__asm__("1: " ldx " 0(%%sr%2,%3),%0\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \
: "=r"(__gu_val), "+r"(__gu_err) \
: "i"(sr), "r"(ptr)); \
\
@@ -114,8 +100,8 @@ struct exception_table_entry {
"1: ldw 0(%%sr%2,%3),%0\n" \
"2: ldw 4(%%sr%2,%3),%R0\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%1") \
: "=&r"(__gu_tmp.l), "+r"(__gu_err) \
: "i"(sr), "r"(ptr)); \
\
@@ -173,7 +159,7 @@ struct exception_table_entry {
__asm__ __volatile__ ( \
"1: " stx " %1,0(%%sr%2,%3)\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \
: "+r"(__pu_err) \
: "r"(x), "i"(sr), "r"(ptr))
@@ -185,15 +171,14 @@ struct exception_table_entry {
"1: stw %1,0(%%sr%2,%3)\n" \
"2: stw %R1,4(%%sr%2,%3)\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%0") \
: "+r"(__pu_err) \
: "r"(__val), "i"(sr), "r"(ptr)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
-
/*
* Complex access routines -- external declarations
*/
@@ -215,7 +200,4 @@ unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
-struct pt_regs;
-int fixup_exception(struct pt_regs *regs);
-
#endif /* __PARISC_UACCESS_H */
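Editor's note: callers are unchanged by the probe; a hedged usage sketch (the
pointer name is invented):

	int val, err;

	/* __get_user() now also probes read permission via proberi, so a
	 * mapped-but-unreadable user page yields -EFAULT here. */
	err = __get_user(val, (int __user *)uptr);	/* uptr: example */
	if (err)
		return err;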
diff --git a/arch/parisc/include/asm/unaligned.h b/arch/parisc/include/asm/unaligned.h
deleted file mode 100644
index c0621295100d..000000000000
--- a/arch/parisc/include/asm/unaligned.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_PARISC_UNALIGNED_H
-#define _ASM_PARISC_UNALIGNED_H
-
-#include <asm-generic/unaligned.h>
-
-struct pt_regs;
-void handle_unaligned(struct pt_regs *regs);
-int check_unaligned(struct pt_regs *regs);
-
-#endif /* _ASM_PARISC_UNALIGNED_H */
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index e38f9a90ac15..3e46c6ea9df6 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -6,7 +6,7 @@
#define __NR_Linux_syscalls __NR_syscalls
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define SYS_ify(syscall_name) __NR_##syscall_name
@@ -20,7 +20,7 @@
* sysdeps/unix/sysv/linux/hppa/sysdep.h
*/
-#ifdef PIC
+#ifndef DONT_USE_PIC
/* WARNING: CANNOT BE USED IN A NOP! */
# define K_STW_ASM_PIC " copy %%r19, %%r4\n"
# define K_LDW_ASM_PIC " copy %%r4, %%r19\n"
@@ -43,7 +43,7 @@
across the syscall. */
#define K_CALL_CLOB_REGS "%r1", "%r2", K_USING_GR4 \
- "%r20", "%r29", "%r31"
+ "%r20", "%r29", "%r31"
#undef K_INLINE_SYSCALL
#define K_INLINE_SYSCALL(name, nr, args...) ({ \
@@ -58,7 +58,7 @@
" ldi %1, %%r20\n" \
K_LDW_ASM_PIC \
: "=r" (__res) \
- : "i" (SYS_ify(name)) K_ASM_ARGS_##nr \
+ : "i" (name) K_ASM_ARGS_##nr \
: "memory", K_CALL_CLOB_REGS K_CLOB_ARGS_##nr \
); \
__sys_res = (long)__res; \
@@ -104,42 +104,18 @@
#define K_CLOB_ARGS_1 K_CLOB_ARGS_2, "%r25"
#define K_CLOB_ARGS_0 K_CLOB_ARGS_1, "%r26"
-#define _syscall0(type,name) \
-type name(void) \
-{ \
- return K_INLINE_SYSCALL(name, 0); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
- return K_INLINE_SYSCALL(name, 1, arg1); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1, type2 arg2) \
-{ \
- return K_INLINE_SYSCALL(name, 2, arg1, arg2); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1, type2 arg2, type3 arg3) \
-{ \
- return K_INLINE_SYSCALL(name, 3, arg1, arg2, arg3); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
- return K_INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4); \
-}
-
-/* select takes 5 arguments */
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
- return K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5); \
-}
+#define syscall0(name) \
+ K_INLINE_SYSCALL(name, 0)
+#define syscall1(name, arg1) \
+ K_INLINE_SYSCALL(name, 1, arg1)
+#define syscall2(name, arg1, arg2) \
+ K_INLINE_SYSCALL(name, 2, arg1, arg2)
+#define syscall3(name, arg1, arg2, arg3) \
+ K_INLINE_SYSCALL(name, 3, arg1, arg2, arg3)
+#define syscall4(name, arg1, arg2, arg3, arg4) \
+ K_INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4)
+#define syscall5(name, arg1, arg2, arg3, arg4, arg5) \
+ K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5)
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_STAT64
@@ -160,7 +136,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#define __ARCH_WANT_COMPAT_STAT
@@ -169,7 +144,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_UTIME
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#undef STR
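Editor's note: after the SYS_ify() drop above, the reworked wrappers take the
syscall number directly; an illustrative call:

	#include <asm/unistd.h>

	/* sketch: issue getpid via the bare wrapper */
	long pid = syscall0(__NR_getpid);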
diff --git a/arch/parisc/include/asm/vdso.h b/arch/parisc/include/asm/vdso.h
index ef8206193f82..81bc1d42802a 100644
--- a/arch/parisc/include/asm/vdso.h
+++ b/arch/parisc/include/asm/vdso.h
@@ -2,7 +2,7 @@
#ifndef __PARISC_VDSO_H__
#define __PARISC_VDSO_H__
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_64BIT
#include <generated/vdso64-offsets.h>
@@ -12,13 +12,11 @@
#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
-extern struct vdso_data *vdso_data;
-
-#endif /* __ASSEMBLY __ */
+#endif /* __ASSEMBLER__ */
/* Default link addresses for the vDSOs */
#define VDSO_LBASE 0
-#define VDSO_VERSION_STRING LINUX_5.18
+#define VDSO_VERSION_STRING LINUX_6.11
#endif /* __PARISC_VDSO_H__ */
diff --git a/arch/parisc/include/asm/video.h b/arch/parisc/include/asm/video.h
new file mode 100644
index 000000000000..a9d50ebd6e76
--- /dev/null
+++ b/arch/parisc/include/asm/video.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_VIDEO_H_
+#define _ASM_VIDEO_H_
+
+#include <linux/types.h>
+
+struct device;
+
+#if defined(CONFIG_STI_CORE) && defined(CONFIG_VIDEO)
+bool video_is_primary_device(struct device *dev);
+#define video_is_primary_device video_is_primary_device
+#endif
+
+#include <asm-generic/video.h>
+
+#endif /* _ASM_VIDEO_H_ */
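
The define-to-its-own-name line above is the usual kernel idiom for overriding an asm-generic default: the generic header supplies a fallback only when the macro is absent. A sketch of the generic side of such a pattern (assumed shape for illustration, not quoted from <asm-generic/video.h>):

    /* in the generic header: provide a stub only when the architecture
     * did not define the macro */
    #ifndef video_is_primary_device
    static inline bool video_is_primary_device(struct device *dev)
    {
            return false;
    }
    #endif
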
diff --git a/arch/parisc/include/uapi/asm/cachectl.h b/arch/parisc/include/uapi/asm/cachectl.h
new file mode 100644
index 000000000000..68d6b455498b
--- /dev/null
+++ b/arch/parisc/include/uapi/asm/cachectl.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_CACHECTL
+#define _ASM_CACHECTL
+
+/*
+ * Options for cacheflush system call
+ */
+#define ICACHE (1<<0) /* flush instruction cache */
+#define DCACHE (1<<1) /* writeback and flush data cache */
+#define BCACHE (ICACHE|DCACHE) /* flush both caches */
+
+#endif /* _ASM_CACHECTL */
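
These flags are consumed by the cacheflush() syscall implemented in arch/parisc/kernel/cache.c later in this series. A minimal userspace sketch, e.g. for a JIT that has just written instructions (assumes a parisc build where <asm/unistd.h> provides __NR_cacheflush):

    #include <stddef.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/cachectl.h>       /* ICACHE, DCACHE, BCACHE */

    /* make freshly generated code visible to instruction fetch */
    static int sync_code(void *buf, size_t len)
    {
    #ifdef __NR_cacheflush
            return syscall(__NR_cacheflush, (unsigned long)buf, len, BCACHE);
    #else
            return 0;       /* not a parisc build; nothing to do */
    #endif
    }
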
diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
index 87245c584784..8d94739d75c6 100644
--- a/arch/parisc/include/uapi/asm/errno.h
+++ b/arch/parisc/include/uapi/asm/errno.h
@@ -75,7 +75,6 @@
/* We now return you to your regularly scheduled HPUX. */
-#define ENOSYM 215 /* symbol does not exist in executable */
#define ENOTSOCK 216 /* Socket operation on non-socket */
#define EDESTADDRREQ 217 /* Destination address required */
#define EMSGSIZE 218 /* Message too long */
@@ -101,7 +100,6 @@
#define ETIMEDOUT 238 /* Connection timed out */
#define ECONNREFUSED 239 /* Connection refused */
#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
-#define EREMOTERELEASE 240 /* Remote peer released connection */
#define EHOSTDOWN 241 /* Host is down */
#define EHOSTUNREACH 242 /* No route to host */
diff --git a/arch/parisc/include/uapi/asm/ioctls.h b/arch/parisc/include/uapi/asm/ioctls.h
index 82d1148c6379..74b4027a4e80 100644
--- a/arch/parisc/include/uapi/asm/ioctls.h
+++ b/arch/parisc/include/uapi/asm/ioctls.h
@@ -10,10 +10,10 @@
#define TCSETS _IOW('T', 17, struct termios) /* TCSETATTR */
#define TCSETSW _IOW('T', 18, struct termios) /* TCSETATTRD */
#define TCSETSF _IOW('T', 19, struct termios) /* TCSETATTRF */
-#define TCGETA _IOR('T', 1, struct termio)
-#define TCSETA _IOW('T', 2, struct termio)
-#define TCSETAW _IOW('T', 3, struct termio)
-#define TCSETAF _IOW('T', 4, struct termio)
+#define TCGETA 0x40125401
+#define TCSETA 0x80125402
+#define TCSETAW 0x80125403
+#define TCSETAF 0x80125404
#define TCSBRK _IO('T', 5)
#define TCXONC _IO('T', 6)
#define TCFLSH _IO('T', 7)
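
Hardcoding the four values keeps the ABI bit-for-bit identical while removing the header's dependency on struct termio. Decoding TCGETA with parisc's ioctl layout (direction in bits 30..31 with _IOC_READ=1 and _IOC_WRITE=2 on this architecture, size in bits 16..29, type in bits 8..15, number in bits 0..7; sizeof(struct termio) is 18):

    #include <assert.h>

    int main(void)
    {
            /* dir READ (1 on parisc) | size 0x12 | type 'T' | nr 1 */
            unsigned int tcgeta = (1u << 30) | (0x12u << 16) | ('T' << 8) | 1;

            assert(tcgeta == 0x40125401);   /* the new TCGETA value */
            return 0;
    }
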
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index 68c44f99bc93..b6a709506987 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -75,6 +75,9 @@
#define MADV_HWPOISON 100 /* poison a page for testing */
#define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */
+#define MADV_GUARD_INSTALL 102 /* fatal signal on access to range */
+#define MADV_GUARD_REMOVE 103 /* unguard range */
+
/* compatibility flags */
#define MAP_FILE 0
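
MADV_GUARD_INSTALL and MADV_GUARD_REMOVE expose the generic guard-region madvise operations under parisc's nonstandard advice numbering. A minimal usage sketch (the fallback defines simply mirror the values added above, for builds against older headers):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/mman.h>

    #ifndef MADV_GUARD_INSTALL
    #define MADV_GUARD_INSTALL 102
    #define MADV_GUARD_REMOVE  103
    #endif

    int main(void)
    {
            long pagesz = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, 4 * pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            /* guard the second page: any access now raises SIGSEGV */
            if (madvise(p + pagesz, pagesz, MADV_GUARD_INSTALL))
                    perror("madvise");
            /* ... later, make the page usable again */
            madvise(p + pagesz, pagesz, MADV_GUARD_REMOVE);
            return 0;
    }
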
diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
index 7a90070136e8..65031ddf8372 100644
--- a/arch/parisc/include/uapi/asm/pdc.h
+++ b/arch/parisc/include/uapi/asm/pdc.h
@@ -58,8 +58,8 @@
#define PDC_MODEL_NVA_SUPPORTED (0 << 4)
#define PDC_MODEL_NVA_SLOW (1 << 4)
#define PDC_MODEL_NVA_UNSUPPORTED (3 << 4)
-#define PDC_MODEL_GET_BOOT__OP 8 /* returns boot test options */
-#define PDC_MODEL_SET_BOOT__OP 9 /* set boot test options */
+#define PDC_MODEL_FIRM_TEST_GET 8 /* returns boot test options */
+#define PDC_MODEL_FIRM_TEST_SET 9 /* set boot test options */
#define PDC_MODEL_GET_PLATFORM_INFO 10 /* returns platform info */
#define PDC_MODEL_GET_INSTALL_KERNEL 11 /* returns kernel for installation */
@@ -361,7 +361,7 @@
/* size of the pdc_result buffer for firmware.c */
#define NUM_PDC_RESULT 32
-#if !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLER__)
/* flags for hardware_path */
#define PF_AUTOBOOT 0x80
@@ -472,6 +472,7 @@ struct pdc_model { /* for PDC_MODEL */
unsigned long arch_rev;
unsigned long pot_key;
unsigned long curr_key;
+ unsigned long width; /* default of PSW_W bit (1=enabled) */
};
struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
@@ -609,6 +610,12 @@ struct pdc_system_map_addr_info { /* PDC_SYSTEM_MAP/FIND_ADDRESS */
unsigned long mod_pgs;
};
+struct pdc_relocate_info_block { /* PDC_RELOCATE_INFO */
+ unsigned long pdc_size;
+ unsigned long pdc_alignment;
+ unsigned long pdc_address;
+};
+
struct pdc_initiator { /* PDC_INITIATOR */
int host_id;
int factor;
@@ -717,6 +724,23 @@ struct pdc_toc_pim_20 {
struct pim_cpu_state_cf cpu_state;
};
-#endif /* !defined(__ASSEMBLY__) */
+/* for SpeedyBoot/firm_ctl functionality */

+struct pdc_firm_test_get_rtn_block { /* PDC_MODEL/PDC_FIRM_TEST_GET */
+ unsigned long current_tests; /* u_R_addr Raddr_ints[0] */
+ unsigned long tests_supported; /* u_R_addr Raddr_ints[1] */
+ unsigned long default_tests; /* u_R_addr Raddr_ints[2] */
+};
+
+#define TORNADO_CPU_ID 0xB
+#define PCXL_CPU_ID 0xD
+#define PCXU_CPU_ID 0xE /* U and U+ for all but C-class with bug */
+#define VR_CPU_ID 0xF
+#define PCXU_PLUS_CPU_ID 0x10 /* U+ only on C-class with bug */
+#define PCXW_CPU_ID 0x11
+#define PCXW_PLUS_CPU_ID 0x12
+#define PIRANHA_CPU_ID 0x13
+#define MAKO_CPU_ID 0x14
+
+#endif /* !defined(__ASSEMBLER__) */
#endif /* _UAPI_PARISC_PDC_H */
diff --git a/arch/parisc/include/uapi/asm/perf_regs.h b/arch/parisc/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..1ae687bb3d3c
--- /dev/null
+++ b/arch/parisc/include/uapi/asm/perf_regs.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_PARISC_PERF_REGS_H
+#define _UAPI_ASM_PARISC_PERF_REGS_H
+
+/* see struct user_regs_struct */
+enum perf_event_parisc_regs {
+ PERF_REG_PARISC_R0, /* PSW is in gr[0] */
+ PERF_REG_PARISC_R1,
+ PERF_REG_PARISC_R2,
+ PERF_REG_PARISC_R3,
+ PERF_REG_PARISC_R4,
+ PERF_REG_PARISC_R5,
+ PERF_REG_PARISC_R6,
+ PERF_REG_PARISC_R7,
+ PERF_REG_PARISC_R8,
+ PERF_REG_PARISC_R9,
+ PERF_REG_PARISC_R10,
+ PERF_REG_PARISC_R11,
+ PERF_REG_PARISC_R12,
+ PERF_REG_PARISC_R13,
+ PERF_REG_PARISC_R14,
+ PERF_REG_PARISC_R15,
+ PERF_REG_PARISC_R16,
+ PERF_REG_PARISC_R17,
+ PERF_REG_PARISC_R18,
+ PERF_REG_PARISC_R19,
+ PERF_REG_PARISC_R20,
+ PERF_REG_PARISC_R21,
+ PERF_REG_PARISC_R22,
+ PERF_REG_PARISC_R23,
+ PERF_REG_PARISC_R24,
+ PERF_REG_PARISC_R25,
+ PERF_REG_PARISC_R26,
+ PERF_REG_PARISC_R27,
+ PERF_REG_PARISC_R28,
+ PERF_REG_PARISC_R29,
+ PERF_REG_PARISC_R30,
+ PERF_REG_PARISC_R31,
+
+ PERF_REG_PARISC_SR0,
+ PERF_REG_PARISC_SR1,
+ PERF_REG_PARISC_SR2,
+ PERF_REG_PARISC_SR3,
+ PERF_REG_PARISC_SR4,
+ PERF_REG_PARISC_SR5,
+ PERF_REG_PARISC_SR6,
+ PERF_REG_PARISC_SR7,
+
+ PERF_REG_PARISC_IAOQ0,
+ PERF_REG_PARISC_IAOQ1,
+ PERF_REG_PARISC_IASQ0,
+ PERF_REG_PARISC_IASQ1,
+
+ PERF_REG_PARISC_SAR, /* CR11 */
+ PERF_REG_PARISC_IIR, /* CR19 */
+ PERF_REG_PARISC_ISR, /* CR20 */
+ PERF_REG_PARISC_IOR, /* CR21 */
+ PERF_REG_PARISC_IPSW, /* CR22 */
+
+ PERF_REG_PARISC_MAX
+};
+
+#endif /* _UAPI_ASM_PARISC_PERF_REGS_H */
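
This enumeration lets perf sample user registers on parisc; the matching perf_regs.o is wired into the kernel Makefile further down. A sketch of an event attribute requesting IAOQ0 (the front of the instruction address queue, i.e. the user PC) and r2 (the return pointer):

    #include <string.h>
    #include <linux/perf_event.h>
    #include <asm/perf_regs.h>      /* the parisc enum added above */

    static void want_user_regs(struct perf_event_attr *attr)
    {
            memset(attr, 0, sizeof(*attr));
            attr->size = sizeof(*attr);
            attr->type = PERF_TYPE_HARDWARE;
            attr->config = PERF_COUNT_HW_CPU_CYCLES;
            attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER;
            attr->sample_regs_user = (1ULL << PERF_REG_PARISC_IAOQ0) |
                                     (1ULL << PERF_REG_PARISC_R2);
    }
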
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index 8e4895c5ea5d..d99accf37341 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -57,10 +57,20 @@
#include <asm-generic/signal-defs.h>
-# ifndef __ASSEMBLY__
+#define _NSIG 64
+#define _NSIG_BPW (sizeof(unsigned long) * 8)
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+# ifndef __ASSEMBLER__
# include <linux/types.h>
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
/* Avoid too many header ordering problems. */
struct siginfo;
@@ -70,5 +80,5 @@ typedef struct sigaltstack {
__kernel_size_t ss_size;
} stack_t;
-#endif /* !__ASSEMBLY */
+#endif /* !__ASSEMBLER__ */
#endif /* _UAPI_ASM_PARISC_SIGNAL_H */
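
_NSIG_WORDS comes out to 2 on 32-bit parisc (64/32) and 1 on 64-bit (64/64), so sigset_t occupies 8 bytes either way. A quick restatement of that arithmetic:

    #include <assert.h>

    #define MY_NSIG         64
    #define MY_NSIG_BPW     (sizeof(unsigned long) * 8)
    #define MY_NSIG_WORDS   (MY_NSIG / MY_NSIG_BPW)

    int main(void)
    {
            /* one 8-byte signal mask regardless of word size */
            assert(MY_NSIG_WORDS * sizeof(unsigned long) == 8);
            return 0;
    }
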
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index f486d3dfb6bb..c16ec36dfee6 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -129,6 +129,24 @@
#define SO_RCVMARK 0x4049
+#define SO_PASSPIDFD 0x404A
+#define SO_PEERPIDFD 0x404B
+
+#define SCM_TS_OPT_ID 0x404C
+
+#define SO_RCVPRIORITY 0x404D
+
+#define SO_DEVMEM_LINEAR 0x404E
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 0x404F
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 0x4050
+
+#define SO_PASSRIGHTS 0x4051
+
+#define SO_INQ 0x4052
+#define SCM_INQ SO_INQ
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
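
Like all parisc socket options, the new constants use architecture-specific values (the asm-generic SO_PEERPIDFD is a small decimal number, for instance), which is why each generic addition has to be mirrored here. A sketch of fetching a pidfd for the peer of a connected AF_UNIX socket (the fallback define is the parisc value from this hunk):

    #include <sys/socket.h>

    #ifndef SO_PEERPIDFD
    #define SO_PEERPIDFD 0x404B     /* parisc value from this patch */
    #endif

    /* returns a pidfd for the connected peer, or -1 on error */
    static int peer_pidfd(int sock)
    {
            int pidfd = -1;
            socklen_t len = sizeof(pidfd);

            if (getsockopt(sock, SOL_SOCKET, SO_PEERPIDFD, &pidfd, &len) < 0)
                    return -1;
            return pidfd;
    }
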
diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh
index 933d031c249a..664c2d77f776 100755
--- a/arch/parisc/install.sh
+++ b/arch/parisc/install.sh
@@ -16,6 +16,8 @@
# $3 - kernel map file
# $4 - default install path (blank if root directory)
+set -e
+
if [ "$(basename $2)" = "vmlinuz" ]; then
# Compressed install
echo "Installing compressed kernel"
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 3d138c9cf9ce..9157bc8bdf41 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -3,10 +3,10 @@
# Makefile for arch/parisc/kernel
#
-extra-y := vmlinux.lds
+always-$(KBUILD_BUILTIN) := vmlinux.lds
obj-y := head.o cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
- pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
+ syscall.o entry.o sys_parisc.o firmware.o \
ptrace.o hardware.o inventory.o drivers.o alternative.o \
signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
@@ -21,6 +21,9 @@ CFLAGS_REMOVE_unwind.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
endif
+CFLAGS_REMOVE_sys_parisc.o = -Wmissing-prototypes -Wmissing-declarations
+CFLAGS_REMOVE_sys_parisc32.o = -Wmissing-prototypes -Wmissing-declarations
+
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PA11) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
@@ -35,6 +38,7 @@ obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += topology.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o
diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c
index 66f5672c70bd..25c4d6c3375d 100644
--- a/arch/parisc/kernel/alternative.c
+++ b/arch/parisc/kernel/alternative.c
@@ -25,7 +25,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
{
struct alt_instr *entry;
int index = 0, applied = 0;
- int num_cpus = num_online_cpus();
+ int num_cpus = num_present_cpus();
u16 cond_check;
cond_check = ALT_COND_ALWAYS |
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 94652e13c260..3de4b5933b10 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -13,6 +13,7 @@
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
*/
+#define COMPILE_OFFSETS
#include <linux/types.h>
#include <linux/sched.h>
@@ -257,6 +258,8 @@ int main(void)
BLANK();
DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
+ DEFINE(TIF_32BIT_PA_BIT, 31-TIF_32BIT);
+
BLANK();
DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
@@ -275,6 +278,8 @@ int main(void)
* and kernel data on physical huge pages */
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#elif !defined(CONFIG_64BIT)
+ DEFINE(HUGEPAGE_SIZE, 4*1024*1024);
#else
DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
#endif
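
For context on the DEFINE() lines: asm-offsets.c is never linked into the kernel; it is compiled to assembly and scraped by Kbuild into generated #defines that assembly code can use (TIF_32BIT_PA_BIT above, for example). The mechanism, roughly as found in include/linux/kbuild.h:

    /* emit a recognizable marker into the compiler's assembly output;
     * a build script turns each "->NAME value" line into a #define */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    #define BLANK() asm volatile("\n.ascii \"->\"" : : )
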
diff --git a/arch/parisc/kernel/audit.c b/arch/parisc/kernel/audit.c
index f420b5552140..375cd73b5281 100644
--- a/arch/parisc/kernel/audit.c
+++ b/arch/parisc/kernel/audit.c
@@ -40,11 +40,6 @@ int audit_classify_arch(int arch)
int audit_classify_syscall(int abi, unsigned syscall)
{
-#ifdef CONFIG_COMPAT
- extern int parisc32_classify_syscall(unsigned);
- if (abi == AUDIT_ARCH_PARISC)
- return parisc32_classify_syscall(syscall);
-#endif
switch (syscall) {
case __NR_open:
return AUDITSC_OPEN;
@@ -55,6 +50,10 @@ int audit_classify_syscall(int abi, unsigned syscall)
case __NR_openat2:
return AUDITSC_OPENAT2;
default:
+#ifdef CONFIG_COMPAT
+ if (abi == AUDIT_ARCH_PARISC)
+ return AUDITSC_COMPAT;
+#endif
return AUDITSC_NATIVE;
}
}
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 1d3b8bc8a623..4c5240d3a3c7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -19,6 +19,8 @@
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
+#include <linux/syscalls.h>
+#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@@ -28,21 +30,33 @@
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
+#include <asm/cachectl.h>
+
+#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
+
+/*
+ * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
+ * of page flushes done flush_cache_page_if_present. There are some
+ * pros and cons in using this option. It may increase the risk of
+ * random segmentation faults.
+ */
+#define CONFIG_FLUSH_PAGE_ACCESSED 0
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
+/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-
-/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */
+static void flush_kernel_dcache_page_addr(const void *addr);
+
/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We need a spinlock around all TLB flushes to ensure
@@ -56,7 +70,7 @@ int pa_serialize_tlb_flushes __ro_after_init;
struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
-static struct pdc_btlb_info btlb_info __ro_after_init;
+struct pdc_btlb_info btlb_info;
#endif
DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
@@ -92,11 +106,11 @@ static inline void flush_data_cache(void)
/* Kernel virtual address of pfn. */
#define pfn_va(pfn) __va(PFN_PHYS(pfn))
-void
-__update_cache(pte_t pte)
+void __update_cache(pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
- struct page *page;
+ struct folio *folio;
+ unsigned int nr;
/* We don't have pte special. As a result, we can be called with
an invalid pfn and we don't need to flush the kernel dcache page.
@@ -104,13 +118,17 @@ __update_cache(pte_t pte)
if (!pfn_valid(pfn))
return;
- page = pfn_to_page(pfn);
- if (page_mapping_file(page) &&
- test_bit(PG_dcache_dirty, &page->flags)) {
- flush_kernel_dcache_page_addr(pfn_va(pfn));
- clear_bit(PG_dcache_dirty, &page->flags);
+ folio = page_folio(pfn_to_page(pfn));
+ pfn = folio_pfn(folio);
+ nr = folio_nr_pages(folio);
+ if (folio_flush_mapping(folio) &&
+ test_bit(PG_dcache_dirty, &folio->flags.f)) {
+ while (nr--)
+ flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
+ clear_bit(PG_dcache_dirty, &folio->flags.f);
} else if (parisc_requires_coherency())
- flush_kernel_dcache_page_addr(pfn_va(pfn));
+ while (nr--)
+ flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}
void
@@ -258,11 +276,9 @@ parisc_cache_init(void)
icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
-#ifndef CONFIG_PA20
- if (pdc_btlb_info(&btlb_info) < 0) {
- memset(&btlb_info, 0, sizeof btlb_info);
- }
-#endif
+ /* stride needs to be non-zero, otherwise cache flushes will not work */
+ WARN_ON(cache_info.dc_size && dcache_stride == 0);
+ WARN_ON(cache_info.ic_size && icache_stride == 0);
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
@@ -317,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
{
if (!static_branch_likely(&parisc_has_cache))
return;
+
+ /*
+ * The TLB is the engine of coherence on parisc. The CPU is
+ * entitled to speculate any page with a TLB mapping, so here
+ * we kill the mapping then flush the page along a special
+ * flush-only alias mapping. This guarantees that the page is
+ * no longer in the cache for any process, nor may it be
+ * speculatively read in (until the user or kernel specifically
+ * accesses it, of course).
+ */
+ flush_tlb_page(vma, vmaddr);
+
preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
@@ -324,46 +352,61 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}
-static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
+static void flush_kernel_dcache_page_addr(const void *addr)
{
- unsigned long flags, space, pgd, prot;
-#ifdef CONFIG_TLB_PTLOCK
- unsigned long pgd_lock;
-#endif
+ unsigned long vaddr = (unsigned long)addr;
+ unsigned long flags;
- vmaddr &= PAGE_MASK;
+ /* Purge TLB entry to remove translation on all CPUs */
+ purge_tlb_start(flags);
+ pdtlb(SR_KERNEL, addr);
+ purge_tlb_end(flags);
+ /* Use tmpalias flush to prevent data cache move-in */
preempt_disable();
+ flush_dcache_page_asm(__pa(vaddr), vaddr);
+ preempt_enable();
+}
- /* Set context for flush */
- local_irq_save(flags);
- prot = mfctl(8);
- space = mfsp(SR_USER);
- pgd = mfctl(25);
-#ifdef CONFIG_TLB_PTLOCK
- pgd_lock = mfctl(28);
-#endif
- switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
- local_irq_restore(flags);
-
- flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
- flush_tlb_page(vma, vmaddr);
+static void flush_kernel_icache_page_addr(const void *addr)
+{
+ unsigned long vaddr = (unsigned long)addr;
+ unsigned long flags;
- /* Restore previous context */
- local_irq_save(flags);
-#ifdef CONFIG_TLB_PTLOCK
- mtctl(pgd_lock, 28);
-#endif
- mtctl(pgd, 25);
- mtsp(space, SR_USER);
- mtctl(prot, 8);
- local_irq_restore(flags);
+ /* Purge TLB entry to remove translation on all CPUs */
+ purge_tlb_start(flags);
+ pdtlb(SR_KERNEL, addr);
+ purge_tlb_end(flags);
+ /* Use tmpalias flush to prevent instruction cache move-in */
+ preempt_disable();
+ flush_icache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}
+void kunmap_flush_on_unmap(const void *addr)
+{
+ flush_kernel_dcache_page_addr(addr);
+}
+EXPORT_SYMBOL(kunmap_flush_on_unmap);
+
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+ unsigned int nr)
+{
+ void *kaddr = page_address(page);
+
+ for (;;) {
+ flush_kernel_dcache_page_addr(kaddr);
+ flush_kernel_icache_page_addr(kaddr);
+ if (--nr == 0)
+ break;
+ kaddr += PAGE_SIZE;
+ }
+}
+
+/*
+ * Walk page directory for MM to find PTEP pointer for address ADDR.
+ */
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;
@@ -386,32 +429,71 @@ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
return ptep;
}
-static inline bool pte_needs_flush(pte_t pte)
+static inline bool pte_needs_cache_flush(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
-void flush_dcache_page(struct page *page)
+/*
+ * Return user physical address. Returns 0 if page is not present.
+ */
+static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long flags, space, pgd, prot, pa;
+#ifdef CONFIG_TLB_PTLOCK
+ unsigned long pgd_lock;
+#endif
+
+ /* Save context */
+ local_irq_save(flags);
+ prot = mfctl(8);
+ space = mfsp(SR_USER);
+ pgd = mfctl(25);
+#ifdef CONFIG_TLB_PTLOCK
+ pgd_lock = mfctl(28);
+#endif
+
+ /* Set context for lpa_user */
+ switch_mm_irqs_off(NULL, mm, NULL);
+ pa = lpa_user(addr);
+
+ /* Restore previous context */
+#ifdef CONFIG_TLB_PTLOCK
+ mtctl(pgd_lock, 28);
+#endif
+ mtctl(pgd, 25);
+ mtsp(space, SR_USER);
+ mtctl(prot, 8);
+ local_irq_restore(flags);
+
+ return pa;
+}
+
+void flush_dcache_folio(struct folio *folio)
{
- struct address_space *mapping = page_mapping_file(page);
- struct vm_area_struct *mpnt;
- unsigned long offset;
+ struct address_space *mapping = folio_flush_mapping(folio);
+ struct vm_area_struct *vma;
unsigned long addr, old_addr = 0;
+ void *kaddr;
unsigned long count = 0;
+ unsigned long i, nr, flags;
pgoff_t pgoff;
if (mapping && !mapping_mapped(mapping)) {
- set_bit(PG_dcache_dirty, &page->flags);
+ set_bit(PG_dcache_dirty, &folio->flags.f);
return;
}
- flush_kernel_dcache_page_addr(page_address(page));
+ nr = folio_nr_pages(folio);
+ kaddr = folio_address(folio);
+ for (i = 0; i < nr; i++)
+ flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);
if (!mapping)
return;
- pgoff = page->index;
+ pgoff = folio->index;
/*
* We have carefully arranged in arch_get_unmapped_area() that
@@ -420,49 +502,45 @@ void flush_dcache_page(struct page *page)
* to flush one address here for them all to become coherent
* on machines that support equivalent aliasing
*/
- flush_dcache_mmap_lock(mapping);
- vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
- offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
- addr = mpnt->vm_start + offset;
- if (parisc_requires_coherency()) {
- pte_t *ptep;
-
- ptep = get_ptep(mpnt->vm_mm, addr);
- if (ptep && pte_needs_flush(*ptep))
- flush_user_cache_page(mpnt, addr);
+ flush_dcache_mmap_lock_irqsave(mapping, flags);
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
+ unsigned long offset = pgoff - vma->vm_pgoff;
+ unsigned long pfn = folio_pfn(folio);
+
+ addr = vma->vm_start;
+ nr = folio_nr_pages(folio);
+ if (offset > -nr) {
+ pfn -= offset;
+ nr += offset;
} else {
+ addr += offset * PAGE_SIZE;
+ }
+ if (addr + nr * PAGE_SIZE > vma->vm_end)
+ nr = (vma->vm_end - addr) / PAGE_SIZE;
+
+ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+ != (addr & (SHM_COLOUR - 1))) {
+ for (i = 0; i < nr; i++)
+ __flush_cache_page(vma,
+ addr + i * PAGE_SIZE,
+ (pfn + i) * PAGE_SIZE);
/*
- * The TLB is the engine of coherence on parisc:
- * The CPU is entitled to speculate any page
- * with a TLB mapping, so here we kill the
- * mapping then flush the page along a special
- * flush only alias mapping. This guarantees that
- * the page is no-longer in the cache for any
- * process and nor may it be speculatively read
- * in (until the user or kernel specifically
- * accesses it, of course)
+ * Software is allowed to have any number
+ * of private mappings to a page.
*/
- flush_tlb_page(mpnt, addr);
- if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
- != (addr & (SHM_COLOUR - 1))) {
- __flush_cache_page(mpnt, addr, page_to_phys(page));
- /*
- * Software is allowed to have any number
- * of private mappings to a page.
- */
- if (!(mpnt->vm_flags & VM_SHARED))
- continue;
- if (old_addr)
- pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
- old_addr, addr, mpnt->vm_file);
+ if (!(vma->vm_flags & VM_SHARED))
+ continue;
+ if (old_addr)
+ pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
+ old_addr, addr, vma->vm_file);
+ if (nr == folio_nr_pages(folio))
old_addr = addr;
- }
}
WARN_ON(++count == 4096);
}
- flush_dcache_mmap_unlock(mapping);
+ flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_folio);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
@@ -533,11 +611,7 @@ void __init parisc_setup_cache_timing(void)
threshold/1024);
set_tlb_threshold:
- if (threshold > FLUSH_TLB_THRESHOLD)
- parisc_tlb_flush_threshold = threshold;
- else
- parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
-
+ parisc_tlb_flush_threshold = max(threshold, FLUSH_TLB_THRESHOLD);
printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
parisc_tlb_flush_threshold/1024);
}
@@ -546,29 +620,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
-void flush_kernel_dcache_page_addr(const void *addr)
-{
- unsigned long flags;
-
- flush_kernel_dcache_page_asm(addr);
- purge_tlb_start(flags);
- pdtlb(SR_KERNEL, addr);
- purge_tlb_end(flags);
-}
-EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
-
static void flush_cache_page_if_present(struct vm_area_struct *vma,
- unsigned long vmaddr, unsigned long pfn)
+ unsigned long vmaddr)
{
- pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);
-
- /*
- * The pte check is racy and sometimes the flush will trigger
- * a non-access TLB miss. Hopefully, the page has already been
- * flushed.
- */
- if (ptep && pte_needs_flush(*ptep))
- flush_cache_page(vma, vmaddr, pfn);
+#if CONFIG_FLUSH_PAGE_ACCESSED
+ bool needs_flush = false;
+ pte_t *ptep, pte;
+
+ ptep = get_ptep(vma->vm_mm, vmaddr);
+ if (ptep) {
+ pte = ptep_get(ptep);
+ needs_flush = pte_needs_cache_flush(pte);
+ pte_unmap(ptep);
+ }
+ if (needs_flush)
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
+#else
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long physaddr = get_upa(mm, vmaddr);
+
+ if (physaddr)
+ __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
+#endif
}
void copy_user_highpage(struct page *to, struct page *from,
@@ -578,7 +651,7 @@ void copy_user_highpage(struct page *to, struct page *from,
kfrom = kmap_local_page(from);
kto = kmap_local_page(to);
- flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
+ __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
copy_page_asm(kto, kfrom);
kunmap_local(kto);
kunmap_local(kfrom);
@@ -587,16 +660,17 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
- flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
+ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
+ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}
/* __flush_tlb_range()
@@ -630,27 +704,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- unsigned long addr, pfn;
- pte_t *ptep;
-
- for (addr = start; addr < end; addr += PAGE_SIZE) {
- /*
- * The vma can contain pages that aren't present. Although
- * the pte search is expensive, we need the pte to find the
- * page pfn and to check whether the page should be flushed.
- */
- ptep = get_ptep(vma->vm_mm, addr);
- if (ptep && pte_needs_flush(*ptep)) {
- if (parisc_requires_coherency()) {
- flush_user_cache_page(vma, addr);
- } else {
- pfn = pte_pfn(*ptep);
- if (WARN_ON(!pfn_valid(pfn)))
- return;
- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
- }
- }
- }
+ unsigned long addr;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE)
+ flush_cache_page_if_present(vma, addr);
}
static inline unsigned long mm_total_size(struct mm_struct *mm)
@@ -701,21 +758,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_range(vma, start, end);
- flush_cache_all();
+ if (vma->vm_flags & VM_EXEC)
+ flush_cache_all();
+ else
+ flush_data_cache();
return;
}
- flush_cache_pages(vma, start, end);
+ flush_cache_pages(vma, start & PAGE_MASK, end);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
- if (WARN_ON(!pfn_valid(pfn)))
- return;
- if (parisc_requires_coherency())
- flush_user_cache_page(vma, vmaddr);
- else
- __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@@ -723,34 +778,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
if (!PageAnon(page))
return;
- if (parisc_requires_coherency()) {
- if (vma->vm_flags & VM_SHARED)
- flush_data_cache();
- else
- flush_user_cache_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
+}
+
+int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = ptep_get(ptep);
+
+ if (!pte_young(pte))
+ return 0;
+ set_pte(ptep, pte_mkold(pte));
+#if CONFIG_FLUSH_PAGE_ACCESSED
+ __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
+#endif
+ return 1;
+}
+
+/*
+ * After a PTE is cleared, we have no way to flush the cache for
+ * the physical page. On PA8800 and PA8900 processors, these lines
+ * can cause random cache corruption. Thus, we must flush the cache
+ * as well as the TLB when clearing a PTE that's valid.
+ */
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ struct mm_struct *mm = (vma)->vm_mm;
+ pte_t pte = ptep_get_and_clear(mm, addr, ptep);
+ unsigned long pfn = pte_pfn(pte);
+
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ else if (pte_accessible(mm, pte))
+ flush_tlb_page(vma, addr);
+
+ return pte;
+}
+
+/*
+ * The physical address for pages in the ioremap case can be obtained
+ * from the vm_struct struct. I wasn't able to successfully handle the
+ * vmalloc and vmap cases. We have an array of struct page pointers in
+ * the uninitialized vmalloc case but the flush failed using page_to_pfn.
+ */
+void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ unsigned long addr, physaddr;
+ struct vm_struct *vm;
+
+ /* Prevent cache move-in */
+ flush_tlb_kernel_range(start, end);
+
+ if (end - start >= parisc_cache_flush_threshold) {
+ flush_cache_all();
return;
}
- flush_tlb_page(vma, vmaddr);
- preempt_disable();
- flush_dcache_page_asm(page_to_phys(page), vmaddr);
- preempt_enable();
+ if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
+ flush_cache_all();
+ return;
+ }
+
+ vm = find_vm_area((void *)start);
+ if (!vm) {
+ flush_cache_all();
+ return;
+ }
+
+ /* The physical addresses of IOREMAP regions are contiguous */
+ if (vm->flags & VM_IOREMAP) {
+ physaddr = vm->phys_addr;
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ preempt_disable();
+ flush_dcache_page_asm(physaddr, start);
+ flush_icache_page_asm(physaddr, start);
+ preempt_enable();
+ physaddr += PAGE_SIZE;
+ }
+ return;
+ }
+
+ flush_cache_all();
+}
+EXPORT_SYMBOL(flush_cache_vmap);
+
+/*
+ * The vm_struct has been retired and the page table is set up. The
+ * last page in the range is a guard page. Its physical address can't
+ * be determined using lpa, so there is no way to flush the range
+ * using flush_dcache_page_asm.
+ */
+void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ /* Prevent cache move-in */
+ flush_tlb_kernel_range(start, end);
+ flush_data_cache();
}
+EXPORT_SYMBOL(flush_cache_vunmap);
+/*
+ * On systems with PA8800/PA8900 processors, there is no way to flush
+ * a vmap range other than using the architected loop to flush the
+ * entire cache. The page directory is not set up, so we can't use
+ * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
+ * L2 is physically indexed but FDCE/FICE instructions in virtual
+ * mode output their virtual address on the core bus, not their
+ * real address. As a result, the L2 cache index formed from the
+ * virtual address will most likely not be the same as the L2 index
+ * formed from the real address.
+ */
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- (unsigned long)size >= parisc_cache_flush_threshold) {
- flush_tlb_kernel_range(start, end);
- flush_data_cache();
+ flush_tlb_kernel_range(start, end);
+
+ if (!static_branch_likely(&parisc_has_dcache))
+ return;
+
+ /* If interrupts are disabled, we can only do local flush */
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+ flush_data_cache_local(NULL);
return;
}
- flush_kernel_dcache_range_asm(start, end);
- flush_tlb_kernel_range(start, end);
+ flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
@@ -762,14 +916,64 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
/* Ensure DMA is complete */
asm_syncdma();
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- (unsigned long)size >= parisc_cache_flush_threshold) {
- flush_tlb_kernel_range(start, end);
- flush_data_cache();
+ flush_tlb_kernel_range(start, end);
+
+ if (!static_branch_likely(&parisc_has_dcache))
+ return;
+
+ /* If interrupts are disabled, we can only do local flush */
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+ flush_data_cache_local(NULL);
return;
}
- purge_kernel_dcache_range_asm(start, end);
- flush_tlb_kernel_range(start, end);
+ flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
+
+
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
+ unsigned int, cache)
+{
+ unsigned long start, end;
+ ASM_EXCEPTIONTABLE_VAR(error);
+
+ if (bytes == 0)
+ return 0;
+ if (!access_ok((void __user *) addr, bytes))
+ return -EFAULT;
+
+ end = addr + bytes;
+
+ if (cache & DCACHE) {
+ start = addr;
+ __asm__ __volatile__ (
+#ifdef CONFIG_64BIT
+ "1: cmpb,*<<,n %0,%2,1b\n"
+#else
+ "1: cmpb,<<,n %0,%2,1b\n"
+#endif
+ " fic,m %3(%4,%0)\n"
+ "2: sync\n"
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
+ : "+r" (start), "+r" (error)
+ : "r" (end), "r" (dcache_stride), "i" (SR_USER));
+ }
+
+ if (cache & ICACHE && error == 0) {
+ start = addr;
+ __asm__ __volatile__ (
+#ifdef CONFIG_64BIT
+ "1: cmpb,*<<,n %0,%2,1b\n"
+#else
+ "1: cmpb,<<,n %0,%2,1b\n"
+#endif
+ " fdc,m %3(%4,%0)\n"
+ "2: sync\n"
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
+ : "+r" (start), "+r" (error)
+ : "r" (end), "r" (icache_stride), "i" (SR_USER));
+ }
+
+ return error;
+}
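
The two inline-assembly loops in sys_cacheflush() step through the user range at the PDC-reported cache stride (fdc with dcache_stride for the data cache, fic with icache_stride for the instruction cache), and the exception-table entry converts a fault on the flush instruction into -EFAULT. The control flow, modelled in plain C (illustration only; the real loop must stay in assembly so ASM_EXCEPTIONTABLE_ENTRY_EFAULT can name the faulting instruction):

    /* one flush per stride-sized step until 'end' is passed */
    static void model_flush(unsigned long addr, unsigned long end,
                            unsigned long stride,
                            void (*flush_one)(unsigned long))
    {
            while (addr < end) {
                    flush_one(addr);        /* fdc or fic on real hardware */
                    addr += stride;
            }
    }
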
diff --git a/arch/parisc/kernel/compat_audit.c b/arch/parisc/kernel/compat_audit.c
index 539b16891bdf..3ac53f1ab860 100644
--- a/arch/parisc/kernel/compat_audit.c
+++ b/arch/parisc/kernel/compat_audit.c
@@ -26,19 +26,3 @@ unsigned int parisc32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
-
-int parisc32_classify_syscall(unsigned syscall)
-{
- switch (syscall) {
- case __NR_open:
- return AUDITSC_OPEN;
- case __NR_openat:
- return AUDITSC_OPENAT;
- case __NR_execve:
- return AUDITSC_EXECVE;
- case __NR_openat2:
- return AUDITSC_OPENAT2;
- default:
- return AUDITSC_COMPAT;
- }
-}
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index e7ee0c0c91d3..8d23fe42b0ce 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 1999 The Puffin Group
* Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
- * Copyright (c) 2001 Helge Deller <deller@gmx.de>
+ * Copyright (c) 2001-2023 Helge Deller <deller@gmx.de>
* Copyright (c) 2001,2002 Ryan Bradetich
* Copyright (c) 2004-2005 Thibaut VARENE <varenet@parisc-linux.org>
*
@@ -74,13 +74,13 @@ static int descend_children(struct device * dev, void * data)
}
/**
- * for_each_padev - Iterate over all devices in the tree
- * @fn: Function to call for each device.
- * @data: Data to pass to the called function.
+ * for_each_padev - Iterate over all devices in the tree
+ * @fn: Function to call for each device.
+ * @data: Data to pass to the called function.
*
- * This performs a depth-first traversal of the tree, calling the
- * function passed for each node. It calls the function for parents
- * before children.
+ * This performs a depth-first traversal of the tree, calling the
+ * function passed for each node. It calls the function for parents
+ * before children.
*/
static int for_each_padev(int (*fn)(struct device *, void *), void * data)
@@ -97,7 +97,7 @@ static int for_each_padev(int (*fn)(struct device *, void *), void * data)
* @driver: the PA-RISC driver to try
* @dev: the PA-RISC device to try
*/
-static int match_device(struct parisc_driver *driver, struct parisc_device *dev)
+static int match_device(const struct parisc_driver *driver, struct parisc_device *dev)
{
const struct parisc_device_id *ids;
@@ -280,7 +280,7 @@ int __init machine_has_merced_bus(void)
/**
* find_pa_parent_type - Find a parent of a specific type
- * @dev: The device to start searching from
+ * @padev: The device to start searching from
* @type: The device type to search for.
*
* Walks up the device tree looking for a device of the specified type.
@@ -344,8 +344,8 @@ static char *print_hwpath(struct hardware_path *path, char *output)
/**
* print_pa_hwpath - Returns hardware path for PA devices
- * dev: The device to return the path for
- * output: Pointer to a previously-allocated array to place the path in.
+ * @dev: The device to return the path for
+ * @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PA device. This string is compatible with that used by PDC, and
@@ -379,8 +379,8 @@ EXPORT_SYMBOL(get_pci_node_path);
/**
* print_pci_hwpath - Returns hardware path for PCI devices
- * dev: The device to return the path for
- * output: Pointer to a previously-allocated array to place the path in.
+ * @dev: The device to return the path for
+ * @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PCI device. This string is compatible with that used by PDC, and
@@ -415,7 +415,8 @@ static void setup_bus_id(struct parisc_device *padev)
dev_set_name(&padev->dev, name);
}
-struct parisc_device * __init create_tree_node(char id, struct device *parent)
+static struct parisc_device * __init create_tree_node(char id,
+ struct device *parent)
{
struct parisc_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -547,12 +548,12 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
return dev;
}
-static int parisc_generic_match(struct device *dev, struct device_driver *drv)
+static int parisc_generic_match(struct device *dev, const struct device_driver *drv)
{
return match_device(to_parisc_driver(drv), to_parisc_device(dev));
}
-static ssize_t make_modalias(struct device *dev, char *buf)
+static ssize_t make_modalias(const struct device *dev, char *buf)
{
const struct parisc_device *padev = to_parisc_device(dev);
const struct parisc_device_id *id = &padev->id;
@@ -562,7 +563,7 @@ static ssize_t make_modalias(struct device *dev, char *buf)
(u32)id->sversion);
}
-static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int parisc_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct parisc_device *padev;
char modalias[40];
@@ -617,7 +618,7 @@ static struct attribute *parisc_device_attrs[] = {
};
ATTRIBUTE_GROUPS(parisc_device);
-struct bus_type parisc_bus_type = {
+const struct bus_type parisc_bus_type = {
.name = "parisc",
.match = parisc_generic_match,
.uevent = parisc_uevent,
@@ -741,7 +742,7 @@ parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
};
if (device_for_each_child(parent, &recurse_data, descend_children))
- /* nothing */;
+ { /* nothing */ }
return d.dev;
}
@@ -771,8 +772,8 @@ EXPORT_SYMBOL(hwpath_to_device);
/**
* device_to_hwpath - Populates the hwpath corresponding to the given device.
- * @param dev the target device
- * @param path pointer to a previously allocated hwpath struct to be filled in
+ * @dev: the target device
+ * @path: pointer to a previously allocated hwpath struct to be filled in
*/
void device_to_hwpath(struct device *dev, struct hardware_path *path)
{
@@ -924,10 +925,10 @@ static __init void qemu_header(void)
pr_info("#define PARISC_MODEL \"%s\"\n\n",
boot_cpu_data.pdc.sys_model_name);
- pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
- "0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
- p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
+ pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
+ "0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9]);
#undef p
pr_info("#define PARISC_PDC_VERSION 0x%04lx\n\n",
@@ -994,6 +995,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
struct pdc_system_map_mod_info pdc_mod_info;
struct pdc_module_path mod_path;
+ memset(&iodc_data, 0, sizeof(iodc_data));
status = pdc_iodc_read(&count, hpa, 0,
&iodc_data, sizeof(iodc_data));
if (status != PDC_OK) {
@@ -1003,11 +1005,19 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
pr_info("\n");
+ /* Prevent hung task messages when printing on serial console */
+ cond_resched();
+
pr_info("#define HPA_%08lx_DESCRIPTION \"%s\"\n",
hpa, parisc_hardware_description(&dev->id));
mod_index = 0;
do {
+ /* initialize device path for old machines */
+ memset(&mod_path, 0xff, sizeof(mod_path));
+ get_node_path(dev->dev.parent, &mod_path.path);
+ mod_path.path.mod = dev->hw_path;
+ memset(&pdc_mod_info, 0, sizeof(pdc_mod_info));
status = pdc_system_map_find_mods(&pdc_mod_info,
&mod_path, mod_index++);
} while (status == PDC_OK && pdc_mod_info.mod_addr != hpa);
@@ -1033,11 +1043,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
(unsigned char)mod_path.path.bc[3],
(unsigned char)mod_path.path.bc[4],
(unsigned char)mod_path.path.bc[5]);
- pr_cont(".mod = 0x%x ", mod_path.path.mod);
- pr_cont(" },\n");
- pr_cont("\t.layers = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n",
- mod_path.layers[0], mod_path.layers[1], mod_path.layers[2],
- mod_path.layers[3], mod_path.layers[4], mod_path.layers[5]);
+ pr_cont(".mod = 0x%x }\n", mod_path.path.mod);
pr_cont("};\n");
pr_info("static struct pdc_iodc iodc_data_hpa_%08lx = {\n", hpa);
@@ -1057,8 +1063,6 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
DO(checksum);
DO(length);
#undef DO
- pr_cont("\t/* pad: 0x%04x, 0x%04x */\n",
- iodc_data.pad[0], iodc_data.pad[1]);
pr_cont("};\n");
pr_info("#define HPA_%08lx_num_addr %d\n", hpa, dev->num_addrs);
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 0e5ebfe8d9d2..e04c5d806c10 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -25,6 +25,7 @@
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>
+#include <asm/spinlock_types.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
@@ -35,6 +36,24 @@
.level 2.0
#endif
+/*
+ * We need seven instructions after a TLB insert for it to take effect.
+ * The PA8800/PA8900 processors are an exception and need 12 instructions.
+ * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
+ */
+#ifdef CONFIG_64BIT
+#define NUM_PIPELINE_INSNS 12
+#else
+#define NUM_PIPELINE_INSNS 7
+#endif
+
+ /* Insert num nops */
+ .macro insert_nops num
+ .rept \num
+ nop
+ .endr
+ .endm
+
/* Get aligned page_table_lock address for this mm from cr28/tr4 */
.macro get_ptl reg
mfctl %cr28,\reg
@@ -406,7 +425,7 @@
LDREG 0(\ptp),\pte
bb,<,n \pte,_PAGE_PRESENT_BIT,3f
b \fault
- stw \spc,0(\tmp)
+ stw \tmp1,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2: LDREG 0(\ptp),\pte
@@ -414,26 +433,20 @@
3:
.endm
- /* Release page_table_lock without reloading lock address.
- Note that the values in the register spc are limited to
- NR_SPACE_IDS (262144). Thus, the stw instruction always
- stores a nonzero value even when register spc is 64 bits.
- We use an ordered store to ensure all prior accesses are
- performed prior to releasing the lock. */
- .macro ptl_unlock0 spc,tmp
-#ifdef CONFIG_TLB_PTLOCK
-98: or,COND(=) %r0,\spc,%r0
- stw,ma \spc,0(\tmp)
-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
- .endm
-
- /* Release page_table_lock. */
- .macro ptl_unlock1 spc,tmp
+ /* Release page_table_lock if for user space. We use an ordered
+ store to ensure all prior accesses are performed prior to
+ releasing the lock. Note stw may not be executed, so we
+ provide one extra nop when CONFIG_TLB_PTLOCK is defined. */
+ .macro ptl_unlock spc,tmp,tmp2
#ifdef CONFIG_TLB_PTLOCK
98: get_ptl \tmp
- ptl_unlock0 \spc,\tmp
+ ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
+ or,COND(=) %r0,\spc,%r0
+ stw,ma \tmp2,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+ insert_nops NUM_PIPELINE_INSNS - 4
+#else
+ insert_nops NUM_PIPELINE_INSNS - 1
#endif
.endm
@@ -462,13 +475,13 @@
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
#define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
+ #define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
.macro convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
copy \pte,\tmp
- extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+ extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
@@ -476,8 +489,7 @@
depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
- extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+ extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
#endif
@@ -487,6 +499,12 @@
* this happens is quite subtle, read below */
.macro make_insert_tlb spc,pte,prot,tmp
space_to_prot \spc \prot /* create prot id from space */
+
+#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
+ /* need to drop DMB bit, as it's used as SPECIAL flag */
+ depi 0,_PAGE_SPECIAL_BIT,1,\pte
+#endif
+
/* The following is the real subtlety. This is depositing
* T <-> _PAGE_REFTRAP
* D <-> _PAGE_DIRTY
@@ -499,17 +517,18 @@
* Finally, _PAGE_READ goes in the top bit of PL1 (so we
* trigger an access rights trap in user space if the user
* tries to read an unreadable page */
-#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
- /* need to drop DMB bit, as it's used as SPECIAL flag */
- depi 0,_PAGE_SPECIAL_BIT,1,\pte
-#endif
depd \pte,8,7,\prot
/* PAGE_USER indicates the page can be read with user privileges,
* so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
- * contains _PAGE_READ) */
+ * contains _PAGE_READ). While the kernel can't directly write
+ * user pages which have _PAGE_WRITE zero, it can read pages
+ * which have _PAGE_READ zero (PL <= PL1). Thus, the kernel
+ * exception fault handler doesn't trigger when reading pages
+ * that aren't user read accessible */
extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
depdi 7,11,3,\prot
+
/* If we're a gateway page, drop PL2 back to zero for promotion
* to kernel privilege (so we can execute the page as kernel).
* Any privilege promotion page always denies read and write */
@@ -1039,23 +1058,26 @@ ENTRY_CFI(intr_save) /* for os_hpmc */
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
-#if 0 && defined(CONFIG_64BIT)
- /* Revisit when we have 64-bit code above 4Gb */
- b,n intr_save2
-
+#if defined(CONFIG_64BIT)
skip_save_ior:
/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
* need to adjust iasq/iaoq here in the same way we adjusted isr/ior
* above.
*/
- extrd,u,* %r8,PSW_W_BIT,1,%r1
- cmpib,COND(=),n 1,%r1,intr_save2
+ bb,COND(>=),n %r8,PSW_W_BIT,intr_save2
LDREG PT_IASQ0(%r29), %r16
LDREG PT_IAOQ0(%r29), %r17
- /* adjust iasq/iaoq */
+ /* adjust iasq0/iaoq0 */
space_adjust %r16,%r17,%r1
STREG %r16, PT_IASQ0(%r29)
STREG %r17, PT_IAOQ0(%r29)
+
+ LDREG PT_IASQ1(%r29), %r16
+ LDREG PT_IAOQ1(%r29), %r17
+ /* adjust iasq1/iaoq1 */
+ space_adjust %r16,%r17,%r1
+ STREG %r16, PT_IASQ1(%r29)
+ STREG %r17, PT_IAOQ1(%r29)
#else
skip_save_ior:
#endif
@@ -1125,7 +1147,7 @@ dtlb_miss_20w:
idtlbt pte,prot
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1134,6 +1156,7 @@ dtlb_check_alias_20w:
idtlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1151,7 +1174,7 @@ nadtlb_miss_20w:
idtlbt pte,prot
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1160,6 +1183,7 @@ nadtlb_check_alias_20w:
idtlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1185,7 +1209,7 @@ dtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1195,6 +1219,7 @@ dtlb_check_alias_11:
idtlba pte,(va)
idtlbp prot,(va)
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1218,7 +1243,7 @@ nadtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1228,6 +1253,7 @@ nadtlb_check_alias_11:
idtlba pte,(va)
idtlbp prot,(va)
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1247,7 +1273,7 @@ dtlb_miss_20:
idtlbt pte,prot
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1256,6 +1282,7 @@ dtlb_check_alias_20:
idtlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1275,7 +1302,7 @@ nadtlb_miss_20:
idtlbt pte,prot
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1284,6 +1311,7 @@ nadtlb_check_alias_20:
idtlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1320,7 +1348,7 @@ itlb_miss_20w:
iitlbt pte,prot
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1344,7 +1372,7 @@ naitlb_miss_20w:
iitlbt pte,prot
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1353,6 +1381,7 @@ naitlb_check_alias_20w:
iitlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1378,7 +1407,7 @@ itlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1402,7 +1431,7 @@ naitlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1412,6 +1441,7 @@ naitlb_check_alias_11:
iitlba pte,(%sr0, va)
iitlbp prot,(%sr0, va)
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1432,7 +1462,7 @@ itlb_miss_20:
iitlbt pte,prot
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1452,7 +1482,7 @@ naitlb_miss_20:
iitlbt pte,prot
- ptl_unlock1 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1461,6 +1491,7 @@ naitlb_check_alias_20:
iitlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1482,7 +1513,7 @@ dbit_trap_20w:
idtlbt pte,prot
- ptl_unlock0 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
#else
@@ -1508,7 +1539,7 @@ dbit_trap_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock0 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1528,7 +1559,7 @@ dbit_trap_20:
idtlbt pte,prot
- ptl_unlock0 spc,t0
+ ptl_unlock spc,t0,t1
rfir
nop
#endif
@@ -1815,6 +1846,10 @@ syscall_restore_rfi:
extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
depi -1,7,1,%r20 /* T bit */
+#ifdef CONFIG_64BIT
+ extru,<> %r19,TIF_32BIT_PA_BIT,1,%r0
+ depi -1,4,1,%r20 /* W bit */
+#endif
STREG %r20,TASK_PT_PSW(%r1)
/* Always store space registers, since sr3 can be changed (e.g. fork) */
@@ -1828,7 +1863,6 @@ syscall_restore_rfi:
STREG %r25,TASK_PT_IASQ0(%r1)
STREG %r25,TASK_PT_IASQ1(%r1)
- /* XXX W bit??? */
/* Now if old D bit is clear, it means we didn't save all registers
* on syscall entry, so do that now. This only happens on TRACEME
* calls, or if someone attached to us while we were on a syscall.
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 4dfe1f49c5c8..042343492a28 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -74,16 +74,16 @@
static DEFINE_SPINLOCK(pdc_lock);
#endif
-unsigned long pdc_result[NUM_PDC_RESULT] __aligned(8);
-unsigned long pdc_result2[NUM_PDC_RESULT] __aligned(8);
+static unsigned long pdc_result[NUM_PDC_RESULT] __aligned(8);
+static unsigned long pdc_result2[NUM_PDC_RESULT] __aligned(8);
#ifdef CONFIG_64BIT
-#define WIDE_FIRMWARE 0x1
-#define NARROW_FIRMWARE 0x2
+#define WIDE_FIRMWARE PDC_MODEL_OS64
+#define NARROW_FIRMWARE PDC_MODEL_OS32
-/* Firmware needs to be initially set to narrow to determine the
+/* Firmware needs to be initially set to narrow to determine the
* actual firmware width. */
-int parisc_narrow_firmware __ro_after_init = 2;
+int parisc_narrow_firmware __ro_after_init = NARROW_FIRMWARE;
#endif
/* On most currently-supported platforms, IODC I/O calls are 32-bit calls
@@ -123,10 +123,10 @@ static unsigned long f_extend(unsigned long address)
#ifdef CONFIG_64BIT
if(unlikely(parisc_narrow_firmware)) {
if((address & 0xff000000) == 0xf0000000)
- return 0xf0f0f0f000000000UL | (u32)address;
+ return (0xfffffff0UL << 32) | (u32)address;
if((address & 0xf0000000) == 0xf0000000)
- return 0xffffffff00000000UL | (u32)address;
+ return (0xffffffffUL << 32) | (u32)address;
}
#endif
return address;
@@ -134,7 +134,7 @@ static unsigned long f_extend(unsigned long address)
/**
* convert_to_wide - Convert the return buffer addresses into kernel addresses.
- * @address: The return buffer from PDC.
+ * @addr: The return buffer from PDC.
*
* This function is used to convert the return buffer addresses retrieved from PDC
* into kernel addresses when the PDC address size and kernel address size are
@@ -160,14 +160,16 @@ void set_firmware_width_unlocked(void)
ret = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES,
__pa(pdc_result), 0);
+ if (ret < 0)
+ return;
convert_to_wide(pdc_result);
if (pdc_result[0] != NARROW_FIRMWARE)
parisc_narrow_firmware = 0;
}
-
+
/**
* set_firmware_width - Determine if the firmware is wide or narrow.
- *
+ *
* This function must be called before any pdc_* function that uses the
* convert_to_wide function.
*/
@@ -176,7 +178,7 @@ void set_firmware_width(void)
unsigned long flags;
/* already initialized? */
- if (parisc_narrow_firmware != 2)
+ if (parisc_narrow_firmware != NARROW_FIRMWARE)
return;
spin_lock_irqsave(&pdc_lock, flags);
@@ -255,8 +257,8 @@ int __init pdc_instr(unsigned int *instr)
/**
* pdc_chassis_info - Return chassis information.
- * @result: The return buffer.
* @chassis_info: The memory buffer address.
+ * @led_info: The LED information buffer address.
* @len: The size of the memory buffer address.
*
* An HVERSION dependent call for returning the chassis information.
@@ -280,7 +282,8 @@ int __init pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_inf
/**
* pdc_pat_chassis_send_log - Sends a PDC PAT CHASSIS log message.
- * @retval: -1 on error, 0 on success. Other value are PDC errors
+ * @state: state of the machine
+ * @data: value for that state
*
* Must be correctly formatted or expect system crash
*/
@@ -303,7 +306,7 @@ int pdc_pat_chassis_send_log(unsigned long state, unsigned long data)
/**
* pdc_chassis_disp - Updates chassis code
- * @retval: -1 on error, 0 on success
+ * @disp: value to show on display
*/
int pdc_chassis_disp(unsigned long disp)
{
@@ -318,8 +321,7 @@ int pdc_chassis_disp(unsigned long disp)
}
/**
- * pdc_cpu_rendenzvous - Stop currently executing CPU
- * @retval: -1 on error, 0 on success
+ * __pdc_cpu_rendezvous - Stop currently executing CPU and do not return.
*/
int __pdc_cpu_rendezvous(void)
{
@@ -332,7 +334,7 @@ int __pdc_cpu_rendezvous(void)
/**
* pdc_cpu_rendezvous_lock - Lock PDC while transitioning to rendezvous state
*/
-void pdc_cpu_rendezvous_lock(void)
+void pdc_cpu_rendezvous_lock(void) __acquires(&pdc_lock)
{
spin_lock(&pdc_lock);
}
@@ -340,14 +342,14 @@ void pdc_cpu_rendezvous_lock(void)
/**
* pdc_cpu_rendezvous_unlock - Unlock PDC after reaching rendezvous state
*/
-void pdc_cpu_rendezvous_unlock(void)
+void pdc_cpu_rendezvous_unlock(void) __releases(&pdc_lock)
{
spin_unlock(&pdc_lock);
}
/**
* pdc_pat_get_PDC_entrypoint - Get PDC entry point for current CPU
- * @retval: -1 on error, 0 on success
+ * @pdc_entry: pointer to where the PDC entry point should be stored
*/
int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry)
{
@@ -369,7 +371,7 @@ int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry)
}
/**
* pdc_chassis_warn - Fetches chassis warnings
- * @retval: -1 on error, 0 on success
+ * @warn: The warning value to be shown
*/
int pdc_chassis_warn(unsigned long *warn)
{
@@ -462,7 +464,8 @@ int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info,
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
- retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE, __pa(pdc_result),
+ memcpy(pdc_result2, mod_path, sizeof(*mod_path));
+ retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE, __pa(pdc_result),
__pa(pdc_result2), mod_index);
convert_to_wide(pdc_result);
memcpy(pdc_mod_info, pdc_result, sizeof(*pdc_mod_info));
@@ -521,6 +524,7 @@ int pdc_model_info(struct pdc_model *model)
/**
* pdc_model_sysmodel - Get the system model name.
+ * @os_id: The operating system ID asked for (an OS_ID_* value)
* @name: A char array of at least 81 characters.
*
* Get system model name from PDC ROM (e.g. 9000/715 or 9000/778/B160L).
@@ -549,7 +553,7 @@ int pdc_model_sysmodel(unsigned int os_id, char *name)
/**
* pdc_model_versions - Identify the version number of each processor.
- * @cpu_id: The return buffer.
+ * @versions: The return buffer.
* @id: The id of the processor to check.
*
* Returns the version number for each processor component.
@@ -684,7 +688,6 @@ int pdc_spaceid_bits(unsigned long *space_bits)
return retval;
}
-#ifndef CONFIG_PA20
/**
* pdc_btlb_info - Return block TLB information.
* @btlb: The return buffer.
@@ -693,18 +696,51 @@ int pdc_spaceid_bits(unsigned long *space_bits)
*/
int pdc_btlb_info(struct pdc_btlb_info *btlb)
{
- int retval;
+ int retval;
unsigned long flags;
- spin_lock_irqsave(&pdc_lock, flags);
- retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
- memcpy(btlb, pdc_result, sizeof(*btlb));
- spin_unlock_irqrestore(&pdc_lock, flags);
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
- if(retval < 0) {
- btlb->max_size = 0;
- }
- return retval;
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
+ memcpy(btlb, pdc_result, sizeof(*btlb));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ if(retval < 0) {
+ btlb->max_size = 0;
+ }
+ return retval;
+}
+
+int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+ unsigned long entry_info, unsigned long slot)
+{
+ int retval;
+ unsigned long flags;
+
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INSERT, (unsigned long) (vpage >> 32),
+ (unsigned long) vpage, physpage, len, entry_info, slot);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+ return retval;
+}
+
+int pdc_btlb_purge_all(void)
+{
+ int retval;
+ unsigned long flags;
+
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+ return retval;
}
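
Dropping the #ifndef CONFIG_PA20 guard in favour of IS_ENABLED() checks means the BTLB helpers are now compiled (and type-checked) in every configuration, while PA2.0 builds still reduce them to a constant error return. The idiom in isolation (a sketch, not kernel code):

    /* IS_ENABLED(CONFIG_FOO) expands to a compile-time 0 or 1, so the
     * early return is folded away where the option is set and the rest
     * of the body is dead-code-eliminated, yet always parsed. */
    int btlb_style_call(void)
    {
            if (IS_ENABLED(CONFIG_PA20))
                    return PDC_BAD_PROC;    /* block TLBs exist only on PA1.x */
            /* ... take pdc_lock and issue the mem_pdc_call() here ... */
            return 0;
    }
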
/**
@@ -725,6 +761,9 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address,
int retval;
unsigned long flags;
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
spin_lock_irqsave(&pdc_lock, flags);
memcpy(pdc_result2, mod_path, sizeof(*mod_path));
retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result),
@@ -734,7 +773,6 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address,
return retval;
}
-#endif /* !CONFIG_PA20 */
/**
* pdc_lan_station_id - Get the LAN address.
@@ -996,8 +1034,8 @@ int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl)
/**
* pdc_pci_config_read - read PCI config space.
- * @hpa token from PDC to indicate which PCI device
- * @pci_addr configuration space address to read from
+ * @hpa: Token from PDC to indicate which PCI device
+ * @cfg_addr: Configuration space address to read from
*
* Read PCI Configuration space *before* linux PCI subsystem is running.
*/
@@ -1019,9 +1057,9 @@ unsigned int pdc_pci_config_read(void *hpa, unsigned long cfg_addr)
/**
* pdc_pci_config_write - write PCI config space.
- * @hpa token from PDC to indicate which PCI device
- * @pci_addr configuration space address to write
- * @val value we want in the 32-bit register
+ * @hpa: Token from PDC to indicate which PCI device
+ * @cfg_addr: Configuration space address to write
+ * @val: Value we want in the 32-bit register
*
* Write PCI Configuration space *before* linux PCI subsystem is running.
*/
@@ -1232,15 +1270,18 @@ int __init pdc_soft_power_info(unsigned long *power_reg)
}
/*
- * pdc_soft_power_button - Control the soft power button behaviour
- * @sw_control: 0 for hardware control, 1 for software control
+ * pdc_soft_power_button{_panic} - Control the soft power button behaviour
+ * @sw_control: 0 for hardware control, 1 for software control
*
*
* This PDC function places the soft power button under software or
* hardware control.
- * Under software control the OS may control to when to allow to shut
- * down the system. Under hardware control pressing the power button
+ * Under software control the OS decides when the system may be shut
+ * down. Under hardware control, pressing the power button
* powers off the system immediately.
+ *
+ * The _panic version relies on spin_trylock to prevent deadlock
+ * on panic path.
*/
int pdc_soft_power_button(int sw_control)
{
@@ -1254,6 +1295,22 @@ int pdc_soft_power_button(int sw_control)
return retval;
}
+int pdc_soft_power_button_panic(int sw_control)
+{
+ int retval;
+ unsigned long flags;
+
+ if (!spin_trylock_irqsave(&pdc_lock, flags)) {
+ pr_emerg("Couldn't enable soft power button\n");
+ return -EBUSY; /* ignored by the panic notifier */
+ }
+
+ retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_ENABLE, __pa(pdc_result), sw_control);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
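
pdc_soft_power_button_panic() exists because the panic notifier can run on a CPU that interrupted a pdc_lock holder; an unconditional spin_lock_irqsave() there could deadlock forever. The trylock shape, reduced to its essentials (sketch):

    unsigned long flags;

    if (!spin_trylock_irqsave(&pdc_lock, flags))
            return -EBUSY;          /* give up rather than hang the panic path */
    /* ... single firmware call under the lock ... */
    spin_unlock_irqrestore(&pdc_lock, flags);
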
/*
* pdc_io_reset - Hack to avoid overlapping range registers of Bridges devices.
* Primarily a problem on T600 (which parisc-linux doesn't support) but
@@ -1303,7 +1360,7 @@ static char iodc_dbuf[4096] __page_aligned_bss;
*/
int pdc_iodc_print(const unsigned char *str, unsigned count)
{
- unsigned int i;
+ unsigned int i, found = 0;
unsigned long flags;
count = min_t(unsigned int, count, sizeof(iodc_dbuf));
@@ -1315,6 +1372,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
iodc_dbuf[i+0] = '\r';
iodc_dbuf[i+1] = '\n';
i += 2;
+ found = 1;
goto print;
default:
iodc_dbuf[i] = str[i];
@@ -1330,7 +1388,7 @@ print:
__pa(pdc_result), 0, __pa(iodc_dbuf), i, 0);
spin_unlock_irqrestore(&pdc_lock, flags);
- return i;
+ return i - found;
}
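
The new found flag corrects the return value for the newline-expansion case: when '\n' becomes '\r\n', i counts one byte more than was consumed from str. For example, with str = "ok\n" and count = 3:

    /* iodc_dbuf = { 'o', 'k', '\r', '\n' }  ->  i = 4, found = 1
     * return i - found = 3   -- exactly the number of bytes consumed
     * from str, which matches what the caller handed in. */
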
#if !defined(BOOTLOADER)
@@ -1369,17 +1427,25 @@ int pdc_iodc_getc(void)
}
int pdc_sti_call(unsigned long func, unsigned long flags,
- unsigned long inptr, unsigned long outputr,
- unsigned long glob_cfg)
+ unsigned long inptr, unsigned long outputr,
+ unsigned long glob_cfg, int do_call64)
{
- int retval;
+ int retval = 0;
unsigned long irqflags;
- spin_lock_irqsave(&pdc_lock, irqflags);
- retval = real32_call(func, flags, inptr, outputr, glob_cfg);
- spin_unlock_irqrestore(&pdc_lock, irqflags);
+ spin_lock_irqsave(&pdc_lock, irqflags);
+ if (IS_ENABLED(CONFIG_64BIT) && do_call64) {
+#ifdef CONFIG_64BIT
+ retval = real64_call(func, flags, inptr, outputr, glob_cfg);
+#else
+ WARN_ON(1);
+#endif
+ } else {
+ retval = real32_call(func, flags, inptr, outputr, glob_cfg);
+ }
+ spin_unlock_irqrestore(&pdc_lock, irqflags);
- return retval;
+ return retval;
}
EXPORT_SYMBOL(pdc_sti_call);
@@ -1529,7 +1595,7 @@ int pdc_pat_get_irt(void *r_addr, unsigned long cell_num)
/**
* pdc_pat_pd_get_addr_map - Retrieve information about memory address ranges.
- * @actlen: The return buffer.
+ * @actual_len: The return buffer.
* @mem_addr: Pointer to the memory buffer.
* @count: The number of bytes to read from the buffer.
* @offset: The offset with respect to the beginning of the buffer.
@@ -1552,7 +1618,7 @@ int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
}
/**
- * pdc_pat_pd_get_PDC_interface_revisions - Retrieve PDC interface revisions.
+ * pdc_pat_pd_get_pdc_revisions - Retrieve PDC interface revisions.
* @legacy_rev: The legacy revision.
* @pat_rev: The PAT revision.
* @pdc_cap: The PDC capabilities.
@@ -1607,7 +1673,7 @@ int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *mem_addr)
* pdc_pat_io_pci_cfg_write - Retrieve information about memory address ranges.
* @pci_addr: PCI configuration space address for which the write request is being made.
* @pci_size: Size of write in bytes. Valid values are 1, 2, and 4.
- * @value: Pointer to 1, 2, or 4 byte value in low order end of argument to be
+ * @val: Pointer to 1, 2, or 4 byte value in low order end of argument to be
* written to PCI Config space.
*
*/
@@ -1625,7 +1691,7 @@ int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val)
}
/**
- * pdc_pat_mem_pdc_info - Retrieve information about page deallocation table
+ * pdc_pat_mem_pdt_info - Retrieve information about page deallocation table
* @rinfo: memory pdt information
*
*/
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 4d392e4ed358..10fd5b3e63e7 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -20,7 +20,7 @@
#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
#define __hot __section(".text.hot")
@@ -53,7 +53,7 @@ static void __hot prepare_ftrace_return(unsigned long *parent,
static ftrace_func_t ftrace_func;
-void notrace __hot ftrace_function_trampoline(unsigned long parent,
+asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent,
unsigned long self_addr,
unsigned long org_sp_gr3,
struct ftrace_regs *fregs)
@@ -78,7 +78,7 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
#endif
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
int ftrace_enable_ftrace_graph_caller(void)
{
static_key_enable(&ftrace_graph_enable.key);
@@ -87,7 +87,7 @@ int ftrace_enable_ftrace_graph_caller(void)
int ftrace_disable_ftrace_graph_caller(void)
{
- static_key_enable(&ftrace_graph_enable.key);
+ static_key_disable(&ftrace_graph_enable.key);
return 0;
}
#endif
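
The enable-to-disable fix above matters: ftrace_disable_ftrace_graph_caller() previously re-enabled the key, so the graph caller could never be switched off again. The intended enable/disable pairing around a static key (sketch, with a hypothetical do_feature()):

    DEFINE_STATIC_KEY_FALSE(example_key);

    static void fast_path(void)
    {
            if (static_branch_unlikely(&example_key))
                    do_feature();   /* hypothetical; runs only while enabled */
    }

    static void feature_on(void)  { static_key_enable(&example_key.key);  }
    static void feature_off(void) { static_key_disable(&example_key.key); }
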
@@ -206,6 +206,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe *p;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index fd15fd4bbb61..96e0264ac961 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -70,9 +70,8 @@ $bss_loop:
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)
-#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
- /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
- * and halt kernel if we detect a PA1.x CPU. */
+#if defined(CONFIG_PA20)
+ /* check for 64-bit capable CPU as required by current kernel */
ldi 32,%r10
mtctl %r10,%cr11
.level 2.0
@@ -180,10 +179,10 @@ $pgt_fill_loop:
std %dp,0x18(%r10)
#endif
-#ifdef CONFIG_64BIT
- /* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
+#ifdef CONFIG_64BIT
+ /* Get PDCE_PROC for monarch CPU. */
ldw MEM_PDC_LO(%r0),%r3
ldw MEM_PDC_HI(%r0),%r10
depd %r10, 31, 32, %r3 /* move to upper word */
@@ -269,7 +268,17 @@ stext_pdc_ret:
tovirt_r1 %r6
mtctl %r6,%cr30 /* restore task thread info */
#endif
-
+
+#ifndef CONFIG_64BIT
+ /* clear all BTLBs */
+ ldi PDC_BLOCK_TLB,%arg0
+ load32 PA(stext_pdc_btlb_ret), %rp
+ ldw MEM_PDC_LO(%r0),%r3
+ bv (%r3)
+ ldi PDC_BTLB_PURGE_ALL,%arg1
+stext_pdc_btlb_ret:
+#endif
+
/* PARANOID: clear user scratch/user space SR's */
mtsp %r0,%sr0
mtsp %r0,%sr1
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index b05055f3ba4b..dff66be65d29 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -24,9 +24,6 @@
#undef PARISC_IRQ_CR16_COUNTS
-extern irqreturn_t timer_interrupt(int, void *);
-extern irqreturn_t ipi_interrupt(int, void *);
-
#define EIEM_MASK(irq) (1UL<<(CPU_IRQ_MAX - irq))
/* Bits in EIEM correlate with cpu_irq_action[].
@@ -368,7 +365,7 @@ union irq_stack_union {
volatile unsigned int lock[1];
};
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.slock = { 1,1,1,1 },
};
#endif
@@ -489,7 +486,7 @@ void do_softirq_own_stack(void)
#endif /* CONFIG_IRQSTACKS */
/* ONLY called from entry.S:intr_extint() */
-void do_cpu_irq_mask(struct pt_regs *regs)
+asmlinkage void do_cpu_irq_mask(struct pt_regs *regs)
{
struct pt_regs *old_regs;
unsigned long eirr_val;
@@ -501,7 +498,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
old_regs = set_irq_regs(regs);
local_irq_disable();
- irq_enter();
+ irq_enter_rcu();
eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
if (!eirr_val)
@@ -536,7 +533,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
#endif /* CONFIG_IRQSTACKS */
out:
- irq_exit();
+ irq_exit_rcu();
set_irq_regs(old_regs);
return;
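
irq_enter_rcu()/irq_exit_rcu() are the variants of irq_enter()/irq_exit() that skip the rcu_irq_enter()/rcu_irq_exit() bookkeeping, for entry paths where RCU has already been informed. The resulting handler shape (illustrative sketch):

    asmlinkage void external_irq_handler(struct pt_regs *regs)  /* illustrative */
    {
            struct pt_regs *old_regs = set_irq_regs(regs);

            irq_enter_rcu();        /* hardirq accounting only */
            /* ... read EIRR, dispatch the pending interrupt sources ... */
            irq_exit_rcu();         /* may run pending softirqs */
            set_irq_regs(old_regs);
    }
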
diff --git a/arch/parisc/kernel/jump_label.c b/arch/parisc/kernel/jump_label.c
index e253b134500d..ea51f15bf0e6 100644
--- a/arch/parisc/kernel/jump_label.c
+++ b/arch/parisc/kernel/jump_label.c
@@ -8,7 +8,7 @@
#include <linux/jump_label.h>
#include <linux/bug.h>
#include <asm/alternative.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
static inline int reassemble_17(int as17)
{
diff --git a/arch/parisc/kernel/kexec.c b/arch/parisc/kernel/kexec.c
index 5eb7f30edc1f..db57345a9daf 100644
--- a/arch/parisc/kernel/kexec.c
+++ b/arch/parisc/kernel/kexec.c
@@ -4,6 +4,8 @@
#include <linux/console.h>
#include <linux/kexec.h>
#include <linux/delay.h>
+#include <linux/reboot.h>
+
#include <asm/cacheflush.h>
#include <asm/sections.h>
diff --git a/arch/parisc/kernel/kexec_file.c b/arch/parisc/kernel/kexec_file.c
index 8c534204f0fd..3fc82130b6c3 100644
--- a/arch/parisc/kernel/kexec_file.c
+++ b/arch/parisc/kernel/kexec_file.c
@@ -38,8 +38,8 @@ static void *elf_load(struct kimage *image, char *kernel_buf,
for (i = 0; i < image->nr_segments; i++)
image->segment[i].mem = __pa(image->segment[i].mem);
- pr_debug("Loaded the kernel at 0x%lx, entry at 0x%lx\n",
- kernel_load_addr, image->start);
+ kexec_dprintk("Loaded the kernel at 0x%lx, entry at 0x%lx\n",
+ kernel_load_addr, image->start);
if (initrd != NULL) {
kbuf.buffer = initrd;
@@ -51,7 +51,7 @@ static void *elf_load(struct kimage *image, char *kernel_buf,
if (ret)
goto out;
- pr_debug("Loaded initrd at 0x%lx\n", kbuf.mem);
+ kexec_dprintk("Loaded initrd at 0x%lx\n", kbuf.mem);
image->arch.initrd_start = kbuf.mem;
image->arch.initrd_end = kbuf.mem + initrd_len;
}
@@ -68,7 +68,7 @@ static void *elf_load(struct kimage *image, char *kernel_buf,
if (ret)
goto out;
- pr_debug("Loaded cmdline at 0x%lx\n", kbuf.mem);
+ kexec_dprintk("Loaded cmdline at 0x%lx\n", kbuf.mem);
image->arch.cmdline = kbuf.mem;
}
out:
diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c
index b16fa9bac5f4..fee81f877525 100644
--- a/arch/parisc/kernel/kgdb.c
+++ b/arch/parisc/kernel/kgdb.c
@@ -16,7 +16,7 @@
#include <asm/ptrace.h>
#include <asm/traps.h>
#include <asm/processor.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
#include <asm/cacheflush.h>
const struct kgdb_arch arch_kgdb_ops = {
diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c
index 6e0b86652f30..9255adba67a3 100644
--- a/arch/parisc/kernel/kprobes.c
+++ b/arch/parisc/kernel/kprobes.c
@@ -12,7 +12,7 @@
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 7df140545b22..4e5d991b2b65 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -27,9 +27,9 @@
* We are not doing SEGREL32 handling correctly. According to the ABI, we
* should do a value offset, like this:
* if (in_init(me, (void *)val))
- * val -= (uint32_t)me->init_layout.base;
+ * val -= (uint32_t)me->mem[MOD_INIT_TEXT].base;
* else
- * val -= (uint32_t)me->core_layout.base;
+ * val -= (uint32_t)me->mem[MOD_TEXT].base;
* However, SEGREL32 is used only for PARISC unwind entries, and we want
* those entries to have an absolute address, and not just an offset.
*
@@ -41,7 +41,6 @@
#include <linux/moduleloader.h>
#include <linux/elf.h>
-#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/string.h>
@@ -76,25 +75,6 @@
* allows us to allocate up to 4095 GOT entries. */
#define MAX_GOTS 4095
-/* three functions to determine where in the module core
- * or init pieces the location is */
-static inline int in_init(struct module *me, void *loc)
-{
- return (loc >= me->init_layout.base &&
- loc <= (me->init_layout.base + me->init_layout.size));
-}
-
-static inline int in_core(struct module *me, void *loc)
-{
- return (loc >= me->core_layout.base &&
- loc <= (me->core_layout.base + me->core_layout.size));
-}
-
-static inline int in_local(struct module *me, void *loc)
-{
- return in_init(me, loc) || in_core(me, loc);
-}
-
#ifndef CONFIG_64BIT
struct got_entry {
Elf32_Addr addr;
@@ -192,17 +172,6 @@ static inline int reassemble_22(int as22)
((as22 & 0x0003ff) << 3));
}
-void *module_alloc(unsigned long size)
-{
- /* using RWX means less protection for modules, but it's
- * easier than trying to map the text, data, init_text and
- * init_data correctly */
- return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL,
- PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
-}
-
#ifndef CONFIG_64BIT
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
@@ -302,6 +271,7 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
{
unsigned long gots = 0, fdescs = 0, len;
unsigned int i;
+ struct module_memory *mod_mem;
len = hdr->e_shnum * sizeof(me->arch.section[0]);
me->arch.section = kzalloc(len, GFP_KERNEL);
@@ -346,14 +316,15 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
me->arch.section[s].stub_entries += count;
}
+ mod_mem = &me->mem[MOD_TEXT];
/* align things a bit */
- me->core_layout.size = ALIGN(me->core_layout.size, 16);
- me->arch.got_offset = me->core_layout.size;
- me->core_layout.size += gots * sizeof(struct got_entry);
+ mod_mem->size = ALIGN(mod_mem->size, 16);
+ me->arch.got_offset = mod_mem->size;
+ mod_mem->size += gots * sizeof(struct got_entry);
- me->core_layout.size = ALIGN(me->core_layout.size, 16);
- me->arch.fdesc_offset = me->core_layout.size;
- me->core_layout.size += fdescs * sizeof(Elf_Fdesc);
+ mod_mem->size = ALIGN(mod_mem->size, 16);
+ me->arch.fdesc_offset = mod_mem->size;
+ mod_mem->size += fdescs * sizeof(Elf_Fdesc);
me->arch.got_max = gots;
me->arch.fdesc_max = fdescs;
@@ -371,7 +342,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
BUG_ON(value == 0);
- got = me->core_layout.base + me->arch.got_offset;
+ got = me->mem[MOD_TEXT].base + me->arch.got_offset;
for (i = 0; got[i].addr; i++)
if (got[i].addr == value)
goto out;
@@ -389,7 +360,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
#ifdef CONFIG_64BIT
static Elf_Addr get_fdesc(struct module *me, unsigned long value)
{
- Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset;
+ Elf_Fdesc *fdesc = me->mem[MOD_TEXT].base + me->arch.fdesc_offset;
if (!value) {
printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
@@ -407,7 +378,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
/* Create new one */
fdesc->addr = value;
- fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
+ fdesc->gp = (Elf_Addr)me->mem[MOD_TEXT].base + me->arch.got_offset;
return (Elf_Addr)fdesc;
}
#endif /* CONFIG_64BIT */
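
For readers new to 64-bit parisc: a C function pointer there designates a function descriptor, not code. get_fdesc() hands out entries from a per-module descriptor table that sits next to the GOT, which is why both now live in the MOD_TEXT region. The descriptor layout (a sketch, assuming the Elf64_Fdesc definition from asm/elf.h):

    typedef struct elf64_fdesc {
            __u64   addr;   /* entry point of the function */
            __u64   gp;     /* global pointer: base of the module's GOT */
    } Elf64_Fdesc;

    /* R_PARISC_FPTR64 resolves to &fdesc; an indirect call loads both
     * fields: branch to fdesc->addr with %dp set to fdesc->gp. */
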
@@ -742,7 +713,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
loc, val);
val += addend;
/* can we reach it locally? */
- if (in_local(me, (void *)val)) {
+ if (within_module(val, me)) {
/* this is the case where the symbol is local
* to the module, but in a different section,
* so stub the jump in case it's more than 22
@@ -801,7 +772,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
break;
case R_PARISC_FPTR64:
/* 64-bit function address */
- if(in_local(me, (void *)(val + addend))) {
+ if (within_module(val + addend, me)) {
*loc64 = get_fdesc(me, val+addend);
pr_debug("FDESC for %s at %llx points to %llx\n",
strtab + sym->st_name, *loc64,
@@ -839,7 +810,7 @@ register_unwind_table(struct module *me,
table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
end = table + sechdrs[me->arch.unwind_section].sh_size;
- gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
+ gp = (Elf_Addr)me->mem[MOD_TEXT].base + me->arch.got_offset;
pr_debug("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
me->arch.unwind_section, table, end, gp);
@@ -862,7 +833,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const char *strtab = NULL;
const Elf_Shdr *s;
char *secstrings;
- int symindex = -1;
+ int symindex __maybe_unused = -1;
Elf_Sym *newptr, *oldptr;
Elf_Shdr *symhdr = NULL;
#ifdef DEBUG
@@ -977,7 +948,7 @@ void module_arch_cleanup(struct module *mod)
#ifdef CONFIG_64BIT
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
- unsigned long start_opd = (Elf64_Addr)mod->core_layout.base +
+ unsigned long start_opd = (Elf64_Addr)mod->mem[MOD_TEXT].base +
mod->arch.fdesc_offset;
unsigned long end_opd = start_opd +
mod->arch.fdesc_count * sizeof(Elf64_Fdesc);
diff --git a/arch/parisc/kernel/pa7300lc.c b/arch/parisc/kernel/pa7300lc.c
deleted file mode 100644
index 0d770ac83f70..000000000000
--- a/arch/parisc/kernel/pa7300lc.c
+++ /dev/null
@@ -1,51 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/parisc/kernel/pa7300lc.c
- * - PA7300LC-specific functions
- *
- * Copyright (C) 2000 Philipp Rumpf */
-
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <asm/io.h>
-#include <asm/ptrace.h>
-#include <asm/machdep.h>
-
-/* CPU register indices */
-
-#define MIOC_STATUS 0xf040
-#define MIOC_CONTROL 0xf080
-#define MDERRADD 0xf0e0
-#define DMAERR 0xf0e8
-#define DIOERR 0xf0ec
-#define HIDMAMEM 0xf0f4
-
-/* this returns the HPA of the CPU it was called on */
-static u32 cpu_hpa(void)
-{
- return 0xfffb0000;
-}
-
-static void pa7300lc_lpmc(int code, struct pt_regs *regs)
-{
- u32 hpa;
- printk(KERN_WARNING "LPMC on CPU %d\n", smp_processor_id());
-
- show_regs(regs);
-
- hpa = cpu_hpa();
- printk(KERN_WARNING
- "MIOC_CONTROL %08x\n" "MIOC_STATUS %08x\n"
- "MDERRADD %08x\n" "DMAERR %08x\n"
- "DIOERR %08x\n" "HIDMAMEM %08x\n",
- gsc_readl(hpa+MIOC_CONTROL), gsc_readl(hpa+MIOC_STATUS),
- gsc_readl(hpa+MDERRADD), gsc_readl(hpa+DMAERR),
- gsc_readl(hpa+DIOERR), gsc_readl(hpa+HIDMAMEM));
-}
-
-void pa7300lc_init(void)
-{
- cpu_lpmc = pa7300lc_lpmc;
-}
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 9a0018f1f42c..541370d14559 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -889,6 +889,7 @@ ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
+ depi_safe 0, 31,PAGE_SHIFT, %r26 /* Clear any offset bits */
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
@@ -925,6 +926,7 @@ ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
+ depi_safe 0, 31,PAGE_SHIFT, %r26 /* Clear any offset bits */
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 00297e8e1c88..509146a52725 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
+#include <linux/libgcc.h>
#include <linux/string.h>
EXPORT_SYMBOL(memset);
@@ -21,6 +22,8 @@ EXPORT_SYMBOL(memset);
#include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);
#ifdef CONFIG_SMP
@@ -40,9 +43,6 @@ EXPORT_SYMBOL($global$);
#endif
#include <asm/io.h>
-EXPORT_SYMBOL(memcpy_toio);
-EXPORT_SYMBOL(memcpy_fromio);
-EXPORT_SYMBOL(memset_io);
extern void $$divI(void);
extern void $$divU(void);
@@ -92,12 +92,6 @@ EXPORT_SYMBOL($$divI_12);
EXPORT_SYMBOL($$divI_14);
EXPORT_SYMBOL($$divI_15);
-extern void __ashrdi3(void);
-extern void __ashldi3(void);
-extern void __lshrdi3(void);
-extern void __muldi3(void);
-extern void __ucmpdi2(void);
-
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c
index e59574f65e64..35dd764b871e 100644
--- a/arch/parisc/kernel/patch.c
+++ b/arch/parisc/kernel/patch.c
@@ -13,7 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
struct patch {
void *addr;
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index ba87f791323b..bf9f192c826e 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -39,7 +39,7 @@ static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly;
static unsigned long pcxl_used_pages __read_mostly;
-extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
+unsigned long pcxl_dma_start __ro_after_init; /* pcxl dma mapping area start */
static DEFINE_SPINLOCK(pcxl_res_lock);
static char *pcxl_res_map;
static int pcxl_res_hint;
@@ -164,7 +164,7 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
pmd_clear(pmd);
return;
}
- pte = pte_offset_map(pmd, vaddr);
+ pte = pte_offset_kernel(pmd, vaddr);
vaddr &= ~PMD_MASK;
end = vaddr + size;
if (end > PMD_SIZE)
@@ -245,7 +245,7 @@ static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
PCXL_SEARCH_LOOP(idx, mask, size); \
}
-unsigned long
+static unsigned long
pcxl_alloc_range(size_t size)
{
int res_idx;
@@ -381,7 +381,7 @@ pcxl_dma_init(void)
pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
get_order(pcxl_res_size));
memset(pcxl_res_map, 0, pcxl_res_size);
- proc_gsc_root = proc_mkdir("gsc", NULL);
+ proc_gsc_root = proc_mkdir("bus/gsc", NULL);
if (!proc_gsc_root)
printk(KERN_WARNING
"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
@@ -417,14 +417,6 @@ void *arch_dma_alloc(struct device *dev, size_t size,
map_uncached_pages(vaddr, size, paddr);
*dma_handle = (dma_addr_t) paddr;
-#if 0
-/* This probably isn't needed to support EISA cards.
-** ISA cards will certainly only support 24-bit DMA addressing.
-** Not clear if we can, want, or need to support ISA.
-*/
- if (!dev || *dev->coherent_dma_mask < 0xffffffff)
- gfp |= GFP_DMA;
-#endif
return (void *)vaddr;
}
@@ -446,11 +438,27 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
+ /*
+ * fdc: The data cache line is written back to memory, if and only if
+ * it is dirty, and then invalidated from the data cache.
+ */
flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
- flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
+ unsigned long addr = (unsigned long) phys_to_virt(paddr);
+
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ flush_kernel_dcache_range(addr, size);
+ return;
+ case DMA_FROM_DEVICE:
+ purge_kernel_dcache_range_asm(addr, addr + size);
+ return;
+ default:
+ BUG();
+ }
}
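
The split by direction avoids a needless writeback: for DMA_FROM_DEVICE any cached CPU copy is stale by definition, so the lines are purged (invalidated) rather than flushed (written back, then invalidated); a dirty line written back here would clobber the data the device just placed in memory. From the driver side the calls look like this (illustrative):

    /* Streaming DMA where the device writes into buf: */
    dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
    /* ... device performs the DMA ... */
    dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
    /* -> on PCX-L this now reaches purge_kernel_dcache_range_asm()
     *    instead of the flush variant. */
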
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
index da154406d368..d477d0177c2f 100644
--- a/arch/parisc/kernel/pdc_chassis.c
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -31,6 +31,7 @@
#include <asm/processor.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
+#include <asm/led.h>
#define PDC_CHASSIS_VER "0.05"
@@ -40,7 +41,7 @@ static unsigned int pdc_chassis_enabled __read_mostly = 1;
/**
* pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time.
- * @str configuration param: 0 to disable chassis log
+ * @str: configuration param: 0 to disable chassis log
* @return 1
*/
@@ -55,7 +56,6 @@ __setup("pdcchassis=", pdc_chassis_setup);
/**
* pdc_chassis_checkold() - Checks for old PDC_CHASSIS compatibility
- * @pdc_chassis_old: 1 if old pdc chassis style
*
* Currently, only E class and A180 are known to work with this.
* Inspired by Christoph Plattner
@@ -80,6 +80,9 @@ static void __init pdc_chassis_checkold(void)
/**
* pdc_chassis_panic_event() - Called by the panic handler.
+ * @this: unused
+ * @event: unused
+ * @ptr: unused
*
* As soon as a panic occurs, we should inform the PDC.
*/
@@ -88,7 +91,7 @@ static int pdc_chassis_panic_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
- return NOTIFY_DONE;
+ return NOTIFY_DONE;
}
@@ -99,7 +102,10 @@ static struct notifier_block pdc_chassis_panic_block = {
/**
- * parisc_reboot_event() - Called by the reboot handler.
+ * pdc_chassis_reboot_event() - Called by the reboot handler.
+ * @this: unused
+ * @event: unused
+ * @ptr: unused
*
* As soon as a reboot occurs, we should inform the PDC.
*/
@@ -108,7 +114,7 @@ static int pdc_chassis_reboot_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
- return NOTIFY_DONE;
+ return NOTIFY_DONE;
}
@@ -148,7 +154,7 @@ void __init parisc_pdc_chassis_init(void)
/**
* pdc_chassis_send_status() - Sends a predefined message to the chassis,
* and changes the front panel LEDs according to the new system state
- * @retval: PDC call return value.
+ * @message: Type of message, one of PDC_CHASSIS_DIRECT_* values.
*
* Only machines with 64 bits PDC PAT and those reported in
* pdc_chassis_checkold() are supported atm.
@@ -229,6 +235,11 @@ int pdc_chassis_send_status(int message)
} else retval = -1;
#endif /* CONFIG_64BIT */
} /* if (pdc_chassis_enabled) */
+
+ /* if system has LCD display, update current string */
+ if (retval != -1 && IS_ENABLED(CONFIG_CHASSIS_LCD_LED))
+ lcd_print(NULL);
+
#endif /* CONFIG_PDC_CHASSIS */
return retval;
}
diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
index 80943a00e245..b70b67adb855 100644
--- a/arch/parisc/kernel/pdt.c
+++ b/arch/parisc/kernel/pdt.c
@@ -16,6 +16,7 @@
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/kthread.h>
+#include <linux/proc_fs.h>
#include <linux/initrd.h>
#include <linux/pgtable.h>
#include <linux/mm.h>
@@ -23,6 +24,7 @@
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/sections.h>
+#include <asm/pgtable.h>
enum pdt_access_type {
PDT_NONE,
@@ -61,6 +63,7 @@ static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
#define PDT_ADDR_PERM_ERR (pdt_type != PDT_PDC ? 2UL : 0UL)
#define PDT_ADDR_SINGLE_ERR 1UL
+#ifdef CONFIG_PROC_FS
/* report PDT entries via /proc/meminfo */
void arch_report_meminfo(struct seq_file *m)
{
@@ -72,6 +75,7 @@ void arch_report_meminfo(struct seq_file *m)
seq_printf(m, "PDT_cur_entries: %7lu\n",
pdt_status.pdt_entries);
}
+#endif
static int get_info_pat_new(void)
{
@@ -352,10 +356,8 @@ static int __init pdt_initcall(void)
return -ENODEV;
kpdtd_task = kthread_run(pdt_mainloop, NULL, "kpdtd");
- if (IS_ERR(kpdtd_task))
- return PTR_ERR(kpdtd_task);
- return 0;
+ return PTR_ERR_OR_ZERO(kpdtd_task);
}
late_initcall(pdt_initcall);
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index d46b6709ec56..5e10f98ce7b5 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -57,7 +57,7 @@ struct rdr_tbl_ent {
static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
static int perf_enabled __read_mostly;
static DEFINE_SPINLOCK(perf_lock);
-struct parisc_device *cpu_device __read_mostly;
+static struct parisc_device *cpu_device __read_mostly;
/* RDRs to write for PCX-W */
static const int perf_rdrs_W[] =
@@ -288,7 +288,7 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
static ssize_t perf_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- size_t image_size;
+ size_t image_size __maybe_unused;
uint32_t image_type;
uint32_t interface_type;
uint32_t test;
@@ -466,7 +466,6 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
static const struct file_operations perf_fops = {
- .llseek = no_llseek,
.read = perf_read,
.write = perf_write,
.unlocked_ioctl = perf_ioctl,
@@ -476,9 +475,9 @@ static const struct file_operations perf_fops = {
};
static struct miscdevice perf_dev = {
- MISC_DYNAMIC_MINOR,
- PA_PERF_DEV,
- &perf_fops
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = PA_PERF_DEV,
+ .fops = &perf_fops,
};
/*
diff --git a/arch/parisc/kernel/perf_event.c b/arch/parisc/kernel/perf_event.c
new file mode 100644
index 000000000000..f90b83886ab4
--- /dev/null
+++ b/arch/parisc/kernel/perf_event.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Performance event support for parisc
+ *
+ * Copyright (C) 2025 by Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <asm/unwind.h>
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct unwind_frame_info info;
+
+ unwind_frame_init_task(&info, current, NULL);
+ while (1) {
+ if (unwind_once(&info) < 0 || info.ip == 0)
+ break;
+
+ if (!__kernel_text_address(info.ip) ||
+ perf_callchain_store(entry, info.ip))
+ return;
+ }
+}
diff --git a/arch/parisc/kernel/perf_regs.c b/arch/parisc/kernel/perf_regs.c
new file mode 100644
index 000000000000..10a1a5f06a18
--- /dev/null
+++ b/arch/parisc/kernel/perf_regs.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2025 by Helge Deller <deller@gmx.de> */
+
+#include <linux/perf_event.h>
+#include <linux/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ switch (idx) {
+ case PERF_REG_PARISC_R0 ... PERF_REG_PARISC_R31:
+ return regs->gr[idx - PERF_REG_PARISC_R0];
+ case PERF_REG_PARISC_SR0 ... PERF_REG_PARISC_SR7:
+ return regs->sr[idx - PERF_REG_PARISC_SR0];
+ case PERF_REG_PARISC_IASQ0 ... PERF_REG_PARISC_IASQ1:
+ return regs->iasq[idx - PERF_REG_PARISC_IASQ0];
+ case PERF_REG_PARISC_IAOQ0 ... PERF_REG_PARISC_IAOQ1:
+ return regs->iaoq[idx - PERF_REG_PARISC_IAOQ0];
+ case PERF_REG_PARISC_SAR: /* CR11 */
+ return regs->sar;
+ case PERF_REG_PARISC_IIR: /* CR19 */
+ return regs->iir;
+ case PERF_REG_PARISC_ISR: /* CR20 */
+ return regs->isr;
+ case PERF_REG_PARISC_IOR: /* CR21 */
+ return regs->ior;
+ case PERF_REG_PARISC_IPSW: /* CR22 */
+ return regs->ipsw;
+ }
+ WARN_ON_ONCE((u32)idx >= PERF_REG_PARISC_MAX);
+ return 0;
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_PARISC_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ if (!IS_ENABLED(CONFIG_64BIT))
+ return PERF_SAMPLE_REGS_ABI_32;
+
+ if (test_tsk_thread_flag(task, TIF_32BIT))
+ return PERF_SAMPLE_REGS_ABI_32;
+
+ return PERF_SAMPLE_REGS_ABI_64;
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
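
REG_RESERVED sets every bit above the last defined register, letting perf_reg_validate() reject sample masks that name unknown registers. Worked through with an assumed PERF_REG_PARISC_MAX of 49 (32 GRs + 8 SRs + 2 IASQs + 2 IAOQs + 5 control registers):

    /* (1ULL << 49) - 1  = 0x0001ffffffffffff   all valid register bits
     * REG_RESERVED      = 0xfffe000000000000   everything above them
     *
     * perf_reg_validate(0)                  -> -EINVAL  (empty mask)
     * perf_reg_validate(1ULL << 50)         -> -EINVAL  (reserved bit)
     * perf_reg_validate((1ULL << 32) - 1)   -> 0        (all GRs)       */
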
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index c4f8374c7018..e64ab5d2a40d 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -97,18 +97,12 @@ void machine_restart(char *cmd)
}
-void (*chassis_power_off)(void);
-
/*
* This routine is called from sys_reboot to actually turn off the
* machine
*/
void machine_power_off(void)
{
- /* If there is a registered power off handler, call it. */
- if (chassis_power_off)
- chassis_power_off();
-
/* Put the soft power button back under hardware control.
* If the user had already pressed the power button, the
* following call will immediately power off. */
@@ -122,13 +116,18 @@ void machine_power_off(void)
/* It seems we have no way to power the system off via
* software. The user has to press the button himself. */
- printk(KERN_EMERG "System shut down completed.\n"
- "Please power this system off now.");
+ printk("Power off or press RETURN to reboot.\n");
/* prevent soft lockup/stalled CPU messages for endless loop. */
rcu_sysrq_start();
lockup_detector_soft_poweroff();
- for (;;);
+ while (1) {
+ /* reboot if user presses RETURN key */
+ if (pdc_iodc_getc() == 13) {
+ printk("Rebooting...\n");
+ machine_restart(NULL);
+ }
+ }
}
void (*pm_power_off)(void);
@@ -159,15 +158,15 @@ EXPORT_SYMBOL(running_on_qemu);
/*
* Called from the idle thread for the CPU which has been shutdown.
*/
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
{
#ifdef CONFIG_HOTPLUG_CPU
idle_task_exit();
local_irq_disable();
- /* Tell __cpu_die() that this CPU is now safe to dispose of. */
- (void)cpu_report_death();
+ /* Tell the core that this CPU is now safe to dispose of. */
+ cpuhp_ap_report_dead();
/* Ensure that the cache lines are written out. */
flush_cache_all_local();
@@ -183,8 +182,6 @@ void arch_cpu_idle_dead(void)
void __cpuidle arch_cpu_idle(void)
{
- raw_local_irq_enable();
-
/* nop on real hardware, qemu will idle sleep. */
asm volatile("or %%r10,%%r10,%%r10\n":::);
}
@@ -204,7 +201,7 @@ arch_initcall(parisc_idle_init);
int
copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct pt_regs *cregs = &(p->thread.regs);
@@ -281,17 +278,3 @@ __get_wchan(struct task_struct *p)
} while (count++ < MAX_UNWIND_ENTRIES);
return 0;
}
-
-static inline unsigned long brk_rnd(void)
-{
- return (get_random_u32() & BRK_RND_MASK) << PAGE_SHIFT;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
-
- if (ret < mm->brk)
- return mm->brk;
- return ret;
-}
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index ba07e760d3c7..bf73562706b2 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -26,6 +26,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pdc.h>
+#include <asm/smp.h>
#include <asm/pdcpat.h>
#include <asm/irq.h> /* for struct irq_region */
#include <asm/parisc-device.h>
@@ -58,7 +59,7 @@ DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
*/
/**
- * init_cpu_profiler - enable/setup per cpu profiling hooks.
+ * init_percpu_prof - enable/setup per cpu profiling hooks.
* @cpunum: The processor instance.
*
* FIXME: doesn't do much yet...
@@ -171,7 +172,6 @@ static int __init processor_probe(struct parisc_device *dev)
p->cpu_num = cpu_info.cpu_num;
p->cpu_loc = cpu_info.cpu_loc;
- set_cpu_possible(cpuid, true);
store_cpu_topology(cpuid);
#ifdef CONFIG_SMP
@@ -241,9 +241,9 @@ void __init collect_boot_cpu_data(void)
/* get CPU-Model Information... */
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) {
- printk(KERN_INFO
- "model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
+ printk(KERN_INFO
+ "model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9]);
add_device_randomness(&boot_cpu_data.pdc.model,
sizeof(boot_cpu_data.pdc.model));
@@ -367,6 +367,8 @@ int init_per_cpu(int cpunum)
/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
init_percpu_prof(cpunum);
+ btlb_init_per_cpu();
+
return ret;
}
@@ -377,10 +379,18 @@ int
show_cpuinfo (struct seq_file *m, void *v)
{
unsigned long cpu;
+ char cpu_name[60], *p;
+
+ /* strip PA path from CPU name to not confuse lscpu */
+ strscpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
+ p = strrchr(cpu_name, '[');
+ if (p)
+ *(--p) = 0;
for_each_online_cpu(cpu) {
- const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
#ifdef CONFIG_SMP
+ const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
+
if (0 == cpuinfo->hpa)
continue;
#endif
@@ -425,8 +435,7 @@ show_cpuinfo (struct seq_file *m, void *v)
seq_printf(m, "model\t\t: %s - %s\n",
boot_cpu_data.pdc.sys_model_name,
- cpuinfo->dev ?
- cpuinfo->dev->name : "Unknown");
+ cpu_name);
seq_printf(m, "hversion\t: 0x%08x\n"
"sversion\t: 0x%08x\n",
@@ -464,13 +473,6 @@ static struct parisc_driver cpu_driver __refdata = {
*/
void __init processor_init(void)
{
- unsigned int cpu;
-
reset_cpu_topology();
-
- /* reset possible mask. We will mark those which are possible. */
- for_each_possible_cpu(cpu)
- set_cpu_possible(cpu, false);
-
register_parisc_driver(&cpu_driver);
}
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 69c62933e952..8a17ab7e6e0b 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -126,6 +126,12 @@ long arch_ptrace(struct task_struct *child, long request,
unsigned long tmp;
long ret = -EIO;
+ unsigned long user_regs_struct_size = sizeof(struct user_regs_struct);
+#ifdef CONFIG_64BIT
+ if (is_compat_task())
+ user_regs_struct_size /= 2;
+#endif
+
switch (request) {
/* Read the word at location addr in the USER area. For ptraced
@@ -166,7 +172,7 @@ long arch_ptrace(struct task_struct *child, long request,
addr >= sizeof(struct pt_regs))
break;
if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
- data |= 3; /* ensure userspace privilege */
+ data |= PRIV_USER; /* ensure userspace privilege */
}
if ((addr >= PT_GR1 && addr <= PT_GR31) ||
addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
@@ -181,14 +187,14 @@ long arch_ptrace(struct task_struct *child, long request,
return copy_regset_to_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
- 0, sizeof(struct user_regs_struct),
+ 0, user_regs_struct_size,
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
- 0, sizeof(struct user_regs_struct),
+ 0, user_regs_struct_size,
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
@@ -285,7 +291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (addr >= sizeof(struct pt_regs))
break;
if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
- data |= 3; /* ensure userspace privilege */
+ data |= PRIV_USER; /* ensure userspace privilege */
}
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
/* Special case, fp regs are 64 bits anyway */
@@ -302,6 +308,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
}
break;
+ case PTRACE_GETREGS:
+ case PTRACE_SETREGS:
+ case PTRACE_GETFPREGS:
+ case PTRACE_SETFPREGS:
+ return arch_ptrace(child, request, addr, data);
default:
ret = compat_ptrace_request(child, request, addr, data);
@@ -484,7 +495,7 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val)
case RI(iaoq[0]):
case RI(iaoq[1]):
/* set 2 lowest bits to ensure userspace privilege: */
- regs->iaoq[num - RI(iaoq[0])] = val | 3;
+ regs->iaoq[num - RI(iaoq[0])] = val | PRIV_USER;
return;
case RI(sar): regs->sar = val;
return;
@@ -551,12 +562,12 @@ static int gpr_set(struct task_struct *target,
static const struct user_regset native_regsets[] = {
[REGSET_GENERAL] = {
- .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+ USER_REGSET_NOTE_TYPE(PRSTATUS), .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.regset_get = gpr_get, .set = gpr_set
},
[REGSET_FP] = {
- .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+ USER_REGSET_NOTE_TYPE(PRFPREG), .n = ELF_NFPREG,
.size = sizeof(__u64), .align = sizeof(__u64),
.regset_get = fpr_get, .set = fpr_set
}
@@ -618,12 +629,12 @@ static int gpr32_set(struct task_struct *target,
*/
static const struct user_regset compat_regsets[] = {
[REGSET_GENERAL] = {
- .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+ USER_REGSET_NOTE_TYPE(PRSTATUS), .n = ELF_NGREG,
.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
.regset_get = gpr32_get, .set = gpr32_set
},
[REGSET_FP] = {
- .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+ USER_REGSET_NOTE_TYPE(PRFPREG), .n = ELF_NFPREG,
.size = sizeof(__u64), .align = sizeof(__u64),
.regset_get = fpr_get, .set = fpr_set
}
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 4dc12c4c0980..509d18b8e0e6 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -235,9 +235,6 @@ ENTRY_CFI(real64_call_asm)
/* save fn */
copy %arg2, %r31
- /* set up the new ap */
- ldo 64(%arg1), %r29
-
/* load up the arg registers from the saved arg area */
/* 32-bit calling convention passes first 4 args in registers */
ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
@@ -249,7 +246,9 @@ ENTRY_CFI(real64_call_asm)
ldd 7*REG_SZ(%arg1), %r19
ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */
+ /* set up real-mode stack and real-mode ap */
tophys_r1 %sp
+ ldo -16(%sp), %r29 /* Reference param save area */
b,l rfi_virt2real,%r2
nop
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 0797db617962..ace483b6f19a 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -31,7 +31,6 @@
#include <asm/sections.h>
#include <asm/pdc.h>
#include <asm/led.h>
-#include <asm/machdep.h> /* for pa7300lc_init() proto */
#include <asm/pdc_chassis.h>
#include <asm/io.h>
#include <asm/setup.h>
@@ -40,12 +39,7 @@
static char __initdata command_line[COMMAND_LINE_SIZE];
-/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */
-struct proc_dir_entry * proc_runway_root __read_mostly = NULL;
-struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
-struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL;
-
-void __init setup_cmdline(char **cmdline_p)
+static void __init setup_cmdline(char **cmdline_p)
{
extern unsigned int boot_args[];
char *p;
@@ -86,7 +80,7 @@ void __init setup_cmdline(char **cmdline_p)
}
#ifdef CONFIG_PA11
-void __init dma_ops_init(void)
+static void __init dma_ops_init(void)
{
switch (boot_cpu_data.cpu_type) {
case pcx:
@@ -98,21 +92,14 @@ void __init dma_ops_init(void)
"the PA-RISC 1.1 or 2.0 architecture specification.\n");
case pcxl2:
- pa7300lc_init();
- break;
default:
break;
}
}
#endif
-extern void collect_boot_cpu_data(void);
-
void __init setup_arch(char **cmdline_p)
{
-#ifdef CONFIG_64BIT
- extern int parisc_narrow_firmware;
-#endif
unwind_init();
init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */
@@ -153,11 +140,6 @@ void __init setup_arch(char **cmdline_p)
parisc_cache_init();
paging_init();
-#ifdef CONFIG_CHASSIS_LCD_LED
- /* initialize the LCD/LED after boot_cpu_data is available ! */
- led_init(); /* LCD/LED initialization */
-#endif
-
#ifdef CONFIG_PA11
dma_ops_init();
#endif
@@ -167,10 +149,7 @@ void __init setup_arch(char **cmdline_p)
/*
* Display CPU info for all CPUs.
- * for parisc this is in processor.c
*/
-extern int show_cpuinfo (struct seq_file *m, void *v);
-
static void *
c_start (struct seq_file *m, loff_t *pos)
{
@@ -201,48 +180,6 @@ const struct seq_operations cpuinfo_op = {
.show = show_cpuinfo
};
-static void __init parisc_proc_mkdir(void)
-{
- /*
- ** Can't call proc_mkdir() until after proc_root_init() has been
- ** called by start_kernel(). In other words, this code can't
- ** live in arch/.../setup.c because start_parisc() calls
- ** start_kernel().
- */
- switch (boot_cpu_data.cpu_type) {
- case pcxl:
- case pcxl2:
- if (NULL == proc_gsc_root)
- {
- proc_gsc_root = proc_mkdir("bus/gsc", NULL);
- }
- break;
- case pcxt_:
- case pcxu:
- case pcxu_:
- case pcxw:
- case pcxw_:
- case pcxw2:
- if (NULL == proc_runway_root)
- {
- proc_runway_root = proc_mkdir("bus/runway", NULL);
- }
- break;
- case mako:
- case mako2:
- if (NULL == proc_mckinley_root)
- {
- proc_mckinley_root = proc_mkdir("bus/mckinley", NULL);
- }
- break;
- default:
- /* FIXME: this was added to prevent the compiler
- * complaining about missing pcx, pcxs and pcxt
- * I'm assuming they have neither gsc nor runway */
- break;
- }
-}
-
static struct resource central_bus = {
.name = "Central Bus",
.start = F_EXTEND(0xfff80000),
@@ -295,21 +232,10 @@ static int __init parisc_init_resources(void)
return 0;
}
-extern void gsc_init(void);
-extern void processor_init(void);
-extern void ccio_init(void);
-extern void hppb_init(void);
-extern void dino_init(void);
-extern void iosapic_init(void);
-extern void lba_init(void);
-extern void sba_init(void);
-extern void eisa_init(void);
-
static int __init parisc_init(void)
{
u32 osid = (OS_ID_LINUX << 16);
- parisc_proc_mkdir();
parisc_init_resources();
do_device_inventory(); /* probe for hardware */
@@ -344,55 +270,12 @@ static int __init parisc_init(void)
apply_alternatives_all();
parisc_setup_cache_timing();
-
- /* These are in a non-obvious order, will fix when we have an iotree */
-#if defined(CONFIG_IOSAPIC)
- iosapic_init();
-#endif
-#if defined(CONFIG_IOMMU_SBA)
- sba_init();
-#endif
-#if defined(CONFIG_PCI_LBA)
- lba_init();
-#endif
-
- /* CCIO before any potential subdevices */
-#if defined(CONFIG_IOMMU_CCIO)
- ccio_init();
-#endif
-
- /*
- * Need to register Asp & Wax before the EISA adapters for the IRQ
- * regions. EISA must come before PCI to be sure it gets IRQ region
- * 0.
- */
-#if defined(CONFIG_GSC_LASI) || defined(CONFIG_GSC_WAX)
- gsc_init();
-#endif
-#ifdef CONFIG_EISA
- eisa_init();
-#endif
-
-#if defined(CONFIG_HPPB)
- hppb_init();
-#endif
-
-#if defined(CONFIG_GSC_DINO)
- dino_init();
-#endif
-
-#ifdef CONFIG_CHASSIS_LCD_LED
- register_led_regions(); /* register LED port info in procfs */
-#endif
-
return 0;
}
arch_initcall(parisc_init);
void __init start_parisc(void)
{
- extern void early_trap_init(void);
-
int ret, cpunum;
struct pdc_coproc_cfg coproc_cfg;
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 8bc0ddaa6219..e8d27def6c52 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -73,7 +73,7 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
return err;
}
-void
+asmlinkage void
sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
{
struct rt_sigframe __user *frame;
@@ -176,7 +176,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
}
static long
-setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_syscall)
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, long in_syscall)
{
unsigned long flags = 0;
@@ -211,7 +211,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_sysc
static long
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
- int in_syscall)
+ long in_syscall)
{
struct rt_sigframe __user *frame;
unsigned long rp, usp;
@@ -380,7 +380,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
*/
static void
-handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall)
+handle_signal(struct ksignal *ksig, struct pt_regs *regs, long in_syscall)
{
int ret;
sigset_t *oldset = sigmask_to_save();
@@ -423,7 +423,7 @@ static void check_syscallno_in_delay_branch(struct pt_regs *regs)
regs->gr[31] -= 8; /* delayed branching */
/* Get assembler opcode of code in delay branch */
- uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4);
+ uaddr = (u32 __user *) ((regs->gr[31] & ~3) + 4);
err = get_user(opcode, uaddr);
if (err)
return;
@@ -578,7 +578,7 @@ static void do_signal(struct pt_regs *regs, long in_syscall)
restore_saved_sigmask();
}
-void do_notify_resume(struct pt_regs *regs, long in_syscall)
+asmlinkage void do_notify_resume(struct pt_regs *regs, long in_syscall)
{
if (test_thread_flag(TIF_SIGPENDING) ||
test_thread_flag(TIF_NOTIFY_SIGNAL))
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 7dbd92cafae3..b2d12ab728b1 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -246,8 +246,8 @@ void kgdb_roundup_cpus(void)
inline void
smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); }
-void
-smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
+void
+arch_smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
void
smp_send_all_nop(void)
@@ -271,9 +271,6 @@ void arch_send_call_function_single_ipi(int cpu)
static void
smp_cpu_init(int cpunum)
{
- extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
- extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
-
/* Set modes and Enable floating point coprocessor */
init_per_cpu(cpunum);
@@ -300,7 +297,7 @@ smp_cpu_init(int cpunum)
enter_lazy_tlb(&init_mm, current);
init_IRQ(); /* make sure no IRQs are enabled or pending */
- start_cpu_itimer();
+ parisc_clockevent_init();
}
@@ -347,7 +344,7 @@ static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
struct irq_desc *desc = irq_to_desc(i);
if (desc && desc->kstat_irqs)
- *per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
+ *per_cpu_ptr(desc->kstat_irqs, cpuid) = (struct irqstat) { };
}
#endif
@@ -407,13 +404,7 @@ alive:
void __init smp_prepare_boot_cpu(void)
{
- int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
-
- /* Setup BSP mappings */
- printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
-
- set_cpu_online(bootstrap_processor, true);
- set_cpu_present(bootstrap_processor, true);
+ pr_info("SMP: bootstrap CPU ID is 0\n");
}
@@ -443,7 +434,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
if (cpu_online(cpu))
return 0;
- if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
+ if (num_online_cpus() < nr_cpu_ids &&
+ num_online_cpus() < setup_max_cpus &&
+ smp_boot_one_cpu(cpu, tidle))
return -EIO;
return cpu_online(cpu) ? 0 : -EIO;
@@ -500,11 +493,10 @@ int __cpu_disable(void)
void __cpu_die(unsigned int cpu)
{
pdc_cpu_rendezvous_lock();
+}
- if (!cpu_wait_death(cpu, 5)) {
- pr_crit("CPU%u: cpu didn't die\n", cpu);
- return;
- }
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
+{
pr_info("CPU%u: is shutting down\n", cpu);
/* set task's state to interruptible sleep */
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 09a34b07f02e..b2cdbb8a12b1 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -24,32 +24,28 @@
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/compat.h>
+#include <linux/elf-randomize.h>
-/* we construct an artificial offset for the mapping based on the physical
- * address of the kernel mapping variable */
-#define GET_LAST_MMAP(filp) \
- (filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
-#define SET_LAST_MMAP(filp, val) \
- { /* nothing */ }
-
-static int get_offset(unsigned int last_mmap)
-{
- return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
-}
+/*
+ * Construct an artificial page offset for the mapping based on the physical
+ * address of the kernel file mapping variable.
+ */
+#define GET_FILP_PGOFF(filp) \
+ (filp ? (((unsigned long) filp->f_mapping) >> 8) \
+ & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
-static unsigned long shared_align_offset(unsigned int last_mmap,
+static unsigned long shared_align_offset(unsigned long filp_pgoff,
unsigned long pgoff)
{
- return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
+ return (filp_pgoff + pgoff) << PAGE_SHIFT;
}
static inline unsigned long COLOR_ALIGN(unsigned long addr,
- unsigned int last_mmap, unsigned long pgoff)
+ unsigned long filp_pgoff, unsigned long pgoff)
{
unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
unsigned long off = (SHM_COLOUR-1) &
- (shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
-
+ shared_align_offset(filp_pgoff, pgoff);
return base + off;
}
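
Since shared_align_offset() now returns a byte offset directly, COLOR_ALIGN() no longer shifts it by PAGE_SHIFT a second time (the old double shift discarded the colour bits). With SHM_COLOUR = 0x00400000 (4 MB) and 4 kB pages, the arithmetic runs as follows (illustrative values):

    /* addr = 0x10123000, filp_pgoff = 0x30, pgoff = 0x10
     *
     * base = (addr + SHM_COLOUR - 1) & ~(SHM_COLOUR - 1) = 0x10400000
     * off  = (SHM_COLOUR - 1) & ((0x30 + 0x10) << PAGE_SHIFT)
     *      = 0x003fffff & 0x00040000                      = 0x00040000
     * COLOR_ALIGN(...) = base + off                       = 0x10440000
     *
     * Every mapping of this file at this pgoff lands on the same
     * 4 MB colour slot, as the VIPT cache aliasing rules require.   */
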
@@ -81,7 +77,7 @@ unsigned long calc_max_stack_size(unsigned long stack_max)
* indicating that "current" should be used instead of a passed-in
* value from the exec bprm as done with arch_pick_mmap_layout().
*/
-static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
+unsigned long mmap_upper_limit(const struct rlimit *rlim_stack)
{
unsigned long stack_base;
@@ -98,92 +94,43 @@ static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
return PAGE_ALIGN(STACK_TOP - stack_base);
}
+enum mmap_allocation_direction {UP, DOWN};
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags, enum mmap_allocation_direction dir)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
- unsigned long task_size = TASK_SIZE;
- int do_color_align, last_mmap;
- struct vm_unmapped_area_info info;
+ unsigned long filp_pgoff;
+ int do_color_align;
+ struct vm_unmapped_area_info info = {
+ .length = len
+ };
- if (len > task_size)
+ if (unlikely(len > TASK_SIZE))
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
- last_mmap = GET_LAST_MMAP(filp);
+ filp_pgoff = GET_FILP_PGOFF(filp);
if (flags & MAP_FIXED) {
- if ((flags & MAP_SHARED) && last_mmap &&
- (addr - shared_align_offset(last_mmap, pgoff))
- & (SHM_COLOUR - 1))
+ /* Even MAP_FIXED mappings must reside within TASK_SIZE */
+ if (TASK_SIZE - len < addr)
return -EINVAL;
- goto found_addr;
- }
-
- if (addr) {
- if (do_color_align && last_mmap)
- addr = COLOR_ALIGN(addr, last_mmap, pgoff);
- else
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma_prev(mm, addr, &prev);
- if (task_size - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)) &&
- (!prev || addr >= vm_end_gap(prev)))
- goto found_addr;
- }
-
- info.flags = 0;
- info.length = len;
- info.low_limit = mm->mmap_legacy_base;
- info.high_limit = mmap_upper_limit(NULL);
- info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
- info.align_offset = shared_align_offset(last_mmap, pgoff);
- addr = vm_unmapped_area(&info);
-
-found_addr:
- if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
- SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
-
- return addr;
-}
-
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
-{
- struct vm_area_struct *vma, *prev;
- struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
- int do_color_align, last_mmap;
- struct vm_unmapped_area_info info;
-
- /* requested length too big for entire address space */
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- do_color_align = 0;
- if (filp || (flags & MAP_SHARED))
- do_color_align = 1;
- last_mmap = GET_LAST_MMAP(filp);
- if (flags & MAP_FIXED) {
- if ((flags & MAP_SHARED) && last_mmap &&
- (addr - shared_align_offset(last_mmap, pgoff))
- & (SHM_COLOUR - 1))
+ if ((flags & MAP_SHARED) && filp &&
+ (addr - shared_align_offset(filp_pgoff, pgoff))
+ & (SHM_COLOUR - 1))
return -EINVAL;
- goto found_addr;
+ return addr;
}
- /* requesting a specific address */
if (addr) {
- if (do_color_align && last_mmap)
- addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+ if (do_color_align)
+ addr = COLOR_ALIGN(addr, filp_pgoff, pgoff);
else
addr = PAGE_ALIGN(addr);
@@ -191,87 +138,50 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
- goto found_addr;
+ return addr;
}
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
- info.low_limit = PAGE_SIZE;
- info.high_limit = mm->mmap_base;
- info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
- info.align_offset = shared_align_offset(last_mmap, pgoff);
- addr = vm_unmapped_area(&info);
- if (!(addr & ~PAGE_MASK))
- goto found_addr;
- VM_BUG_ON(addr != -ENOMEM);
-
- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
- return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-
-found_addr:
- if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
- SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
-
- return addr;
-}
-
-static int mmap_is_legacy(void)
-{
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- /* parisc stack always grows up - so a unlimited stack should
- * not be an indicator to use the legacy memory layout.
- * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
- * return 1;
- */
-
- return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_rnd(void)
-{
- unsigned long rnd = 0;
-
- if (current->flags & PF_RANDOMIZE)
- rnd = get_random_u32() & MMAP_RND_MASK;
-
- return rnd << PAGE_SHIFT;
-}
+ info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+ info.align_offset = shared_align_offset(filp_pgoff, pgoff);
+
+ if (dir == DOWN) {
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.low_limit = PAGE_SIZE;
+ info.high_limit = mm->mmap_base;
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ return addr;
+ VM_BUG_ON(addr != -ENOMEM);
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ }
-unsigned long arch_mmap_rnd(void)
-{
- return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = mmap_upper_limit(NULL);
+ return vm_unmapped_area(&info);
}
-static unsigned long mmap_legacy_base(void)
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
- return TASK_UNMAPPED_BASE + mmap_rnd();
+ return arch_get_unmapped_area_common(filp,
+ addr, len, pgoff, flags, UP);
}
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
{
- mm->mmap_legacy_base = mmap_legacy_base();
- mm->mmap_base = mmap_upper_limit(rlim_stack);
-
- if (mmap_is_legacy()) {
- mm->mmap_base = mm->mmap_legacy_base;
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
+ return arch_get_unmapped_area_common(filp,
+ addr, len, pgoff, flags, DOWN);
}
-
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long pgoff)
@@ -379,7 +289,7 @@ asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
((u64)lenhi << 32) | lenlo);
}
-long parisc_personality(unsigned long personality)
+asmlinkage long parisc_personality(unsigned long personality)
{
long err;
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 2a12a547b447..826c8e51b585 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
current->comm, current->pid, r20);
return -ENOSYS;
}
-
-asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
- compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
- const char __user * pathname)
-{
- return sys_fanotify_mark(fanotify_fd, flags,
- ((__u64)mask1 << 32) | mask0,
- dfd, pathname);
-}
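
The removed wrapper existed only to splice the 64-bit fanotify mask back together from two 32-bit argument registers; the generic compat_sys_fanotify_mark() now does that itself (see the syscall.tbl change below). The reassembly idiom in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t mask0 = 0x00000001;    /* low half (example values) */
            uint32_t mask1 = 0x00000002;    /* high half */
            uint64_t mask  = ((uint64_t)mask1 << 32) | mask0;

            printf("%#llx\n", (unsigned long long)mask);  /* 0x200000001 */
            return 0;
    }
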
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 1373e5129868..f58c4bccfbce 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -39,6 +39,7 @@ registers).
#include <asm/assembly.h>
#include <asm/processor.h>
#include <asm/cache.h>
+#include <asm/spinlock_types.h>
#include <linux/linkage.h>
@@ -66,6 +67,16 @@ registers).
stw \reg1, 0(%sr2,\reg2)
.endm
+	/* Raise an exception if the spinlock content is neither zero
+	 * nor __ARCH_SPIN_LOCK_UNLOCKED_VAL. */
+ .macro spinlock_check spin_val,tmpreg
+#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
+ ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmpreg
+ andcm,= \spin_val, \tmpreg, %r0
+ .word SPINLOCK_BREAK_INSN
+#endif
+ .endm
+
.text
.import syscall_exit,code
@@ -232,10 +243,10 @@ linux_gateway_entry:
#ifdef CONFIG_64BIT
ldil L%sys_call_table, %r1
- or,= %r2,%r2,%r2
- addil L%(sys_call_table64-sys_call_table), %r1
+ or,ev %r2,%r2,%r2
+ ldil L%sys_call_table64, %r1
ldo R%sys_call_table(%r1), %r19
- or,= %r2,%r2,%r2
+ or,ev %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
@@ -368,10 +379,10 @@ tracesys_next:
extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */
ldil L%sys_call_table, %r1
- or,= %r2,%r2,%r2
- addil L%(sys_call_table64-sys_call_table), %r1
+ or,ev %r2,%r2,%r2
+ ldil L%sys_call_table64, %r1
ldo R%sys_call_table(%r1), %r19
- or,= %r2,%r2,%r2
+ or,ev %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
@@ -508,7 +519,8 @@ lws_start:
lws_exit_noerror:
lws_pagefault_enable %r1,%r21
- stw,ma %r20, 0(%sr2,%r20)
+ ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
+ stw,ma %r21, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
b lws_exit
copy %r0, %r21
@@ -521,7 +533,8 @@ lws_wouldblock:
lws_pagefault:
lws_pagefault_enable %r1,%r21
- stw,ma %r20, 0(%sr2,%r20)
+ ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
+ stw,ma %r21, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 3(%r0),%r28
b lws_exit
@@ -600,6 +613,9 @@ lws_compare_and_swap32:
lws_compare_and_swap:
/* Trigger memory reference interruptions without writing to memory */
1: ldw 0(%r26), %r28
+ proberi (%r26), PRIV_USER, %r28
+ comb,=,n %r28, %r0, lws_fault /* backwards, likely not taken */
+ nop
2: stbys,e %r0, 0(%r26)
/* Calculate 8-bit hash index from virtual address */
@@ -619,6 +635,7 @@ lws_compare_and_swap:
/* Try to acquire the lock */
LDCW 0(%sr2,%r20), %r28
+ spinlock_check %r28, %r21
comclr,<> %r0, %r28, %r0
b,n lws_wouldblock
@@ -753,6 +770,9 @@ cas2_lock_start:
copy %r26, %r28
depi_safe 0, 31, 2, %r28
10: ldw 0(%r28), %r1
+ proberi (%r28), PRIV_USER, %r1
+ comb,=,n %r1, %r0, lws_fault /* backwards, likely not taken */
+ nop
11: stbys,e %r0, 0(%r28)
/* Calculate 8-bit hash index from virtual address */
@@ -772,6 +792,7 @@ cas2_lock_start:
/* Try to acquire the lock */
LDCW 0(%sr2,%r20), %r28
+ spinlock_check %r28, %r21
comclr,<> %r0, %r28, %r0
b,n lws_wouldblock
@@ -936,41 +957,47 @@ atomic_xchg_begin:
/* 8-bit exchange */
1: ldb 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
b atomic_xchg_start
2: stbys,e %r0, 0(%r20)
- nop
- nop
- nop
/* 16-bit exchange */
3: ldh 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
b atomic_xchg_start
4: stbys,e %r0, 0(%r20)
- nop
- nop
- nop
/* 32-bit exchange */
5: ldw 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
b atomic_xchg_start
6: stbys,e %r0, 0(%r23)
nop
nop
- nop
- nop
- nop
/* 64-bit exchange */
#ifdef CONFIG_64BIT
7: ldd 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
8: stdby,e %r0, 0(%r23)
#else
7: ldw 0(%r24), %r20
8: ldw 4(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
9: stbys,e %r0, 0(%r20)
@@ -1001,6 +1028,7 @@ atomic_xchg_start:
/* Try to acquire the lock */
LDCW 0(%sr2,%r20), %r28
+ spinlock_check %r28, %r21
comclr,<> %r0, %r28, %r0
b,n lws_wouldblock
@@ -1199,6 +1227,7 @@ atomic_store_start:
/* Try to acquire the lock */
LDCW 0(%sr2,%r20), %r28
+ spinlock_check %r28, %r21
comclr,<> %r0, %r28, %r0
b,n lws_wouldblock
@@ -1310,6 +1339,8 @@ ENTRY(sys_call_table)
END(sys_call_table)
#ifdef CONFIG_64BIT
+#undef __SYSCALL_WITH_COMPAT
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
.align 8
ENTRY(sys_call_table64)
#include <asm/syscall_table_64.h> /* 64-bit syscalls */
@@ -1330,7 +1361,7 @@ ENTRY(lws_lock_start)
/* lws locks */
.rept 256
/* Keep locks aligned at 16-bytes */
- .word 1
+ .word __ARCH_SPIN_LOCK_UNLOCKED_VAL
.word 0
.word 0
.word 0
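
In C terms, the spinlock_check macro added above enforces the following invariant; this is a sketch with the unlocked magic value stubbed (the real one lives in asm/spinlock_types.h) and SPINLOCK_BREAK_INSN modelled as an abort:

    #include <stdio.h>
    #include <stdlib.h>

    /* placeholder; the real value comes from asm/spinlock_types.h */
    #define __ARCH_SPIN_LOCK_UNLOCKED_VAL 1UL

    /* stand-in for executing SPINLOCK_BREAK_INSN */
    static void spinlock_break(void)
    {
            fprintf(stderr, "Spinlock was trashed\n");
            abort();
    }

    /* The word returned by LDCW may only be 0 (lock held) or the
     * unlocked magic value; any other pattern means corruption.
     * andcm,= computes val & ~magic and nullifies the break
     * instruction when the result is zero. */
    static void spinlock_check(unsigned long spin_val)
    {
            if (spin_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL)
                    spinlock_break();
    }

    int main(void)
    {
            spinlock_check(0);                             /* ok: held */
            spinlock_check(__ARCH_SPIN_LOCK_UNLOCKED_VAL); /* ok: free */
            spinlock_check(0xdeadbeefUL);                  /* aborts */
            return 0;
    }

The same reasoning explains the `.word __ARCH_SPIN_LOCK_UNLOCKED_VAL` change in the lws_lock_start table: the locks must be initialised to the magic value the checker accepts, not to a bare 1.
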
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 0e42fceb2d5e..39bdacaa530b 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -108,7 +108,7 @@
95 common fchown sys_fchown
96 common getpriority sys_getpriority
97 common setpriority sys_setpriority
-98 common recv sys_recv
+98 common recv sys_recv compat_sys_recv
99 common statfs sys_statfs compat_sys_statfs
100 common fstatfs sys_fstatfs compat_sys_fstatfs
101 common stat64 sys_stat64
@@ -135,7 +135,7 @@
120 common clone sys_clone_wrapper
121 common setdomainname sys_setdomainname
122 common sendfile sys_sendfile compat_sys_sendfile
-123 common recvfrom sys_recvfrom
+123 common recvfrom sys_recvfrom compat_sys_recvfrom
124 32 adjtimex sys_adjtimex_time32
124 64 adjtimex sys_adjtimex
125 common mprotect sys_mprotect
@@ -245,7 +245,7 @@
# 220 was alloc_hugepages
# 221 was free_hugepages
222 common exit_group sys_exit_group
-223 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+223 common lookup_dcookie sys_ni_syscall
224 common epoll_create sys_epoll_create
225 common epoll_ctl sys_epoll_ctl
226 common epoll_wait sys_epoll_wait
@@ -364,7 +364,7 @@
320 common accept4 sys_accept4
321 common prlimit64 sys_prlimit64
322 common fanotify_init sys_fanotify_init
-323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark
+323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
324 32 clock_adjtime sys_clock_adjtime32
324 64 clock_adjtime sys_clock_adjtime
325 common name_to_handle_at sys_name_to_handle_at
@@ -400,6 +400,7 @@
353 common pkey_free sys_pkey_free
354 common rseq sys_rseq
355 common kexec_file_load sys_kexec_file_load sys_kexec_file_load
+356 common cacheflush sys_cacheflush
# up to 402 is unassigned and reserved for arch specific syscalls
403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime
404 32 clock_settime64 sys_clock_settime sys_clock_settime
@@ -448,3 +449,23 @@
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
+463 common setxattrat sys_setxattrat
+464 common getxattrat sys_getxattrat
+465 common listxattrat sys_listxattrat
+466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
+470 common listns sys_listns
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 9714fbd7c42d..c17e2249115f 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -1,166 +1,119 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * linux/arch/parisc/kernel/time.c
+ * Common time service routines for parisc machines.
+ * based on arch/loongarch/kernel/time.c
*
- * Copyright (C) 1991, 1992, 1995 Linus Torvalds
- * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
- * Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
- *
- * 1994-07-02 Alan Modra
- * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- * 1998-12-20 Updated NTP code according to technical memorandum Jan '96
- * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * Copyright (C) 2024 Helge Deller <deller@gmx.de>
*/
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/rtc.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/sched_clock.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/profile.h>
-#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched_clock.h>
+#include <linux/spinlock.h>
+#include <linux/rtc.h>
#include <linux/platform_device.h>
-#include <linux/ftrace.h>
+#include <asm/processor.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/param.h>
-#include <asm/pdc.h>
-#include <asm/led.h>
+static u64 cr16_clock_freq;
+static unsigned long clocktick;
-#include <linux/timex.h>
+int time_keeper_id; /* CPU used for timekeeping */
-int time_keeper_id __read_mostly; /* CPU used for timekeeping. */
+static DEFINE_PER_CPU(struct clock_event_device, parisc_clockevent_device);
-static unsigned long clocktick __ro_after_init; /* timer cycles per tick */
+static void parisc_event_handler(struct clock_event_device *dev)
+{
+}
-/*
- * We keep time on PA-RISC Linux by using the Interval Timer which is
- * a pair of registers; one is read-only and one is write-only; both
- * accessed through CR16. The read-only register is 32 or 64 bits wide,
- * and increments by 1 every CPU clock tick. The architecture only
- * guarantees us a rate between 0.5 and 2, but all implementations use a
- * rate of 1. The write-only register is 32-bits wide. When the lowest
- * 32 bits of the read-only register compare equal to the write-only
- * register, it raises a maskable external interrupt. Each processor has
- * an Interval Timer of its own and they are not synchronised.
- *
- * We want to generate an interrupt every 1/HZ seconds. So we program
- * CR16 to interrupt every @clocktick cycles. The it_value in cpu_data
- * is programmed with the intended time of the next tick. We can be
- * held off for an arbitrarily long period of time by interrupts being
- * disabled, so we may miss one or more ticks.
- */
-irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
+static int parisc_timer_next_event(unsigned long delta, struct clock_event_device *evt)
{
- unsigned long now;
- unsigned long next_tick;
- unsigned long ticks_elapsed = 0;
- unsigned int cpu = smp_processor_id();
- struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
-
- /* gcc can optimize for "read-only" case with a local clocktick */
- unsigned long cpt = clocktick;
-
- /* Initialize next_tick to the old expected tick time. */
- next_tick = cpuinfo->it_value;
-
- /* Calculate how many ticks have elapsed. */
- now = mfctl(16);
- do {
- ++ticks_elapsed;
- next_tick += cpt;
- } while (next_tick - now > cpt);
-
- /* Store (in CR16 cycles) up to when we are accounting right now. */
- cpuinfo->it_value = next_tick;
-
- /* Go do system house keeping. */
- if (IS_ENABLED(CONFIG_SMP) && (cpu != time_keeper_id))
- ticks_elapsed = 0;
- legacy_timer_tick(ticks_elapsed);
-
- /* Skip clockticks on purpose if we know we would miss those.
- * The new CR16 must be "later" than current CR16 otherwise
- * itimer would not fire until CR16 wrapped - e.g 4 seconds
- * later on a 1Ghz processor. We'll account for the missed
- * ticks on the next timer interrupt.
- * We want IT to fire modulo clocktick even if we miss/skip some.
- * But those interrupts don't in fact get delivered that regularly.
- *
- * "next_tick - now" will always give the difference regardless
- * if one or the other wrapped. If "now" is "bigger" we'll end up
- * with a very large unsigned number.
- */
- now = mfctl(16);
- while (next_tick - now > cpt)
- next_tick += cpt;
-
- /* Program the IT when to deliver the next interrupt.
- * Only bottom 32-bits of next_tick are writable in CR16!
- * Timer interrupt will be delivered at least a few hundred cycles
- * after the IT fires, so if we are too close (<= 8000 cycles) to the
- * next cycle, simply skip it.
- */
- if (next_tick - now <= 8000)
- next_tick += cpt;
- mtctl(next_tick, 16);
+ unsigned long new_cr16;
- return IRQ_HANDLED;
-}
+ new_cr16 = mfctl(16) + delta;
+ mtctl(new_cr16, 16);
+ return 0;
+}
-unsigned long profile_pc(struct pt_regs *regs)
+irqreturn_t timer_interrupt(int irq, void *data)
{
- unsigned long pc = instruction_pointer(regs);
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
- if (regs->gr[0] & PSW_N)
- pc -= 4;
+ cd = &per_cpu(parisc_clockevent_device, cpu);
-#ifdef CONFIG_SMP
- if (in_lock_functions(pc))
- pc = regs->gr[2];
-#endif
+ if (clockevent_state_periodic(cd))
+ parisc_timer_next_event(clocktick, cd);
- return pc;
+ if (clockevent_state_periodic(cd) || clockevent_state_oneshot(cd))
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
}
-EXPORT_SYMBOL(profile_pc);
+static int parisc_set_state_oneshot(struct clock_event_device *evt)
+{
+ parisc_timer_next_event(clocktick, evt);
-/* clock source code */
+ return 0;
+}
-static u64 notrace read_cr16(struct clocksource *cs)
+static int parisc_set_state_periodic(struct clock_event_device *evt)
{
- return get_cycles();
+ parisc_timer_next_event(clocktick, evt);
+
+ return 0;
}
-static struct clocksource clocksource_cr16 = {
- .name = "cr16",
- .rating = 300,
- .read = read_cr16,
- .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
+static int parisc_set_state_shutdown(struct clock_event_device *evt)
+{
+ return 0;
+}
-void start_cpu_itimer(void)
+void parisc_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
- unsigned long next_tick = mfctl(16) + clocktick;
+ unsigned long min_delta = 0x600; /* XXX */
+ unsigned long max_delta = (1UL << (BITS_PER_LONG - 1));
+ struct clock_event_device *cd;
+
+ cd = &per_cpu(parisc_clockevent_device, cpu);
+
+ cd->name = "cr16_clockevent";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_PERCPU;
+
+ cd->irq = TIMER_IRQ;
+ cd->rating = 320;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_state_oneshot = parisc_set_state_oneshot;
+ cd->set_state_oneshot_stopped = parisc_set_state_shutdown;
+ cd->set_state_periodic = parisc_set_state_periodic;
+ cd->set_state_shutdown = parisc_set_state_shutdown;
+ cd->set_next_event = parisc_timer_next_event;
+ cd->event_handler = parisc_event_handler;
+
+ clockevents_config_and_register(cd, cr16_clock_freq, min_delta, max_delta);
+}
+
+unsigned long notrace profile_pc(struct pt_regs *regs)
+{
+ unsigned long pc = instruction_pointer(regs);
- mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
+ if (regs->gr[0] & PSW_N)
+ pc -= 4;
+
+#ifdef CONFIG_SMP
+ if (in_lock_functions(pc))
+ pc = regs->gr[2];
+#endif
- per_cpu(cpu_data, cpu).it_value = next_tick;
+ return pc;
}
+EXPORT_SYMBOL(profile_pc);
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
@@ -224,12 +177,27 @@ void read_persistent_clock64(struct timespec64 *ts)
}
}
-
static u64 notrace read_cr16_sched_clock(void)
{
return get_cycles();
}
+static u64 notrace read_cr16(struct clocksource *cs)
+{
+ return get_cycles();
+}
+
+static struct clocksource clocksource_cr16 = {
+ .name = "cr16",
+ .rating = 300,
+ .read = read_cr16,
+ .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS |
+ CLOCK_SOURCE_VALID_FOR_HRES |
+ CLOCK_SOURCE_MUST_VERIFY |
+ CLOCK_SOURCE_VERIFY_PERCPU,
+};
+
/*
* timer interrupt and sched_clock() initialization
@@ -237,33 +205,14 @@ static u64 notrace read_cr16_sched_clock(void)
void __init time_init(void)
{
- unsigned long cr16_hz;
-
- clocktick = (100 * PAGE0->mem_10msec) / HZ;
- start_cpu_itimer(); /* get CPU 0 started */
-
- cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
+ cr16_clock_freq = 100 * PAGE0->mem_10msec; /* Hz */
+ clocktick = cr16_clock_freq / HZ;
/* register as sched_clock source */
- sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
-}
+ sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_clock_freq);
-static int __init init_cr16_clocksource(void)
-{
- /*
- * The cr16 interval timers are not synchronized across CPUs.
- */
- if (num_online_cpus() > 1 && !running_on_qemu) {
- clocksource_cr16.name = "cr16_unstable";
- clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
- clocksource_cr16.rating = 0;
- }
+ parisc_clockevent_init();
/* register at clocksource framework */
- clocksource_register_hz(&clocksource_cr16,
- 100 * PAGE0->mem_10msec);
-
- return 0;
+ clocksource_register_hz(&clocksource_cr16, cr16_clock_freq);
}
-
-device_initcall(init_cr16_clocksource);
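
PAGE0->mem_10msec is the firmware's count of CR16 ticks per 10 ms, so multiplying by 100 gives the tick rate in Hz and dividing by HZ gives the per-tick programming delta used by the periodic handler. A worked example with assumed values (a 250 MHz CR16 and HZ=250):

    #include <stdio.h>

    int main(void)
    {
            unsigned long mem_10msec = 2500000;  /* assumed CR16 ticks / 10 ms */
            unsigned long hz = 250;              /* assumed CONFIG_HZ */

            unsigned long long cr16_clock_freq = 100ULL * mem_10msec;
            unsigned long clocktick = cr16_clock_freq / hz;

            /* 250000000 Hz, 1000000 CR16 ticks between periodic events */
            printf("%llu Hz, clocktick=%lu\n", cr16_clock_freq, clocktick);
            return 0;
    }

parisc_timer_next_event() then arms the comparator simply by writing mfctl(16) + delta back into CR16, which is why no extra stop/start plumbing is needed for the one-shot mode.
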
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index f9696fbf646c..4c7c5df80bd0 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -31,12 +31,13 @@
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kfence.h>
+#include <linux/perf_event.h>
#include <asm/assembly.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
@@ -47,6 +48,12 @@
#include <linux/kgdb.h>
#include <linux/kprobes.h>
+#include "unaligned.h"
+
+#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
+#include <asm/spinlock.h>
+#endif
+
#include "../math-emu/math-emu.h" /* for handle_fpe() */
static void parisc_show_stack(struct task_struct *task,
@@ -291,24 +298,30 @@ static void handle_break(struct pt_regs *regs)
}
#ifdef CONFIG_KPROBES
- if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
+ if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
parisc_kprobe_break_handler(regs);
return;
}
- if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2)) {
+ if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
parisc_kprobe_ss_handler(regs);
return;
}
#endif
#ifdef CONFIG_KGDB
- if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
- iir == PARISC_KGDB_BREAK_INSN)) {
+ if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
+ iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
kgdb_handle_exception(9, SIGTRAP, 0, regs);
return;
}
#endif
+#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
+ if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
+ die_if_kernel("Spinlock was trashed", regs, 1);
+ }
+#endif
+
if (unlikely(iir != GDB_BREAK_INSN))
parisc_printk_ratelimited(0, regs,
KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
@@ -325,10 +338,7 @@ static void default_trap(int code, struct pt_regs *regs)
show_regs(regs);
}
-void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
-
-
-void transfer_pim_to_trap_frame(struct pt_regs *regs)
+static void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
register int i;
extern unsigned int hpmc_pim_data[];
@@ -497,7 +507,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
if (((unsigned long)regs->iaoq[0] & 3) &&
((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
/* Kill the user process later */
- regs->iaoq[0] = 0 | 3;
+ regs->iaoq[0] = 0 | PRIV_USER;
regs->iaoq[1] = regs->iaoq[0] + 4;
regs->iasq[0] = regs->iasq[1] = regs->sr[7];
regs->gr[0] &= ~PSW_B;
@@ -547,7 +557,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
flush_cache_all();
flush_tlb_all();
- cpu_lpmc(5, regs);
+ default_trap(code, regs);
return;
case PARISC_ITLB_TRAP:
@@ -624,6 +634,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
/* Assist Exception Trap, i.e. floating point exception. */
die_if_kernel("Floating point exception", regs, 0); /* quiet */
__inc_irq_stat(irq_fpassist_count);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
handle_fpe(regs);
return;
@@ -790,14 +801,13 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
}
-void __init initialize_ivt(const void *iva)
+static void __init initialize_ivt(const void *iva)
{
extern const u32 os_hpmc[];
int i;
u32 check = 0;
u32 *ivap;
- u32 *hpmcp;
u32 instr;
if (strcmp((const char *)iva, "cows can fly"))
@@ -830,8 +840,6 @@ void __init initialize_ivt(const void *iva)
/* Setup IVA and compute checksum for HPMC handler */
ivap[6] = (u32)__pa(os_hpmc);
- hpmcp = (u32 *)os_hpmc;
-
for (i=0; i<8; i++)
check += ivap[i];
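
The `0 | 3` to `0 | PRIV_USER` substitution relies on the PA-RISC convention that the two low bits of an instruction address queue entry carry the privilege level. A small model of that encoding, assuming the values from asm/processor.h (PRIV_KERNEL == 0, PRIV_USER == 3):

    #include <stdio.h>

    #define PRIV_KERNEL 0UL   /* assumed, per asm/processor.h */
    #define PRIV_USER   3UL

    /* low two bits of IAOQ = privilege level of the executing code */
    static unsigned long iaoq_priv(unsigned long iaoq)
    {
            return iaoq & 3;
    }

    int main(void)
    {
            unsigned long iaoq = 0x11048UL | PRIV_USER; /* hypothetical user PC */
            printf("priv=%lu user=%d\n", iaoq_priv(iaoq),
                   iaoq_priv(iaoq) != PRIV_KERNEL);
            return 0;   /* priv=3 user=1 */
    }
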
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index e8a4d77cff53..fb64d9ce0b17 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -11,8 +11,12 @@
#include <linux/signal.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
+#include <linux/sysctl.h>
+#include <linux/unaligned.h>
+#include <linux/perf_event.h>
#include <asm/hardirq.h>
#include <asm/traps.h>
+#include "unaligned.h"
/* #define DEBUG_UNALIGNED 1 */
@@ -22,7 +26,7 @@
#define DPRINTF(fmt, args...)
#endif
-#define RFMT "%#08lx"
+#define RFMT "0x%08lx"
/* 1111 1100 0000 0000 0001 0011 1100 0000 */
#define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6)
@@ -102,6 +106,7 @@
#define ERR_NOTHANDLED -1
int unaligned_enabled __read_mostly = 1;
+int no_unaligned_warning __read_mostly;
static int emulate_ldh(struct pt_regs *regs, int toreg)
{
@@ -118,8 +123,8 @@ static int emulate_ldh(struct pt_regs *regs, int toreg)
"2: ldbs 1(%%sr1,%3), %0\n"
" depw %2, 23, 24, %0\n"
"3: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
: "+r" (val), "+r" (ret), "=&r" (temp1)
: "r" (saddr), "r" (regs->isr) );
@@ -150,8 +155,8 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
" mtctl %2,11\n"
" vshd %0,%3,%0\n"
"3: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
: "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2)
: "r" (saddr), "r" (regs->isr) );
@@ -167,6 +172,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
{
unsigned long saddr = regs->ior;
+ unsigned long shift, temp1;
__u64 val = 0;
ASM_EXCEPTIONTABLE_VAR(ret);
@@ -178,25 +184,22 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
#ifdef CONFIG_64BIT
__asm__ __volatile__ (
-" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */
-" mtsp %4, %%sr1\n"
-" depd %%r0,63,3,%3\n"
-"1: ldd 0(%%sr1,%3),%0\n"
-"2: ldd 8(%%sr1,%3),%%r20\n"
-" subi 64,%%r19,%%r19\n"
-" mtsar %%r19\n"
-" shrpd %0,%%r20,%%sar,%0\n"
+" depd,z %2,60,3,%3\n" /* shift=(ofs&7)*8 */
+" mtsp %5, %%sr1\n"
+" depd %%r0,63,3,%2\n"
+"1: ldd 0(%%sr1,%2),%0\n"
+"2: ldd 8(%%sr1,%2),%4\n"
+" subi 64,%3,%3\n"
+" mtsar %3\n"
+" shrpd %0,%4,%%sar,%0\n"
"3: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
- : "=r" (val), "+r" (ret)
- : "0" (val), "r" (saddr), "r" (regs->isr)
- : "r19", "r20" );
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
+ : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
+ : "r" (regs->isr) );
#else
- {
- unsigned long shift, temp1;
__asm__ __volatile__ (
-" zdep %2,29,2,%3\n" /* r19=(ofs&3)*8 */
+" zdep %2,29,2,%3\n" /* shift=(ofs&3)*8 */
" mtsp %5, %%sr1\n"
" dep %%r0,31,2,%2\n"
"1: ldw 0(%%sr1,%2),%0\n"
@@ -207,12 +210,11 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
" vshd %0,%R0,%0\n"
" vshd %R0,%4,%R0\n"
"4: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b, "%1")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b, "%1")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1")
: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
: "r" (regs->isr) );
- }
#endif
DPRINTF("val = 0x%llx\n", val);
@@ -242,8 +244,8 @@ static int emulate_sth(struct pt_regs *regs, int frreg)
"1: stb %1, 0(%%sr1, %3)\n"
"2: stb %2, 1(%%sr1, %3)\n"
"3: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
: "+r" (ret), "=&r" (temp1)
: "r" (val), "r" (regs->ior), "r" (regs->isr) );
@@ -283,8 +285,8 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
" stw %%r20,0(%%sr1,%2)\n"
" stw %%r21,4(%%sr1,%2)\n"
"3: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
: "+r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1" );
@@ -327,42 +329,41 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
"3: std %%r20,0(%%sr1,%2)\n"
"4: std %%r21,8(%%sr1,%2)\n"
"5: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b, "%0")
: "+r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1" );
#else
{
- unsigned long valh=(val>>32),vall=(val&0xffffffffl);
__asm__ __volatile__ (
-" mtsp %4, %%sr1\n"
-" zdep %2, 29, 2, %%r19\n"
-" dep %%r0, 31, 2, %3\n"
+" mtsp %3, %%sr1\n"
+" zdep %R1, 29, 2, %%r19\n"
+" dep %%r0, 31, 2, %2\n"
" mtsar %%r19\n"
" zvdepi -2, 32, %%r19\n"
-"1: ldw 0(%%sr1,%3),%%r20\n"
-"2: ldw 8(%%sr1,%3),%%r21\n"
-" vshd %1, %2, %%r1\n"
+"1: ldw 0(%%sr1,%2),%%r20\n"
+"2: ldw 8(%%sr1,%2),%%r21\n"
+" vshd %1, %R1, %%r1\n"
" vshd %%r0, %1, %1\n"
-" vshd %2, %%r0, %2\n"
+" vshd %R1, %%r0, %R1\n"
" and %%r20, %%r19, %%r20\n"
" andcm %%r21, %%r19, %%r21\n"
" or %1, %%r20, %1\n"
-" or %2, %%r21, %2\n"
-"3: stw %1,0(%%sr1,%3)\n"
-"4: stw %%r1,4(%%sr1,%3)\n"
-"5: stw %2,8(%%sr1,%3)\n"
+" or %R1, %%r21, %R1\n"
+"3: stw %1,0(%%sr1,%2)\n"
+"4: stw %%r1,4(%%sr1,%2)\n"
+"5: stw %R1,8(%%sr1,%2)\n"
"6: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b, "%0")
: "+r" (ret)
- : "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r1" );
}
#endif
@@ -378,6 +379,7 @@ void handle_unaligned(struct pt_regs *regs)
int ret = ERR_NOTHANDLED;
__inc_irq_stat(irq_unaligned_count);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->ior);
/* log a message with pacing */
if (user_mode(regs)) {
@@ -398,6 +400,14 @@ void handle_unaligned(struct pt_regs *regs)
if (!unaligned_enabled)
goto force_sigbus;
+ } else {
+ static DEFINE_RATELIMIT_STATE(kernel_ratelimit, 5 * HZ, 5);
+ if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
+ !no_unaligned_warning &&
+ __ratelimit(&kernel_ratelimit))
+ pr_warn("Kernel: unaligned access to " RFMT " in %pS "
+ "(iir " RFMT ")\n",
+ regs->ior, (void *)regs->iaoq[0], regs->iir);
}
/* handle modification - OK, it's ugly, see the instruction manual */
@@ -472,7 +482,7 @@ void handle_unaligned(struct pt_regs *regs)
case OPCODE_LDWA_I:
case OPCODE_LDW_S:
case OPCODE_LDWA_S:
- ret = emulate_ldw(regs, R3(regs->iir),0);
+ ret = emulate_ldw(regs, R3(regs->iir), 0);
break;
case OPCODE_STH:
@@ -481,7 +491,7 @@ void handle_unaligned(struct pt_regs *regs)
case OPCODE_STW:
case OPCODE_STWA:
- ret = emulate_stw(regs, R2(regs->iir),0);
+ ret = emulate_stw(regs, R2(regs->iir), 0);
break;
#ifdef CONFIG_64BIT
@@ -489,12 +499,12 @@ void handle_unaligned(struct pt_regs *regs)
case OPCODE_LDDA_I:
case OPCODE_LDD_S:
case OPCODE_LDDA_S:
- ret = emulate_ldd(regs, R3(regs->iir),0);
+ ret = emulate_ldd(regs, R3(regs->iir), 0);
break;
case OPCODE_STD:
case OPCODE_STDA:
- ret = emulate_std(regs, R2(regs->iir),0);
+ ret = emulate_std(regs, R2(regs->iir), 0);
break;
#endif
@@ -502,24 +512,24 @@ void handle_unaligned(struct pt_regs *regs)
case OPCODE_FLDWS:
case OPCODE_FLDWXR:
case OPCODE_FLDWSR:
- ret = emulate_ldw(regs,FR3(regs->iir),1);
+ ret = emulate_ldw(regs, FR3(regs->iir), 1);
break;
case OPCODE_FLDDX:
case OPCODE_FLDDS:
- ret = emulate_ldd(regs,R3(regs->iir),1);
+ ret = emulate_ldd(regs, R3(regs->iir), 1);
break;
case OPCODE_FSTWX:
case OPCODE_FSTWS:
case OPCODE_FSTWXR:
case OPCODE_FSTWSR:
- ret = emulate_stw(regs,FR3(regs->iir),1);
+ ret = emulate_stw(regs, FR3(regs->iir), 1);
break;
case OPCODE_FSTDX:
case OPCODE_FSTDS:
- ret = emulate_std(regs,R3(regs->iir),1);
+ ret = emulate_std(regs, R3(regs->iir), 1);
break;
case OPCODE_LDCD_I:
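
emulate_ldh() above rebuilds a halfword from two byte loads that cannot themselves fault on alignment: ldbs sign-extends the first byte and depw deposits it above the second. The same operation in portable C (PA-RISC is big-endian):

    #include <stdint.h>
    #include <stdio.h>

    /* C model of emulate_ldh(): fetch an unaligned big-endian halfword
     * with two single-byte accesses. */
    static int32_t load_unaligned_be16(const uint8_t *p)
    {
            int32_t hi = (int8_t)p[0];   /* ldbs 0(sr1,addr): sign-extends */
            int32_t lo = p[1];           /* ldbs 1(sr1,addr): low 8 bits kept */
            return (hi << 8) | lo;       /* depw merges the two halves */
    }

    int main(void)
    {
            uint8_t buf[] = { 0x00, 0x80, 0x12, 0x00 };
            /* halfword at odd address buf+1: bytes 0x80 0x12 -> -32750 */
            printf("%d\n", load_unaligned_be16(buf + 1));
            return 0;
    }
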
diff --git a/arch/parisc/kernel/unaligned.h b/arch/parisc/kernel/unaligned.h
new file mode 100644
index 000000000000..c1aa4b12e284
--- /dev/null
+++ b/arch/parisc/kernel/unaligned.h
@@ -0,0 +1,3 @@
+struct pt_regs;
+void handle_unaligned(struct pt_regs *regs);
+int check_unaligned(struct pt_regs *regs);
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 42acc3b52017..7ac88ff13d3c 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -24,16 +24,19 @@
#include <asm/unwind.h>
#include <asm/switch_to.h>
#include <asm/sections.h>
+#include <asm/ftrace.h>
/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) pr_debug(x)
#else
-#define dbg(x...)
+#define dbg(x...) do { } while (0)
#endif
#define KERNEL_START (KERNEL_BINARY_TEXT_START)
+#define ALIGNMENT_OK(ptr, type) (((ptr) & (sizeof(type) - 1)) == 0)
+
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];
@@ -179,7 +182,7 @@ void unwind_table_remove(struct unwind_table *table)
/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
- long start, stop;
+ long start __maybe_unused, stop __maybe_unused;
register unsigned long gp __asm__ ("r27");
start = (long)&__start___unwind[0];
@@ -220,7 +223,6 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
* Note: We could use dereference_kernel_function_descriptor()
* instead but we want to keep it simple here.
*/
- extern void * const handle_interruption;
extern void * const ret_from_kernel_thread;
extern void * const syscall_exit;
extern void * const intr_return;
@@ -237,13 +239,13 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
return 1;
}
- if (pc_is_kernel_fn(pc, ret_from_kernel_thread) ||
- pc_is_kernel_fn(pc, syscall_exit)) {
+ if (pc == (unsigned long)&ret_from_kernel_thread ||
+ pc == (unsigned long)&syscall_exit) {
info->prev_sp = info->prev_ip = 0;
return 1;
}
- if (pc_is_kernel_fn(pc, intr_return)) {
+ if (pc == (unsigned long)&intr_return) {
struct pt_regs *regs;
dbg("Found intr_return()\n");
@@ -255,14 +257,17 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
}
if (pc_is_kernel_fn(pc, _switch_to) ||
- pc_is_kernel_fn(pc, _switch_to_ret)) {
+ pc == (unsigned long)&_switch_to_ret) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
- info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+ if (ALIGNMENT_OK(info->prev_sp, long))
+ info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+ else
+ info->prev_ip = info->prev_sp = 0;
return 1;
}
#ifdef CONFIG_IRQSTACKS
- if (pc_is_kernel_fn(pc, _call_on_stack)) {
+ if (pc == (unsigned long)&_call_on_stack && ALIGNMENT_OK(info->sp, long)) {
info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
return 1;
@@ -370,8 +375,10 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
info->prev_sp = info->sp - frame_size;
if (e->Millicode)
info->rp = info->r31;
- else if (rpoffset)
+ else if (rpoffset && ALIGNMENT_OK(info->prev_sp, long))
info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
+ else
+ info->rp = 0;
info->prev_ip = info->rp;
info->rp = 0;
}
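
ALIGNMENT_OK() simply tests the low address bits against the access size, so the unwinder now refuses to dereference a saved stack or return pointer that is not long-aligned. Standalone:

    #include <stdio.h>

    #define ALIGNMENT_OK(ptr, type) (((ptr) & (sizeof(type) - 1)) == 0)

    int main(void)
    {
            unsigned long good = 0x1000, bad = 0x1002;

            /* on a 64-bit build sizeof(long) == 8, so the low 3 bits must be 0 */
            printf("%d %d\n", ALIGNMENT_OK(good, long), ALIGNMENT_OK(bad, long));
            return 0;   /* prints "1 0" */
    }
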
diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile
index 4459a48d2303..4ee8d17da229 100644
--- a/arch/parisc/kernel/vdso32/Makefile
+++ b/arch/parisc/kernel/vdso32/Makefile
@@ -1,11 +1,25 @@
-# List of files in the vdso, has to be asm only for now
+# Include the generic Makefile to check the built vdso.
+include $(srctree)/lib/vdso/Makefile.include
+
+KCOV_INSTRUMENT := n
+
+# Disable gcov profiling, ubsan and kasan for VDSO code
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
obj-vdso32 = note.o sigtramp.o restart_syscall.o
+obj-cvdso32 = vdso32_generic.o
# Build rules
-targets := $(obj-vdso32) vdso32.so
+targets := $(obj-vdso32) $(obj-cvdso32) vdso32.so
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
+obj-cvdso32 := $(addprefix $(obj)/, $(obj-cvdso32))
+
+VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_vdso32_generic.o = $(VDSO_CFLAGS_REMOVE)
ccflags-y := -shared -fno-common -fbuiltin -mno-fast-indirect-calls -O2 -mno-long-calls
# -march=1.1 -mschedule=7100LC
@@ -19,20 +33,19 @@ KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
VDSO_LIBGCC := $(shell $(CROSS32CC) -print-libgcc-file-name)
obj-y += vdso32_wrapper.o
-extra-y += vdso32.lds
+targets += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C # -U$(ARCH)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE
# Force dependency (incbin is bad)
# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE
+$(obj)/vdso32.so: $(obj)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE
$(call if_changed,vdso32ld)
# assembly rules for the .S files
$(obj-vdso32): %.o: %.S FORCE
$(call if_changed_dep,vdso32as)
-
$(obj-cvdso32): %.o: %.c FORCE
$(call if_changed_dep,vdso32cc)
@@ -42,10 +55,10 @@ quiet_cmd_vdso32ld = VDSO32L $@
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
quiet_cmd_vdso32cc = VDSO32C $@
- cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -fPIC -mno-fast-indirect-calls -o $@ $<
+ cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/parisc/kernel/vdso32/vdso32.lds.S b/arch/parisc/kernel/vdso32/vdso32.lds.S
index d4aff3af5262..4273baa26b65 100644
--- a/arch/parisc/kernel/vdso32/vdso32.lds.S
+++ b/arch/parisc/kernel/vdso32/vdso32.lds.S
@@ -106,6 +106,9 @@ VERSION
global:
__kernel_sigtramp_rt32;
__kernel_restart_syscall32;
+ __vdso_gettimeofday;
+ __vdso_clock_gettime;
+ __vdso_clock_gettime64;
local: *;
};
}
diff --git a/arch/parisc/kernel/vdso32/vdso32_generic.c b/arch/parisc/kernel/vdso32/vdso32_generic.c
new file mode 100644
index 000000000000..8d5bd59e8646
--- /dev/null
+++ b/arch/parisc/kernel/vdso32/vdso32_generic.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "asm/unistd.h"
+#include <linux/types.h>
+#include <uapi/asm/unistd_32.h>
+
+struct timezone;
+struct old_timespec32;
+struct __kernel_timespec;
+struct __kernel_old_timeval;
+
+/* forward declarations */
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts);
+int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts);
+
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return syscall2(__NR_gettimeofday, (long)tv, (long)tz);
+}
+
+int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts)
+{
+ return syscall2(__NR_clock_gettime, (long)clock, (long)ts);
+}
+
+int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return syscall2(__NR_clock_gettime64, (long)clock, (long)ts);
+}
diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile
index f3d6045793f4..c63f4069170f 100644
--- a/arch/parisc/kernel/vdso64/Makefile
+++ b/arch/parisc/kernel/vdso64/Makefile
@@ -1,12 +1,25 @@
-# List of files in the vdso, has to be asm only for now
+# Include the generic Makefile to check the built vdso.
+include $(srctree)/lib/vdso/Makefile.include
+
+KCOV_INSTRUMENT := n
+
+# Disable gcov profiling, ubsan and kasan for VDSO code
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
obj-vdso64 = note.o sigtramp.o restart_syscall.o
+obj-cvdso64 = vdso64_generic.o
# Build rules
-targets := $(obj-vdso64) vdso64.so
-obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
+targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so
+obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
+obj-cvdso64 := $(addprefix $(obj)/, $(obj-cvdso64))
+VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE)
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
@@ -19,28 +32,32 @@ KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
VDSO_LIBGCC := $(shell $(CC) -print-libgcc-file-name)
obj-y += vdso64_wrapper.o
-extra-y += vdso64.lds
+targets += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE
# Force dependency (incbin is bad)
# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) FORCE
+$(obj)/vdso64.so: $(obj)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) $(VDSO_LIBGCC) FORCE
$(call if_changed,vdso64ld)
# assembly rules for the .S files
$(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as)
+$(obj-cvdso64): %.o: %.c FORCE
+ $(call if_changed_dep,vdso64cc)
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
+quiet_cmd_vdso64cc = VDSO64C $@
+ cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/parisc/kernel/vdso64/vdso64.lds.S b/arch/parisc/kernel/vdso64/vdso64.lds.S
index de1fb4b19286..10f25e4e1554 100644
--- a/arch/parisc/kernel/vdso64/vdso64.lds.S
+++ b/arch/parisc/kernel/vdso64/vdso64.lds.S
@@ -104,6 +104,8 @@ VERSION
global:
__kernel_sigtramp_rt64;
__kernel_restart_syscall64;
+ __vdso_gettimeofday;
+ __vdso_clock_gettime;
local: *;
};
}
diff --git a/arch/parisc/kernel/vdso64/vdso64_generic.c b/arch/parisc/kernel/vdso64/vdso64_generic.c
new file mode 100644
index 000000000000..fc6836a0075b
--- /dev/null
+++ b/arch/parisc/kernel/vdso64/vdso64_generic.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "asm/unistd.h"
+#include <linux/types.h>
+
+struct timezone;
+struct __kernel_timespec;
+struct __kernel_old_timeval;
+
+/* forward declarations */
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return syscall2(__NR_gettimeofday, (long)tv, (long)tz);
+}
+
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return syscall2(__NR_clock_gettime, (long)clock, (long)ts);
+}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 2769eb991f58..b445e47903cf 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -86,7 +86,6 @@ SECTIONS
TEXT_TEXT
LOCK_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
@@ -128,9 +127,10 @@ SECTIONS
}
#endif
- RO_DATA(8)
+ RO_DATA(PAGE_SIZE)
/* unwind info */
+ . = ALIGN(4);
.PARISC.unwind : {
__start___unwind = .;
*(.PARISC.unwind)
@@ -155,6 +155,7 @@ SECTIONS
}
/* End of data section */
+ . = ALIGN(PAGE_SIZE);
_edata = .;
/* BSS */
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index 36a314199074..9df810050642 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -56,38 +56,20 @@ unsigned long notrace __xchg8(char x, volatile char *ptr)
}
-u64 notrace __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
-{
- unsigned long flags;
- u64 prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return prev;
-}
-
-unsigned long notrace __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
-{
- unsigned long flags;
- unsigned int prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return (unsigned long)prev;
-}
-
-u8 notrace __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
-{
- unsigned long flags;
- u8 prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return prev;
-}
+#define CMPXCHG(T) \
+ T notrace __cmpxchg_##T(volatile T *ptr, T old, T new) \
+ { \
+ unsigned long flags; \
+ T prev; \
+ \
+ _atomic_spin_lock_irqsave(ptr, flags); \
+ if ((prev = *ptr) == old) \
+ *ptr = new; \
+ _atomic_spin_unlock_irqrestore(ptr, flags); \
+ return prev; \
+ }
+
+CMPXCHG(u64)
+CMPXCHG(u32)
+CMPXCHG(u16)
+CMPXCHG(u8)
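
The four hand-rolled helpers collapse into one template, and u16 support comes along for free. For reference, the expansion compiles standalone once the kernel's hashed-lock helpers are stubbed out (a sketch, not the kernel build):

    #include <stdio.h>

    typedef unsigned char u8;

    /* stubs so the expansion compiles outside the kernel */
    #define _atomic_spin_lock_irqsave(p, f)      ((void)((f) = 0))
    #define _atomic_spin_unlock_irqrestore(p, f) ((void)(f))
    #define notrace

    #define CMPXCHG(T)                                              \
            T notrace __cmpxchg_##T(volatile T *ptr, T old, T new)  \
            {                                                       \
                    unsigned long flags;                            \
                    T prev;                                         \
                                                                    \
                    _atomic_spin_lock_irqsave(ptr, flags);          \
                    if ((prev = *ptr) == old)                       \
                            *ptr = new;                             \
                    _atomic_spin_unlock_irqrestore(ptr, flags);     \
                    return prev;                                    \
            }

    CMPXCHG(u8)   /* == the old __cmpxchg_u8(), minus the boilerplate */

    int main(void)
    {
            u8 v = 5;
            printf("%u %u\n", __cmpxchg_u8(&v, 5, 9), v);  /* "5 9" */
            return 0;
    }
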
diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c
index 4818f3db84a5..59d8c15d81bd 100644
--- a/arch/parisc/lib/checksum.c
+++ b/arch/parisc/lib/checksum.c
@@ -25,15 +25,6 @@
: "=r"(_t) \
: "r"(_r), "0"(_t));
-static inline unsigned short from32to16(unsigned int x)
-{
- /* 32 bits --> 16 bits + carry */
- x = (x & 0xffff) + (x >> 16);
- /* 16 bits + carry --> 16 bits including carry */
- x = (x & 0xffff) + (x >> 16);
- return (unsigned short)x;
-}
-
static inline unsigned int do_csum(const unsigned char * buff, int len)
{
int odd, count;
@@ -85,7 +76,7 @@ static inline unsigned int do_csum(const unsigned char * buff, int len)
}
if (len & 1)
result += le16_to_cpu(*buff);
- result = from32to16(result);
+ result = csum_from32to16(result);
if (odd)
result = swab16(result);
out:
@@ -102,7 +93,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned int result = do_csum(buff, len);
addc(result, sum);
- return (__force __wsum)from32to16(result);
+ return (__force __wsum)csum_from32to16(result);
}
EXPORT_SYMBOL(csum_partial);
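
The open-coded helper is replaced by the generic csum_from32to16(), which performs the same double end-around-carry fold. Worked standalone, with a value whose first fold itself produces a carry:

    #include <stdio.h>

    /* 32-bit partial sum -> 16-bit one's-complement fold, as the removed
     * from32to16() (and the generic csum_from32to16()) compute it */
    static unsigned short fold32to16(unsigned int x)
    {
            x = (x & 0xffff) + (x >> 16);  /* 32 -> 16 bits + carry */
            x = (x & 0xffff) + (x >> 16);  /* absorb the carry */
            return (unsigned short)x;
    }

    int main(void)
    {
            /* 0xffff0001: first fold gives 0x10000, second folds it to 1 */
            printf("%#x\n", fold32to16(0xffff0001u));  /* prints 0x1 */
            return 0;
    }
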
diff --git a/arch/parisc/lib/io.c b/arch/parisc/lib/io.c
index 7c00496b47d4..3c7e617f5a93 100644
--- a/arch/parisc/lib/io.c
+++ b/arch/parisc/lib/io.c
@@ -12,114 +12,6 @@
#include <linux/module.h>
#include <asm/io.h>
-/* Copies a block of memory to a device in an efficient manner.
- * Assumes the device can cope with 32-bit transfers. If it can't,
- * don't use this function.
- */
-void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
-{
- if (((unsigned long)dst & 3) != ((unsigned long)src & 3))
- goto bytecopy;
- while ((unsigned long)dst & 3) {
- writeb(*(char *)src, dst++);
- src++;
- count--;
- }
- while (count > 3) {
- __raw_writel(*(u32 *)src, dst);
- src += 4;
- dst += 4;
- count -= 4;
- }
- bytecopy:
- while (count--) {
- writeb(*(char *)src, dst++);
- src++;
- }
-}
-
-/*
-** Copies a block of memory from a device in an efficient manner.
-** Assumes the device can cope with 32-bit transfers. If it can't,
-** don't use this function.
-**
-** CR16 counts on C3000 reading 256 bytes from Symbios 896 RAM:
-** 27341/64 = 427 cyc per int
-** 61311/128 = 478 cyc per short
-** 122637/256 = 479 cyc per byte
-** Ergo bus latencies dominant (not transfer size).
-** Minimize total number of transfers at cost of CPU cycles.
-** TODO: only look at src alignment and adjust the stores to dest.
-*/
-void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
-{
- /* first compare alignment of src/dst */
- if ( (((unsigned long)dst ^ (unsigned long)src) & 1) || (count < 2) )
- goto bytecopy;
-
- if ( (((unsigned long)dst ^ (unsigned long)src) & 2) || (count < 4) )
- goto shortcopy;
-
- /* Then check for misaligned start address */
- if ((unsigned long)src & 1) {
- *(u8 *)dst = readb(src);
- src++;
- dst++;
- count--;
- if (count < 2) goto bytecopy;
- }
-
- if ((unsigned long)src & 2) {
- *(u16 *)dst = __raw_readw(src);
- src += 2;
- dst += 2;
- count -= 2;
- }
-
- while (count > 3) {
- *(u32 *)dst = __raw_readl(src);
- dst += 4;
- src += 4;
- count -= 4;
- }
-
- shortcopy:
- while (count > 1) {
- *(u16 *)dst = __raw_readw(src);
- src += 2;
- dst += 2;
- count -= 2;
- }
-
- bytecopy:
- while (count--) {
- *(char *)dst = readb(src);
- src++;
- dst++;
- }
-}
-
-/* Sets a block of memory on a device to a given value.
- * Assumes the device can cope with 32-bit transfers. If it can't,
- * don't use this function.
- */
-void memset_io(volatile void __iomem *addr, unsigned char val, int count)
-{
- u32 val32 = (val << 24) | (val << 16) | (val << 8) | val;
- while ((unsigned long)addr & 3) {
- writeb(val, addr++);
- count--;
- }
- while (count > 3) {
- __raw_writel(val32, addr);
- addr += 4;
- count -= 4;
- }
- while (count--) {
- writeb(val, addr++);
- }
-}
-
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at
* SRC.
@@ -170,15 +62,15 @@ void insw (unsigned long port, void *dst, unsigned long count)
unsigned char *p;
p = (unsigned char *)dst;
-
+
if (!count)
return;
-
+
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count>=2) {
-
+
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
@@ -189,13 +81,13 @@ void insw (unsigned long port, void *dst, unsigned long count)
*(unsigned short *)p = cpu_to_le16(inw(port));
}
break;
-
+
case 0x02: /* Buffer 16-bit aligned */
*(unsigned short *)p = cpu_to_le16(inw(port));
p += 2;
count--;
while (count>=2) {
-
+
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
@@ -206,13 +98,13 @@ void insw (unsigned long port, void *dst, unsigned long count)
*(unsigned short *)p = cpu_to_le16(inw(port));
}
break;
-
+
case 0x01: /* Buffer 8-bit aligned */
case 0x03:
/* I don't bother with 32bit transfers
* in this case, 16bit will have to do -- DE */
--count;
-
+
l = cpu_to_le16(inw(port));
*p = l >> 8;
p++;
@@ -242,10 +134,10 @@ void insl (unsigned long port, void *dst, unsigned long count)
unsigned char *p;
p = (unsigned char *)dst;
-
+
if (!count)
return;
-
+
switch (((unsigned long) dst) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
@@ -255,14 +147,14 @@ void insl (unsigned long port, void *dst, unsigned long count)
p += 4;
}
break;
-
+
case 0x02: /* Buffer 16-bit aligned */
--count;
-
+
l = cpu_to_le32(inl(port));
*(unsigned short *)p = l >> 16;
p += 2;
-
+
while (count--)
{
l2 = cpu_to_le32(inl(port));
@@ -274,7 +166,7 @@ void insl (unsigned long port, void *dst, unsigned long count)
break;
case 0x01: /* Buffer 8-bit aligned */
--count;
-
+
l = cpu_to_le32(inl(port));
*(unsigned char *)p = l >> 24;
p++;
@@ -291,7 +183,7 @@ void insl (unsigned long port, void *dst, unsigned long count)
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
-
+
l = cpu_to_le32(inl(port));
*p = l >> 24;
p++;
@@ -340,10 +232,10 @@ void outsw (unsigned long port, const void *src, unsigned long count)
const unsigned char *p;
p = (const unsigned char *)src;
-
+
if (!count)
return;
-
+
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
@@ -358,13 +250,13 @@ void outsw (unsigned long port, const void *src, unsigned long count)
outw(le16_to_cpu(*(unsigned short*)p), port);
}
break;
-
+
case 0x02: /* Buffer 16-bit aligned */
-
+
outw(le16_to_cpu(*(unsigned short*)p), port);
p += 2;
count--;
-
+
while (count>=2) {
count -= 2;
l = *(unsigned int *)p;
@@ -376,11 +268,11 @@ void outsw (unsigned long port, const void *src, unsigned long count)
outw(le16_to_cpu(*(unsigned short *)p), port);
}
break;
-
- case 0x01: /* Buffer 8-bit aligned */
+
+ case 0x01: /* Buffer 8-bit aligned */
/* I don't bother with 32bit transfers
* in this case, 16bit will have to do -- DE */
-
+
l = *p << 8;
p++;
count--;
@@ -395,7 +287,7 @@ void outsw (unsigned long port, const void *src, unsigned long count)
l2 = *(unsigned char *)p;
outw (le16_to_cpu(l | l2>>8), port);
break;
-
+
}
}
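
The switch statements in these routines dispatch on the low two bits of the buffer address so that pairs of 16-bit port transfers can be coalesced into single aligned 32-bit memory accesses. A minimal user-space sketch of the aligned insw() path, with the real MMIO accessor replaced by a stub and byte-order handling omitted:

    #include <stdint.h>

    /* stub standing in for inw(); returns successive test values */
    static uint16_t inw_stub(void) { static uint16_t v; return ++v; }

    /* aligned path: fold two 16-bit reads into one 32-bit store */
    static void insw_aligned_sketch(uint32_t *p, unsigned long count)
    {
        while (count >= 2) {
            uint32_t l = (uint32_t)inw_stub() << 16;
            l |= inw_stub();
            *p++ = l;
            count -= 2;
        }
        if (count)
            *(uint16_t *)p = inw_stub();
    }
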
@@ -412,10 +304,10 @@ void outsl (unsigned long port, const void *src, unsigned long count)
const unsigned char *p;
p = (const unsigned char *)src;
-
+
if (!count)
return;
-
+
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
@@ -425,13 +317,13 @@ void outsl (unsigned long port, const void *src, unsigned long count)
p += 4;
}
break;
-
+
case 0x02: /* Buffer 16-bit aligned */
--count;
-
+
l = *(unsigned short *)p;
p += 2;
-
+
while (count--)
{
l2 = *(unsigned int *)p;
@@ -462,7 +354,7 @@ void outsl (unsigned long port, const void *src, unsigned long count)
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
-
+
l = *p << 24;
p++;
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index 5fc0c852c84c..03165c82dfdb 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
+#include <linux/mm.h>
#define get_user_space() mfsp(SR_USER)
#define get_kernel_space() SR_KERNEL
@@ -32,9 +33,24 @@ EXPORT_SYMBOL(raw_copy_to_user);
unsigned long raw_copy_from_user(void *dst, const void __user *src,
unsigned long len)
{
+ unsigned long start = (unsigned long) src;
+ unsigned long end = start + len;
+ unsigned long newlen = len;
+
mtsp(get_user_space(), SR_TEMP1);
mtsp(get_kernel_space(), SR_TEMP2);
- return pa_memcpy(dst, (void __force *)src, len);
+
+ /* Check region is user accessible */
+ while (start < end) {
+ if (!prober_user(SR_TEMP1, start)) {
+ newlen = (start - (unsigned long) src);
+ break;
+ }
+ start += PAGE_SIZE;
+ /* align to page boundary which may have different permissions */
+ start = PAGE_ALIGN_DOWN(start);
+ }
+ return len - newlen + pa_memcpy(dst, (void __force *)src, newlen);
}
EXPORT_SYMBOL(raw_copy_from_user);
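
The loop added above walks the source range page by page and clamps the copy length at the first page that fails the access probe, so pa_memcpy() never faults mid-copy while the return value still reports the untouched tail. A sketch of the clamping logic, with prober_user() replaced by a hypothetical page_ok() predicate and a toy access policy:

    #include <stdbool.h>

    #define PAGE_SIZE_SK 4096UL
    #define ALIGN_DOWN_SK(x) ((x) & ~(PAGE_SIZE_SK - 1))

    /* hypothetical stand-in for prober_user(): may this address be read? */
    static bool page_ok(unsigned long addr) { return addr < 0x80000000UL; }

    static unsigned long clamp_user_len(unsigned long src, unsigned long len)
    {
        unsigned long start = src, end = src + len;

        while (start < end) {
            if (!page_ok(start))
                return start - src;   /* copy only the accessible prefix */
            /* step to the next page boundary, which may differ in permissions */
            start = ALIGN_DOWN_SK(start + PAGE_SIZE_SK);
        }
        return len;
    }
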
diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c
index 8e6014a142ef..9d8b4dbae273 100644
--- a/arch/parisc/lib/ucmpdi2.c
+++ b/arch/parisc/lib/ucmpdi2.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
+#include <linux/libgcc.h>
union ull_union {
unsigned long long ull;
@@ -9,7 +10,7 @@ union ull_union {
} ui;
};
-int __ucmpdi2(unsigned long long a, unsigned long long b)
+word_type __ucmpdi2(unsigned long long a, unsigned long long b)
{
union ull_union au = {.ull = a};
union ull_union bu = {.ull = b};
diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile
index 3747a0cbd3b8..7b64740e150a 100644
--- a/arch/parisc/math-emu/Makefile
+++ b/arch/parisc/math-emu/Makefile
@@ -6,7 +6,8 @@
# See arch/parisc/math-emu/README
ccflags-y := -Wno-parentheses -Wno-implicit-function-declaration \
-Wno-uninitialized -Wno-strict-prototypes -Wno-return-type \
- -Wno-implicit-int
+ -Wno-implicit-int -Wno-missing-prototypes -Wno-missing-declarations \
+ -Wno-old-style-definition -Wno-unused-but-set-variable
obj-y := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \
dfmpy.o sfmpy.o sfsqrt.o dfsqrt.o dfadd.o fmpyfadd.o \
diff --git a/arch/parisc/math-emu/dfsqrt.c b/arch/parisc/math-emu/dfsqrt.c
index 63d339c81c14..e3a3a19b966b 100644
--- a/arch/parisc/math-emu/dfsqrt.c
+++ b/arch/parisc/math-emu/dfsqrt.c
@@ -15,7 +15,7 @@
* Double Floating-point Square Root
*
* External Interfaces:
- * dbl_fsqrt(srcptr,nullptr,dstptr,status)
+ * dbl_fsqrt(srcptr,_nullptr,dstptr,status)
*
* Internal Interfaces:
*
@@ -37,7 +37,7 @@
unsigned int
dbl_fsqrt(
dbl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c
index 6ce427b58836..71829cb7bc81 100644
--- a/arch/parisc/math-emu/driver.c
+++ b/arch/parisc/math-emu/driver.c
@@ -26,12 +26,6 @@
#define FPUDEBUG 0
-/* Format of the floating-point exception registers. */
-struct exc_reg {
- unsigned int exception : 6;
- unsigned int ei : 26;
-};
-
/* Macros for grabbing bits of the instruction format from the 'ei'
field above. */
/* Major opcode 0c and 0e */
@@ -103,9 +97,19 @@ handle_fpe(struct pt_regs *regs)
memcpy(regs->fr, frcopy, sizeof regs->fr);
if (signalcode != 0) {
- force_sig_fault(signalcode >> 24, signalcode & 0xffffff,
- (void __user *) regs->iaoq[0]);
- return -1;
+ int sig = signalcode >> 24;
+
+ if (sig == SIGFPE) {
+ /*
+ * Clear floating point trap bit to avoid trapping
+ * again on the first floating-point instruction in
+ * the userspace signal handler.
+ */
+ regs->fr[0] &= ~(1ULL << 38);
+ }
+ force_sig_fault(sig, signalcode & 0xffffff,
+ (void __user *) regs->iaoq[0]);
+ return -1;
}
return signalcode ? -1 : 0;
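
The new branch clears bit 38 of fr[0], the floating-point trap bit named in the comment, before SIGFPE is queued; otherwise the first floating-point instruction in the user's signal handler would immediately trap again. The mask arithmetic, isolated as a sketch:

    /* clear the FP trap bit (bit 38 of the 64-bit fr[0]) before delivery */
    static inline unsigned long long fp_trap_cleared(unsigned long long fr0)
    {
        return fr0 & ~(1ULL << 38);
    }
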
diff --git a/arch/parisc/math-emu/fcnvff.c b/arch/parisc/math-emu/fcnvff.c
index 0530e6127797..61e489704c86 100644
--- a/arch/parisc/math-emu/fcnvff.c
+++ b/arch/parisc/math-emu/fcnvff.c
@@ -16,8 +16,8 @@
* Double Floating-point to Single Floating-point
*
* External Interfaces:
- * dbl_to_sgl_fcnvff(srcptr,nullptr,dstptr,status)
- * sgl_to_dbl_fcnvff(srcptr,nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvff(srcptr,_nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvff(srcptr,_nullptr,dstptr,status)
*
* Internal Interfaces:
*
@@ -40,7 +40,7 @@
int
sgl_to_dbl_fcnvff(
sgl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
@@ -127,7 +127,7 @@ sgl_to_dbl_fcnvff(
int
dbl_to_sgl_fcnvff(
dbl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
diff --git a/arch/parisc/math-emu/fcnvfu.c b/arch/parisc/math-emu/fcnvfu.c
index c971618a6f3c..c31790ceecca 100644
--- a/arch/parisc/math-emu/fcnvfu.c
+++ b/arch/parisc/math-emu/fcnvfu.c
@@ -15,10 +15,10 @@
* Floating-point to Unsigned Fixed-point Converts
*
* External Interfaces:
- * dbl_to_dbl_fcnvfu(srcptr,nullptr,dstptr,status)
- * dbl_to_sgl_fcnvfu(srcptr,nullptr,dstptr,status)
- * sgl_to_dbl_fcnvfu(srcptr,nullptr,dstptr,status)
- * sgl_to_sgl_fcnvfu(srcptr,nullptr,dstptr,status)
+ * dbl_to_dbl_fcnvfu(srcptr,_nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvfu(srcptr,_nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvfu(srcptr,_nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvfu(srcptr,_nullptr,dstptr,status)
*
* Internal Interfaces:
*
@@ -45,7 +45,7 @@
int
sgl_to_sgl_fcnvfu(
sgl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
unsigned int *dstptr,
unsigned int *status)
{
@@ -166,7 +166,7 @@ sgl_to_sgl_fcnvfu(
int
sgl_to_dbl_fcnvfu(
sgl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_unsigned *dstptr,
unsigned int *status)
{
@@ -285,7 +285,7 @@ sgl_to_dbl_fcnvfu(
*/
/*ARGSUSED*/
int
-dbl_to_sgl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr,
+dbl_to_sgl_fcnvfu (dbl_floating_point * srcptr, unsigned int *_nullptr,
unsigned int *dstptr, unsigned int *status)
{
register unsigned int srcp1, srcp2, result;
@@ -408,7 +408,7 @@ dbl_to_sgl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr,
*/
/*ARGSUSED*/
int
-dbl_to_dbl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr,
+dbl_to_dbl_fcnvfu (dbl_floating_point * srcptr, unsigned int *_nullptr,
dbl_unsigned * dstptr, unsigned int *status)
{
register int src_exponent;
diff --git a/arch/parisc/math-emu/fcnvfut.c b/arch/parisc/math-emu/fcnvfut.c
index 5b657f852578..2cf1daf3b7ad 100644
--- a/arch/parisc/math-emu/fcnvfut.c
+++ b/arch/parisc/math-emu/fcnvfut.c
@@ -15,10 +15,10 @@
* Floating-point to Unsigned Fixed-point Converts with Truncation
*
* External Interfaces:
- * dbl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status)
- * dbl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status)
- * sgl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status)
- * sgl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status)
+ * dbl_to_dbl_fcnvfut(srcptr,_nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvfut(srcptr,_nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvfut(srcptr,_nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvfut(srcptr,_nullptr,dstptr,status)
*
* Internal Interfaces:
*
@@ -44,7 +44,7 @@
*/
/*ARGSUSED*/
int
-sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr,
+sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *_nullptr,
unsigned int *dstptr, unsigned int *status)
{
register unsigned int src, result;
@@ -113,7 +113,7 @@ sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr,
*/
/*ARGSUSED*/
int
-sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr,
+sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *_nullptr,
dbl_unsigned * dstptr, unsigned int *status)
{
register int src_exponent;
@@ -183,7 +183,7 @@ sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr,
*/
/*ARGSUSED*/
int
-dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr,
+dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *_nullptr,
unsigned int *dstptr, unsigned int *status)
{
register unsigned int srcp1, srcp2, result;
@@ -252,7 +252,7 @@ dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr,
*/
/*ARGSUSED*/
int
-dbl_to_dbl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr,
+dbl_to_dbl_fcnvfut (dbl_floating_point * srcptr, unsigned int *_nullptr,
dbl_unsigned * dstptr, unsigned int *status)
{
register int src_exponent;
diff --git a/arch/parisc/math-emu/fcnvfx.c b/arch/parisc/math-emu/fcnvfx.c
index 5e153078d803..99bd61479452 100644
--- a/arch/parisc/math-emu/fcnvfx.c
+++ b/arch/parisc/math-emu/fcnvfx.c
@@ -18,10 +18,10 @@
* Double Floating-point to Double Fixed-point
*
* External Interfaces:
- * dbl_to_dbl_fcnvfx(srcptr,nullptr,dstptr,status)
- * dbl_to_sgl_fcnvfx(srcptr,nullptr,dstptr,status)
- * sgl_to_dbl_fcnvfx(srcptr,nullptr,dstptr,status)
- * sgl_to_sgl_fcnvfx(srcptr,nullptr,dstptr,status)
+ * dbl_to_dbl_fcnvfx(srcptr,_nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvfx(srcptr,_nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvfx(srcptr,_nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvfx(srcptr,_nullptr,dstptr,status)
*
* Internal Interfaces:
*
@@ -44,7 +44,7 @@
int
sgl_to_sgl_fcnvfx(
sgl_floating_point *srcptr,
- sgl_floating_point *nullptr,
+ sgl_floating_point *_nullptr,
int *dstptr,
sgl_floating_point *status)
{
@@ -141,7 +141,7 @@ sgl_to_sgl_fcnvfx(
int
sgl_to_dbl_fcnvfx(
sgl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
@@ -262,7 +262,7 @@ sgl_to_dbl_fcnvfx(
int
dbl_to_sgl_fcnvfx(
dbl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
int *dstptr,
unsigned int *status)
{
@@ -373,7 +373,7 @@ dbl_to_sgl_fcnvfx(
int
dbl_to_dbl_fcnvfx(
dbl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
diff --git a/arch/parisc/math-emu/fcnvfxt.c b/arch/parisc/math-emu/fcnvfxt.c
index ebec31e40d01..3b7cc62257d0 100644
--- a/arch/parisc/math-emu/fcnvfxt.c
+++ b/arch/parisc/math-emu/fcnvfxt.c
@@ -18,10 +18,10 @@
* Double Floating-point to Double Fixed-point /w truncated result
*
* External Interfaces:
- * dbl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
- * dbl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
- * sgl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
- * sgl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
+ * dbl_to_dbl_fcnvfxt(srcptr,_nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvfxt(srcptr,_nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvfxt(srcptr,_nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvfxt(srcptr,_nullptr,dstptr,status)
*
* Internal Interfaces:
*
@@ -45,7 +45,7 @@
int
sgl_to_sgl_fcnvfxt(
sgl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
int *dstptr,
unsigned int *status)
{
@@ -109,7 +109,7 @@ sgl_to_sgl_fcnvfxt(
int
sgl_to_dbl_fcnvfxt(
sgl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
@@ -183,7 +183,7 @@ sgl_to_dbl_fcnvfxt(
int
dbl_to_sgl_fcnvfxt(
dbl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
int *dstptr,
unsigned int *status)
{
@@ -248,7 +248,7 @@ dbl_to_sgl_fcnvfxt(
int
dbl_to_dbl_fcnvfxt(
dbl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
diff --git a/arch/parisc/math-emu/fcnvuf.c b/arch/parisc/math-emu/fcnvuf.c
index c54978a0ace1..c166feb57045 100644
--- a/arch/parisc/math-emu/fcnvuf.c
+++ b/arch/parisc/math-emu/fcnvuf.c
@@ -15,10 +15,10 @@
* Fixed point to Floating-point Converts
*
* External Interfaces:
- * dbl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status)
- * dbl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status)
- * sgl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status)
- * sgl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status)
+ * dbl_to_dbl_fcnvuf(srcptr,_nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvuf(srcptr,_nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvuf(srcptr,_nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvuf(srcptr,_nullptr,dstptr,status)
*
* Internal Interfaces:
*
@@ -45,7 +45,7 @@
int
sgl_to_sgl_fcnvuf(
unsigned int *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
@@ -104,7 +104,7 @@ sgl_to_sgl_fcnvuf(
int
sgl_to_dbl_fcnvuf(
unsigned int *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
@@ -145,7 +145,7 @@ sgl_to_dbl_fcnvuf(
int
dbl_to_sgl_fcnvuf(
dbl_unsigned *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
@@ -227,7 +227,7 @@ dbl_to_sgl_fcnvuf(
int
dbl_to_dbl_fcnvuf(
dbl_unsigned *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
diff --git a/arch/parisc/math-emu/fcnvxf.c b/arch/parisc/math-emu/fcnvxf.c
index 69401797146b..11bc1e8a13aa 100644
--- a/arch/parisc/math-emu/fcnvxf.c
+++ b/arch/parisc/math-emu/fcnvxf.c
@@ -18,10 +18,10 @@
* Double Fixed-point to Double Floating-point
*
* External Interfaces:
- * dbl_to_dbl_fcnvxf(srcptr,nullptr,dstptr,status)
- * dbl_to_sgl_fcnvxf(srcptr,nullptr,dstptr,status)
- * sgl_to_dbl_fcnvxf(srcptr,nullptr,dstptr,status)
- * sgl_to_sgl_fcnvxf(srcptr,nullptr,dstptr,status)
+ * dbl_to_dbl_fcnvxf(srcptr,_nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvxf(srcptr,_nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvxf(srcptr,_nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvxf(srcptr,_nullptr,dstptr,status)
*
* Internal Interfaces:
*
@@ -44,7 +44,7 @@
int
sgl_to_sgl_fcnvxf(
int *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
@@ -115,7 +115,7 @@ sgl_to_sgl_fcnvxf(
int
sgl_to_dbl_fcnvxf(
int *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
@@ -166,7 +166,7 @@ sgl_to_dbl_fcnvxf(
int
dbl_to_sgl_fcnvxf(
dbl_integer *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
@@ -271,7 +271,7 @@ dbl_to_sgl_fcnvxf(
int
dbl_to_dbl_fcnvxf(
dbl_integer *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
diff --git a/arch/parisc/math-emu/frnd.c b/arch/parisc/math-emu/frnd.c
index 0b0e8493e08a..825d89650c2d 100644
--- a/arch/parisc/math-emu/frnd.c
+++ b/arch/parisc/math-emu/frnd.c
@@ -14,8 +14,8 @@
* Quad Floating-point Round to Integer (returns unimplemented)
*
* External Interfaces:
- * dbl_frnd(srcptr,nullptr,dstptr,status)
- * sgl_frnd(srcptr,nullptr,dstptr,status)
+ * dbl_frnd(srcptr,_nullptr,dstptr,status)
+ * sgl_frnd(srcptr,_nullptr,dstptr,status)
*
* END_DESC
*/
@@ -33,7 +33,7 @@
/*ARGSUSED*/
int
sgl_frnd(sgl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
@@ -138,7 +138,7 @@ sgl_frnd(sgl_floating_point *srcptr,
int
dbl_frnd(
dbl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
diff --git a/arch/parisc/math-emu/sfsqrt.c b/arch/parisc/math-emu/sfsqrt.c
index bd6a84f468d8..8e9e023e7b2e 100644
--- a/arch/parisc/math-emu/sfsqrt.c
+++ b/arch/parisc/math-emu/sfsqrt.c
@@ -15,7 +15,7 @@
* Single Floating-point Square Root
*
* External Interfaces:
- * sgl_fsqrt(srcptr,nullptr,dstptr,status)
+ * sgl_fsqrt(srcptr,_nullptr,dstptr,status)
*
* Internal Interfaces:
*
@@ -37,7 +37,7 @@
unsigned int
sgl_fsqrt(
sgl_floating_point *srcptr,
- unsigned int *nullptr,
+ unsigned int *_nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 869204e97ec9..f1785640b049 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -150,11 +150,16 @@ int fixup_exception(struct pt_regs *regs)
* Fix up get_user() and put_user().
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
* bit in the relative address of the fixup routine to indicate
- * that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with
- * -EFAULT to report a userspace access error.
+ * that the register encoded in the "or %r0,%r0,register"
+ * opcode should be loaded with -EFAULT to report a userspace
+ * access error.
*/
if (fix->fixup & 1) {
- regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;
+ int fault_error_reg = fix->err_opcode & 0x1f;
+ if (!WARN_ON(!fault_error_reg))
+ regs->gr[fault_error_reg] = -EFAULT;
+ pr_debug("Unalignment fixup of register %d at %pS\n",
+ fault_error_reg, (void*)regs->iaoq[0]);
/* zero target register for get_user() */
if (parisc_acctyp(0, regs->iir) == VM_READ) {
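
The err_opcode field holds a pre-built "or %r0,%r0,reg" instruction, and PA-RISC encodes the target register of that form in the low five bits, so the decode above is a single mask. As a sketch:

    /* target register of "or %r0,%r0,t" lives in bits 0..4 of the opcode;
     * zero means no error register was recorded for this fixup */
    static inline unsigned int fixup_err_reg(unsigned int err_opcode)
    {
        return err_opcode & 0x1f;
    }
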
@@ -192,31 +197,31 @@ int fixup_exception(struct pt_regs *regs)
* For implementation see handle_interruption() in traps.c
*/
static const char * const trap_description[] = {
- [1] "High-priority machine check (HPMC)",
- [2] "Power failure interrupt",
- [3] "Recovery counter trap",
- [5] "Low-priority machine check",
- [6] "Instruction TLB miss fault",
- [7] "Instruction access rights / protection trap",
- [8] "Illegal instruction trap",
- [9] "Break instruction trap",
- [10] "Privileged operation trap",
- [11] "Privileged register trap",
- [12] "Overflow trap",
- [13] "Conditional trap",
- [14] "FP Assist Exception trap",
- [15] "Data TLB miss fault",
- [16] "Non-access ITLB miss fault",
- [17] "Non-access DTLB miss fault",
- [18] "Data memory protection/unaligned access trap",
- [19] "Data memory break trap",
- [20] "TLB dirty bit trap",
- [21] "Page reference trap",
- [22] "Assist emulation trap",
- [25] "Taken branch trap",
- [26] "Data memory access rights trap",
- [27] "Data memory protection ID trap",
- [28] "Unaligned data reference trap",
+ [1] = "High-priority machine check (HPMC)",
+ [2] = "Power failure interrupt",
+ [3] = "Recovery counter trap",
+ [5] = "Low-priority machine check",
+ [6] = "Instruction TLB miss fault",
+ [7] = "Instruction access rights / protection trap",
+ [8] = "Illegal instruction trap",
+ [9] = "Break instruction trap",
+ [10] = "Privileged operation trap",
+ [11] = "Privileged register trap",
+ [12] = "Overflow trap",
+ [13] = "Conditional trap",
+ [14] = "FP Assist Exception trap",
+ [15] = "Data TLB miss fault",
+ [16] = "Non-access ITLB miss fault",
+ [17] = "Non-access DTLB miss fault",
+ [18] = "Data memory protection/unaligned access trap",
+ [19] = "Data memory break trap",
+ [20] = "TLB dirty bit trap",
+ [21] = "Page reference trap",
+ [22] = "Assist emulation trap",
+ [25] = "Taken branch trap",
+ [26] = "Data memory access rights trap",
+ [27] = "Data memory protection ID trap",
+ [28] = "Unaligned data reference trap",
};
const char *trap_name(unsigned long code)
@@ -288,15 +293,19 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
retry:
mmap_read_lock(mm);
vma = find_vma_prev(mm, address, &prev_vma);
- if (!vma || address < vma->vm_start)
- goto check_expansion;
+ if (!vma || address < vma->vm_start) {
+ if (!prev_vma || !(prev_vma->vm_flags & VM_GROWSUP))
+ goto bad_area;
+ vma = expand_stack(mm, address);
+ if (!vma)
+ goto bad_area_nosemaphore;
+ }
+
/*
* Ok, we have a good vm_area for this memory access. We still need to
* check the access permissions.
*/
-good_area:
-
if ((vma->vm_flags & acc_type) != acc_type)
goto bad_area;
@@ -308,8 +317,13 @@ good_area:
fault = handle_mm_fault(vma, address, flags, regs);
- if (fault_signal_pending(fault, regs))
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs)) {
+ msg = "Page fault: fault signal on kernel memory";
+ goto no_context;
+ }
return;
+ }
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED)
@@ -342,17 +356,17 @@ good_area:
mmap_read_unlock(mm);
return;
-check_expansion:
- vma = prev_vma;
- if (vma && (expand_stack(vma, address) == 0))
- goto good_area;
-
/*
* Something tried to access memory that isn't in our memory map..
*/
bad_area:
mmap_read_unlock(mm);
+bad_area_nosemaphore:
+ if (!user_mode(regs) && fixup_exception(regs)) {
+ return;
+ }
+
if (user_mode(regs)) {
int signo, si_code;
@@ -444,7 +458,7 @@ handle_nadtlb_fault(struct pt_regs *regs)
{
unsigned long insn = regs->iir;
int breg, treg, xreg, val = 0;
- struct vm_area_struct *vma, *prev_vma;
+ struct vm_area_struct *vma;
struct task_struct *tsk;
struct mm_struct *mm;
unsigned long address;
@@ -480,7 +494,7 @@ handle_nadtlb_fault(struct pt_regs *regs)
/* Search for VMA */
address = regs->ior;
mmap_read_lock(mm);
- vma = find_vma_prev(mm, address, &prev_vma);
+ vma = vma_lookup(mm, address);
mmap_read_unlock(mm);
/*
@@ -489,7 +503,6 @@ handle_nadtlb_fault(struct pt_regs *regs)
*/
acc_type = (insn & 0x40) ? VM_WRITE : VM_READ;
if (vma
- && address >= vma->vm_start
&& (vma->vm_flags & acc_type) == acc_type)
val = 1;
}
diff --git a/arch/parisc/mm/fixmap.c b/arch/parisc/mm/fixmap.c
index cc15d737fda6..ae3493dae9dc 100644
--- a/arch/parisc/mm/fixmap.c
+++ b/arch/parisc/mm/fixmap.c
@@ -19,9 +19,6 @@ void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte;
- if (pmd_none(*pmd))
- pte = pte_alloc_kernel(pmd, vaddr);
-
pte = pte_offset_kernel(pmd, vaddr);
set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX));
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index d1d3990b83f6..a94fe546d434 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -21,27 +21,6 @@
#include <asm/mmu_context.h>
-unsigned long
-hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct hstate *h = hstate_file(file);
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- if (flags & MAP_FIXED)
- if (prepare_hugepage_range(file, addr, len))
- return -EINVAL;
-
- if (addr)
- addr = ALIGN(addr, huge_page_size(h));
-
- /* we need to make sure the colouring is OK */
- return arch_get_unmapped_area(file, addr, len, pgoff, flags);
-}
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -66,7 +45,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
if (pud) {
pmd = pmd_alloc(mm, pud, addr);
if (pmd)
- pte = pte_alloc_map(mm, pmd, addr);
+ pte = pte_alloc_huge(mm, pmd, addr);
}
return pte;
}
@@ -90,7 +69,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
- pte = pte_offset_map(pmd, addr);
+ pte = pte_offset_huge(pmd, addr);
}
}
}
@@ -140,14 +119,14 @@ static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t entry)
+ pte_t *ptep, pte_t entry, unsigned long sz)
{
__set_huge_pte_at(mm, addr, ptep, entry);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
pte_t entry;
@@ -180,14 +159,3 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
}
return changed;
}
-
-
-int pmd_huge(pmd_t pmd)
-{
- return 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return 0;
-}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b0c43f3b0a5f..14270715d754 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -24,6 +24,7 @@
#include <linux/nodemask.h> /* for node_online_map */
#include <linux/pagemap.h> /* for release_pages */
#include <linux/compat.h>
+#include <linux/execmem.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -32,6 +33,8 @@
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
+#include <asm/asm-offsets.h>
+#include <asm/shmbuf.h>
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
@@ -374,10 +377,8 @@ static void __ref map_pages(unsigned long start_vaddr,
#if CONFIG_PGTABLE_LEVELS == 3
if (pud_none(*pud)) {
- pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
+ pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER,
PAGE_SIZE << PMD_TABLE_ORDER);
- if (!pmd)
- panic("pmd allocation failed.\n");
pud_populate(NULL, pud, pmd);
}
#endif
@@ -385,9 +386,7 @@ static void __ref map_pages(unsigned long start_vaddr,
pmd = pmd_offset(pud, vaddr);
for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
if (pmd_none(*pmd)) {
- pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pg_table)
- panic("page table allocation failed\n");
+ pg_table = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(NULL, pmd, pg_table);
}
@@ -456,7 +455,6 @@ void free_initmem(void)
unsigned long kernel_end = (unsigned long)&_end;
/* Remap kernel text and data, but do not touch init section yet. */
- kernel_set_to_readonly = true;
map_pages(init_end, __pa(init_end), kernel_end - init_end,
PAGE_KERNEL, 0);
@@ -479,7 +477,7 @@ void free_initmem(void)
/* finally dump all the instructions which were cached, since the
* pages are no longer executable */
flush_icache_range(init_begin, init_end);
-
+
free_initmem_default(POISON_FREE_INITMEM);
/* set up a new LED state on systems shipped with an LED state panel */
@@ -490,11 +488,18 @@ void free_initmem(void)
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
- /* rodata memory was already mapped with KERNEL_RO access rights by
- pagetable_init() and map_pages(). No need to do additional stuff here */
- unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
+ unsigned long start = (unsigned long) &__start_rodata;
+ unsigned long end = (unsigned long) &__end_rodata;
+
+ pr_info("Write protecting the kernel read-only data: %luk\n",
+ (end - start) >> 10);
+
+ kernel_set_to_readonly = true;
+ map_pages(start, __pa(start), end - start, PAGE_KERNEL, 0);
- pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
+ /* force the kernel to see the new page table entries */
+ flush_cache_all();
+ flush_tlb_all();
}
#endif
@@ -523,10 +528,6 @@ void mark_rodata_ro(void)
void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);
-#ifdef CONFIG_PA11
-unsigned long pcxl_dma_start __ro_after_init;
-#endif
-
void __init mem_init(void)
{
/* Do sanity checks on IPC (compat) structures */
@@ -561,10 +562,6 @@ void __init mem_init(void)
BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
#endif
- high_memory = __va((max_pfn << PAGE_SHIFT));
- set_max_mapnr(max_low_pfn);
- memblock_free_all();
-
#ifdef CONFIG_PA11
if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
@@ -626,12 +623,10 @@ static void __init pagetable_init(void)
for (range = 0; range < npmem_ranges; range++) {
unsigned long start_paddr;
- unsigned long end_paddr;
unsigned long size;
start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
size = pmem_ranges[range].pages << PAGE_SHIFT;
- end_paddr = start_paddr + size;
map_pages((unsigned long)__va(start_paddr), start_paddr,
size, PAGE_KERNEL, 0);
@@ -645,9 +640,7 @@ static void __init pagetable_init(void)
}
#endif
- empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!empty_zero_page)
- panic("zero page allocation failed.\n");
+ empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
}
@@ -671,6 +664,35 @@ static void __init gateway_init(void)
PAGE_SIZE, PAGE_GATEWAY, 1);
}
+static void __init fixmap_init(void)
+{
+ unsigned long addr = FIXMAP_START;
+ unsigned long end = FIXMAP_START + FIXMAP_SIZE;
+ pgd_t *pgd = pgd_offset_k(addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
+ pmd_t *pmd;
+
+ BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE);
+
+#if CONFIG_PGTABLE_LEVELS == 3
+ if (pud_none(*pud)) {
+ pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER,
+ PAGE_SIZE << PMD_TABLE_ORDER);
+ pud_populate(NULL, pud, pmd);
+ }
+#endif
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ pte_t *pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
+
+ pmd_populate_kernel(&init_mm, pmd, pte);
+
+ addr += PAGE_SIZE;
+ } while (addr < end);
+}
+
static void __init parisc_bootmem_free(void)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
@@ -685,6 +707,7 @@ void __init paging_init(void)
setup_bootmem();
pagetable_init();
gateway_init();
+ fixmap_init();
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL);
@@ -692,6 +715,77 @@ void __init paging_init(void)
parisc_bootmem_free();
}
+static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
+ unsigned long entry_info)
+{
+ const int slot_max = btlb_info.fixed_range_info.num_comb;
+ int min_num_pages = btlb_info.min_size;
+ unsigned long size;
+
+ /* map at minimum 4 pages */
+ if (min_num_pages < 4)
+ min_num_pages = 4;
+
+ size = HUGEPAGE_SIZE;
+ while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
+ /* starting address must have same alignment as size! */
+ /* if correctly aligned and fits in double size, increase */
+ if (((start & (2 * size - 1)) == 0) &&
+ (end - start) >= (2 * size)) {
+ size <<= 1;
+ continue;
+ }
+ /* if current size alignment is too big, try smaller size */
+ if ((start & (size - 1)) != 0) {
+ size >>= 1;
+ continue;
+ }
+ if ((end - start) >= size) {
+ if ((size >> PAGE_SHIFT) >= min_num_pages)
+ pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
+ size >> PAGE_SHIFT, entry_info, *slot);
+ (*slot)++;
+ start += size;
+ continue;
+ }
+ size /= 2;
+ continue;
+ }
+}
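
alloc_btlb() greedily covers [start, end) with naturally aligned power-of-two blocks: it doubles the candidate size while the start stays aligned to twice that size and the range still fits, halves it when the start is misaligned for the current size, and otherwise emits a block. For example, [0x3000, 0x10000) is covered by blocks of 0x1000 at 0x3000, 0x4000 at 0x4000, and 0x8000 at 0x8000. The same shape as a standalone sketch, with pdc_btlb_insert() replaced by a comment:

    /* size: initial (maximum) block size, a power of two */
    static void cover(unsigned long start, unsigned long end, unsigned long size)
    {
        while (start < end && size >= 1) {
            if ((start & (2 * size - 1)) == 0 && end - start >= 2 * size) {
                size <<= 1;            /* aligned and fits: try bigger */
                continue;
            }
            if (start & (size - 1)) {
                size >>= 1;            /* misaligned for this size: shrink */
                continue;
            }
            if (end - start >= size) {
                /* emit block [start, start + size) here */
                start += size;
                continue;
            }
            size >>= 1;                /* tail smaller than block: shrink */
        }
    }
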
+
+void btlb_init_per_cpu(void)
+{
+ unsigned long s, t, e;
+ int slot;
+
+ /* BTLBs are not available on 64-bit CPUs */
+ if (IS_ENABLED(CONFIG_PA20))
+ return;
+ else if (pdc_btlb_info(&btlb_info) < 0) {
+ memset(&btlb_info, 0, sizeof btlb_info);
+ }
+
+ /* insert BTLBs for code and data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_stext);
+ e = (uintptr_t) dereference_function_descriptor(&_etext);
+ t = (uintptr_t) dereference_function_descriptor(&_sdata);
+ BUG_ON(t != e);
+
+ /* code segments */
+ slot = 0;
+ alloc_btlb(s, e, &slot, 0x13800000);
+
+ /* sanity check */
+ t = (uintptr_t) dereference_function_descriptor(&_edata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_start);
+ BUG_ON(t != e);
+
+ /* data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_sdata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
+ alloc_btlb(s, e, &slot, 0x11800000);
+}
+
#ifdef CONFIG_PA20
/*
@@ -891,3 +985,23 @@ static const pgprot_t protection_map[16] = {
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = VMALLOC_START,
+ .end = VMALLOC_END,
+ .pgprot = PAGE_KERNEL_RWX,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 345ff0b66499..0b65c4b3baee 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -13,25 +13,9 @@
#include <linux/io.h>
#include <linux/mm.h>
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ pgprot_t prot)
{
- void __iomem *addr;
- struct vm_struct *area;
- unsigned long offset, last_addr;
- pgprot_t pgprot;
-
#ifdef CONFIG_EISA
unsigned long end = phys_addr + size - 1;
/* Support EISA addresses */
@@ -40,11 +24,6 @@ void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
phys_addr |= F_EXTEND(0xfc000000);
#endif
- /* Don't allow wraparound or zero size */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr)
- return NULL;
-
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
@@ -62,39 +41,6 @@ void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
}
}
- pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
- _PAGE_ACCESSED | _PAGE_NO_CACHE);
-
- /*
- * Mappings have to be page-aligned
- */
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
-
- addr = (void __iomem *) area->addr;
- if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
- phys_addr, pgprot)) {
- vunmap(addr);
- return NULL;
- }
-
- return (void __iomem *) (offset + (char __iomem *)addr);
-}
-EXPORT_SYMBOL(ioremap);
-
-void iounmap(const volatile void __iomem *io_addr)
-{
- unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
-
- if (is_vmalloc_addr((void *)addr))
- vunmap((void *)addr);
+ return generic_ioremap_prot(phys_addr, size, prot);
}
-EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(ioremap_prot);
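
With the open-coded mapping gone, the parisc side only applies the EISA address extension and the RAM check, then defers to the library's generic_ioremap_prot(); plain ioremap() callers are unaffected, since the generic ioremap layer routes them through ioremap_prot(). A typical kernel-context usage fragment stays as before (phys_base and the register offset are assumed for illustration):

    void __iomem *regs = ioremap(phys_base, 0x1000);  /* phys_base: assumed bus address */
    if (!regs)
        return -ENOMEM;
    status = readl(regs + 0x10);                      /* 0x10: hypothetical register offset */
    iounmap(regs);
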
diff --git a/arch/parisc/net/Makefile b/arch/parisc/net/Makefile
new file mode 100644
index 000000000000..22b12024d4c3
--- /dev/null
+++ b/arch/parisc/net/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_BPF_JIT) += bpf_jit_core.o
+
+ifeq ($(CONFIG_64BIT),y)
+ obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o
+else
+ obj-$(CONFIG_BPF_JIT) += bpf_jit_comp32.o
+endif
diff --git a/arch/parisc/net/bpf_jit.h b/arch/parisc/net/bpf_jit.h
new file mode 100644
index 000000000000..8b8896959f04
--- /dev/null
+++ b/arch/parisc/net/bpf_jit.h
@@ -0,0 +1,479 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common functionality for PARISC32 and PARISC64 BPF JIT compilers
+ *
+ * Copyright (c) 2023 Helge Deller <deller@gmx.de>
+ *
+ */
+
+#ifndef _BPF_JIT_H
+#define _BPF_JIT_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <asm/cacheflush.h>
+
+#define HPPA_JIT_DEBUG 0
+#define HPPA_JIT_REBOOT 0
+#define HPPA_JIT_DUMP 0
+
+#define OPTIMIZE_HPPA 1 /* enable some asm optimizations */
+// echo 1 > /proc/sys/net/core/bpf_jit_enable
+
+#define HPPA_R(nr) nr /* use HPPA register #nr */
+
+enum {
+ HPPA_REG_ZERO = 0, /* The constant value 0 */
+ HPPA_REG_R1 = 1, /* used for addil */
+ HPPA_REG_RP = 2, /* Return address */
+
+ HPPA_REG_ARG7 = 19, /* ARG4-7 used in 64-bit ABI */
+ HPPA_REG_ARG6 = 20,
+ HPPA_REG_ARG5 = 21,
+ HPPA_REG_ARG4 = 22,
+
+ HPPA_REG_ARG3 = 23, /* ARG0-3 in 32- and 64-bit ABI */
+ HPPA_REG_ARG2 = 24,
+ HPPA_REG_ARG1 = 25,
+ HPPA_REG_ARG0 = 26,
+ HPPA_REG_GP = 27, /* Global pointer */
+ HPPA_REG_RET0 = 28, /* Return value, HI in 32-bit */
+ HPPA_REG_RET1 = 29, /* Return value, LOW in 32-bit */
+ HPPA_REG_SP = 30, /* Stack pointer */
+ HPPA_REG_R31 = 31,
+
+#ifdef CONFIG_64BIT
+ HPPA_REG_TCC = 3,
+ HPPA_REG_TCC_SAVED = 4,
+ HPPA_REG_TCC_IN_INIT = HPPA_REG_R31,
+#else
+ HPPA_REG_TCC = 18,
+ HPPA_REG_TCC_SAVED = 17,
+ HPPA_REG_TCC_IN_INIT = HPPA_REG_R31,
+#endif
+
+ HPPA_REG_T0 = HPPA_REG_R1, /* Temporaries */
+ HPPA_REG_T1 = HPPA_REG_R31,
+ HPPA_REG_T2 = HPPA_REG_ARG4,
+#ifndef CONFIG_64BIT
+ HPPA_REG_T3 = HPPA_REG_ARG5, /* not used in 64-bit */
+ HPPA_REG_T4 = HPPA_REG_ARG6,
+ HPPA_REG_T5 = HPPA_REG_ARG7,
+#endif
+};
+
+struct hppa_jit_context {
+ struct bpf_prog *prog;
+ u32 *insns; /* HPPA insns */
+ int ninsns;
+ int reg_seen_collect;
+ int reg_seen;
+ int body_len;
+ int epilogue_offset;
+ int prologue_len;
+ int *offset; /* BPF to HPPA */
+};
+
+#define REG_SET_SEEN(ctx, nr) { if (ctx->reg_seen_collect) ctx->reg_seen |= BIT(nr); }
+#define REG_SET_SEEN_ALL(ctx) { if (ctx->reg_seen_collect) ctx->reg_seen = -1; }
+#define REG_FORCE_SEEN(ctx, nr) { ctx->reg_seen |= BIT(nr); }
+#define REG_WAS_SEEN(ctx, nr) (ctx->reg_seen & BIT(nr))
+#define REG_ALL_SEEN(ctx) (ctx->reg_seen == -1)
+
+#define HPPA_INSN_SIZE 4 /* bytes per HPPA asm instruction */
+#define REG_SIZE REG_SZ /* bytes per native "long" word */
+
+/* subtract the hppa branch displacement, which is .+8 (2 instructions) */
+#define HPPA_BRANCH_DISPLACEMENT 2 /* instructions */
+
+/* asm statement indicator: execute or nullify the branch delay slot */
+#define EXEC_NEXT_INSTR 0
+#define NOP_NEXT_INSTR 1
+
+#define im11(val) (((u32)(val)) & 0x07ff)
+
+#define hppa_ldil(addr, reg) \
+ hppa_t5_insn(0x08, reg, ((u32)(addr)) >> 11) /* ldil im21,reg */
+#define hppa_addil(addr, reg) \
+ hppa_t5_insn(0x0a, reg, ((u32)(addr)) >> 11) /* addil im21,reg -> result in gr1 */
+#define hppa_ldo(im14, reg, target) \
+ hppa_t1_insn(0x0d, reg, target, im14) /* ldo val14(reg),target */
+#define hppa_ldi(im14, reg) \
+ hppa_ldo(im14, HPPA_REG_ZERO, reg) /* ldi val14,reg */
+#define hppa_or(reg1, reg2, target) \
+ hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x09, target) /* or reg1,reg2,target */
+#define hppa_or_cond(reg1, reg2, cond, f, target) \
+ hppa_t6_insn(0x02, reg2, reg1, cond, f, 0x09, target)
+#define hppa_and(reg1, reg2, target) \
+ hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x08, target) /* and reg1,reg2,target */
+#define hppa_and_cond(reg1, reg2, cond, f, target) \
+ hppa_t6_insn(0x02, reg2, reg1, cond, f, 0x08, target)
+#define hppa_xor(reg1, reg2, target) \
+ hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x0a, target) /* xor reg1,reg2,target */
+#define hppa_add(reg1, reg2, target) \
+ hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x18, target) /* add reg1,reg2,target */
+#define hppa_addc(reg1, reg2, target) \
+ hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x1c, target) /* add,c reg1,reg2,target */
+#define hppa_sub(reg1, reg2, target) \
+ hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x10, target) /* sub reg1,reg2,target */
+#define hppa_subb(reg1, reg2, target) \
+ hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x14, target) /* sub,b reg1,reg2,target */
+#define hppa_nop() \
+ hppa_or(0,0,0) /* nop: or 0,0,0 */
+#define hppa_addi(val11, reg, target) \
+ hppa_t7_insn(0x2d, reg, target, val11) /* addi im11,reg,target */
+#define hppa_subi(val11, reg, target) \
+ hppa_t7_insn(0x25, reg, target, val11) /* subi im11,reg,target */
+#define hppa_copy(reg, target) \
+ hppa_or(reg, HPPA_REG_ZERO, target) /* copy reg,target */
+#define hppa_ldw(val14, reg, target) \
+ hppa_t1_insn(0x12, reg, target, val14) /* ldw im14(reg),target */
+#define hppa_ldb(val14, reg, target) \
+ hppa_t1_insn(0x10, reg, target, val14) /* ldb im14(reg),target */
+#define hppa_ldh(val14, reg, target) \
+ hppa_t1_insn(0x11, reg, target, val14) /* ldh im14(reg),target */
+#define hppa_stw(reg, val14, base) \
+ hppa_t1_insn(0x1a, base, reg, val14) /* stw reg,im14(base) */
+#define hppa_stb(reg, val14, base) \
+ hppa_t1_insn(0x18, base, reg, val14) /* stb reg,im14(base) */
+#define hppa_sth(reg, val14, base) \
+ hppa_t1_insn(0x19, base, reg, val14) /* sth reg,im14(base) */
+#define hppa_stwma(reg, val14, base) \
+ hppa_t1_insn(0x1b, base, reg, val14) /* stw,ma reg,im14(base) */
+#define hppa_bv(reg, base, nop) \
+ hppa_t11_insn(0x3a, base, reg, 0x06, 0, nop) /* bv(,n) reg(base) */
+#define hppa_be(offset, base) \
+ hppa_t12_insn(0x38, base, offset, 0x00, 1) /* be,n offset(0,base) */
+#define hppa_be_l(offset, base, nop) \
+ hppa_t12_insn(0x39, base, offset, 0x00, nop) /* ble(,nop) offset(0,base) */
+#define hppa_mtctl(reg, cr) \
+ hppa_t21_insn(0x00, cr, reg, 0xc2, 0) /* mtctl reg,cr */
+#define hppa_mtsar(reg) \
+ hppa_mtctl(reg, 11) /* mtsar reg */
+#define hppa_zdep(r, p, len, target) \
+ hppa_t10_insn(0x35, target, r, 0, 2, p, len) /* zdep r,a,b,t */
+#define hppa_shl(r, len, target) \
+ hppa_zdep(r, len, len, lo(rd))
+#define hppa_depwz(r, p, len, target) \
+ hppa_t10_insn(0x35, target, r, 0, 3, 31-(p), 32-(len)) /* depw,z r,p,len,ret1 */
+#define hppa_depwz_sar(reg, target) \
+ hppa_t1_insn(0x35, target, reg, 0) /* depw,z reg,sar,32,target */
+#define hppa_shrpw_sar(reg, target) \
+ hppa_t10_insn(0x34, reg, 0, 0, 0, 0, target) /* shrpw r0,reg,sar,target */
+#define hppa_shrpw(r1, r2, p, target) \
+ hppa_t10_insn(0x34, r2, r1, 0, 2, 31-(p), target) /* shrpw r1,r2,p,target */
+#define hppa_shd(r1, r2, p, target) \
+ hppa_t10_insn(0x34, r2, r1, 0, 2, 31-(p), target) /* shrpw r1,r2,p,target */
+#define hppa_extrws_sar(reg, target) \
+ hppa_t10_insn(0x34, reg, target, 0, 5, 0, 0) /* extrw,s reg,sar,32,ret0 */
+#define hppa_extrws(reg, p, len, target) \
+ hppa_t10_insn(0x34, reg, target, 0, 7, p, len) /* extrw,s reg,p,len,target */
+#define hppa_extru(r, p, len, target) \
+ hppa_t10_insn(0x34, r, target, 0, 6, p, 32-(len))
+#define hppa_shr(r, len, target) \
+ hppa_extru(r, 31-(len), 32-(len), target)
+#define hppa_bl(imm17, rp) \
+ hppa_t12_insn(0x3a, rp, imm17, 0x00, 1) /* bl,n target_addr,rp */
+#define hppa_sh2add(r1, r2, target) \
+ hppa_t6_insn(0x02, r2, r1, 0, 0, 0x1a, target) /* sh2add r1,r2,target */
+
+#define hppa_combt(r1, r2, target_addr, condition, nop) \
+ hppa_t11_insn(IS_ENABLED(CONFIG_64BIT) ? 0x27 : 0x20, \
+ r2, r1, condition, target_addr, nop) /* combt,cond,n r1,r2,addr */
+#define hppa_beq(r1, r2, target_addr) \
+ hppa_combt(r1, r2, target_addr, 1, NOP_NEXT_INSTR)
+#define hppa_blt(r1, r2, target_addr) \
+ hppa_combt(r1, r2, target_addr, 2, NOP_NEXT_INSTR)
+#define hppa_ble(r1, r2, target_addr) \
+ hppa_combt(r1, r2, target_addr, 3, NOP_NEXT_INSTR)
+#define hppa_bltu(r1, r2, target_addr) \
+ hppa_combt(r1, r2, target_addr, 4, NOP_NEXT_INSTR)
+#define hppa_bleu(r1, r2, target_addr) \
+ hppa_combt(r1, r2, target_addr, 5, NOP_NEXT_INSTR)
+
+#define hppa_combf(r1, r2, target_addr, condition, nop) \
+ hppa_t11_insn(IS_ENABLED(CONFIG_64BIT) ? 0x2f : 0x22, \
+ r2, r1, condition, target_addr, nop) /* combf,cond,n r1,r2,addr */
+#define hppa_bne(r1, r2, target_addr) \
+ hppa_combf(r1, r2, target_addr, 1, NOP_NEXT_INSTR)
+#define hppa_bge(r1, r2, target_addr) \
+ hppa_combf(r1, r2, target_addr, 2, NOP_NEXT_INSTR)
+#define hppa_bgt(r1, r2, target_addr) \
+ hppa_combf(r1, r2, target_addr, 3, NOP_NEXT_INSTR)
+#define hppa_bgeu(r1, r2, target_addr) \
+ hppa_combf(r1, r2, target_addr, 4, NOP_NEXT_INSTR)
+#define hppa_bgtu(r1, r2, target_addr) \
+ hppa_combf(r1, r2, target_addr, 5, NOP_NEXT_INSTR)
+
+/* 64-bit instructions */
+#ifdef CONFIG_64BIT
+#define hppa64_ldd_reg(reg, b, target) \
+ hppa_t10_insn(0x03, b, reg, 0, 0, 3<<1, target)
+#define hppa64_ldd_im5(im5, b, target) \
+ hppa_t10_insn(0x03, b, low_sign_unext(im5,5), 0, 1<<2, 3<<1, target)
+#define hppa64_ldd_im16(im16, b, target) \
+ hppa_t10_insn(0x14, b, target, 0, 0, 0, 0) | re_assemble_16(im16)
+#define hppa64_std_im5(src, im5, b) \
+ hppa_t10_insn(0x03, b, src, 0, 1<<2, 0xB<<1, low_sign_unext(im5,5))
+#define hppa64_std_im16(src, im16, b) \
+ hppa_t10_insn(0x1c, b, src, 0, 0, 0, 0) | re_assemble_16(im16)
+#define hppa64_bl_long(offs22) \
+ hppa_t12_L_insn(0x3a, offs22, 1)
+#define hppa64_mtsarcm(reg) \
+ hppa_t21_insn(0x00, 11, reg, 0xc6, 0)
+#define hppa64_shrpd_sar(reg, target) \
+ hppa_t10_insn(0x34, reg, 0, 0, 0, 1<<4, target)
+#define hppa64_shladd(r1, sa, r2, target) \
+ hppa_t6_insn(0x02, r2, r1, 0, 0, 1<<4|1<<3|sa, target)
+#define hppa64_depdz_sar(reg, target) \
+ hppa_t21_insn(0x35, target, reg, 3<<3, 0)
+#define hppa_extrd_sar(reg, target, se) \
+ hppa_t10_insn(0x34, reg, target, 0, 0, 0, 0) | 2<<11 | (se&1)<<10 | 1<<9 | 1<<8
+#define hppa64_bve_l_rp(base) \
+ (0x3a << 26) | (base << 21) | 0xf000
+#define hppa64_permh_3210(r, target) \
+ (0x3e << 26) | (r << 21) | (r << 16) | (target) | 0x00006900
+#define hppa64_hshl(r, sa, target) \
+ (0x3e << 26) | (0 << 21) | (r << 16) | (sa << 6) | (target) | 0x00008800
+#define hppa64_hshr_u(r, sa, target) \
+ (0x3e << 26) | (r << 21) | (0 << 16) | (sa << 6) | (target) | 0x0000c800
+#endif
+
+struct hppa_jit_data {
+ struct bpf_binary_header *header;
+ u8 *image;
+ struct hppa_jit_context ctx;
+};
+
+static inline void bpf_fill_ill_insns(void *area, unsigned int size)
+{
+ memset(area, 0, size);
+}
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+/* Emit a 4-byte HPPA instruction. */
+static inline void emit(const u32 insn, struct hppa_jit_context *ctx)
+{
+ if (ctx->insns) {
+ ctx->insns[ctx->ninsns] = insn;
+ }
+
+ ctx->ninsns++;
+}
+
+static inline int epilogue_offset(struct hppa_jit_context *ctx)
+{
+ int to = ctx->epilogue_offset, from = ctx->ninsns;
+
+ return (to - from);
+}
+
+/* Return -1 or inverted cond. */
+static inline int invert_bpf_cond(u8 cond)
+{
+ switch (cond) {
+ case BPF_JEQ:
+ return BPF_JNE;
+ case BPF_JGT:
+ return BPF_JLE;
+ case BPF_JLT:
+ return BPF_JGE;
+ case BPF_JGE:
+ return BPF_JLT;
+ case BPF_JLE:
+ return BPF_JGT;
+ case BPF_JNE:
+ return BPF_JEQ;
+ case BPF_JSGT:
+ return BPF_JSLE;
+ case BPF_JSLT:
+ return BPF_JSGE;
+ case BPF_JSGE:
+ return BPF_JSLT;
+ case BPF_JSLE:
+ return BPF_JSGT;
+ }
+ return -1;
+}
+
+
+static inline signed long hppa_offset(int insn, int off, struct hppa_jit_context *ctx)
+{
+ signed long from, to;
+
+ off++; /* BPF branch is from PC+1 */
+ from = (insn > 0) ? ctx->offset[insn - 1] : 0;
+ to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
+ return (to - from);
+}
+
+/* does the signed value fit into a given number of bits? */
+static inline int check_bits_int(signed long val, int bits)
+{
+ return ((val >= 0) && ((val >> bits) == 0)) ||
+ ((val < 0) && (((~((u32)val)) >> (bits-1)) == 0));
+}
+
+/* can the signed value be used in relative code? */
+static inline int relative_bits_ok(signed long val, int bits)
+{
+ return ((val >= 0) && (val < (1UL << (bits-1)))) || /* XXX */
+ ((val < 0) && (((~((unsigned long)val)) >> (bits-1)) == 0)
+ && (val & (1UL << (bits-1))));
+}
+
+/* can the signed value be used in relative branches? */
+static inline int relative_branch_ok(signed long val, int bits)
+{
+ return ((val >= 0) && (val < (1UL << (bits-2)))) || /* XXX */
+ ((val < 0) && (((~((unsigned long)val)) < (1UL << (bits-2))))
+ && (val & (1UL << (bits-1))));
+}
+
+
+#define is_5b_int(val) check_bits_int(val, 5)
+
+static inline unsigned sign_unext(unsigned x, unsigned len)
+{
+ unsigned len_ones;
+
+ len_ones = (1 << len) - 1;
+ return x & len_ones;
+}
+
+static inline unsigned low_sign_unext(unsigned x, unsigned len)
+{
+ unsigned temp;
+ unsigned sign;
+
+ sign = (x >> (len-1)) & 1;
+ temp = sign_unext (x, len-1);
+ return (temp << 1) | sign;
+}
+
+static inline unsigned re_assemble_12(unsigned as12)
+{
+ return (( (as12 & 0x800) >> 11)
+ | ((as12 & 0x400) >> (10 - 2))
+ | ((as12 & 0x3ff) << (1 + 2)));
+}
+
+static inline unsigned re_assemble_14(unsigned as14)
+{
+ return (( (as14 & 0x1fff) << 1)
+ | ((as14 & 0x2000) >> 13));
+}
+
+#ifdef CONFIG_64BIT
+static inline unsigned re_assemble_16(unsigned as16)
+{
+ unsigned s, t;
+
+ /* Unusual 16-bit encoding, for wide mode only. */
+ t = (as16 << 1) & 0xffff;
+ s = (as16 & 0x8000);
+ return (t ^ s ^ (s >> 1)) | (s >> 15);
+}
+#endif
+
+static inline unsigned re_assemble_17(unsigned as17)
+{
+ return (( (as17 & 0x10000) >> 16)
+ | ((as17 & 0x0f800) << (16 - 11))
+ | ((as17 & 0x00400) >> (10 - 2))
+ | ((as17 & 0x003ff) << (1 + 2)));
+}
+
+static inline unsigned re_assemble_21(unsigned as21)
+{
+ return (( (as21 & 0x100000) >> 20)
+ | ((as21 & 0x0ffe00) >> 8)
+ | ((as21 & 0x000180) << 7)
+ | ((as21 & 0x00007c) << 14)
+ | ((as21 & 0x000003) << 12));
+}
+
+static inline unsigned re_assemble_22(unsigned as22)
+{
+ return (( (as22 & 0x200000) >> 21)
+ | ((as22 & 0x1f0000) << (21 - 16))
+ | ((as22 & 0x00f800) << (16 - 11))
+ | ((as22 & 0x000400) >> (10 - 2))
+ | ((as22 & 0x0003ff) << (1 + 2)));
+}
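
These helpers scatter a linear displacement into PA-RISC's non-contiguous instruction fields; re_assemble_12(), for instance, drops the sign bit to bit 0, moves bit 10 to bit 2, and shifts the remaining ten bits up by three. A user-space spot check of that helper:

    #include <stdio.h>

    static unsigned re_assemble_12_sk(unsigned as12)
    {
        return ((as12 & 0x800) >> 11)
             | ((as12 & 0x400) >> (10 - 2))
             | ((as12 & 0x3ff) << (1 + 2));
    }

    int main(void)
    {
        printf("%#x\n", re_assemble_12_sk(0x001)); /* 0x8: value lands at bit 3 */
        printf("%#x\n", re_assemble_12_sk(0xfff)); /* 0x1ffd: sign bit at bit 0 */
        return 0;
    }
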
+
+/* Various HPPA instruction formats. */
+/* see https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf, appendix C */
+
+static inline u32 hppa_t1_insn(u8 opcode, u8 b, u8 r, s16 im14)
+{
+ return ((opcode << 26) | (b << 21) | (r << 16) | re_assemble_14(im14));
+}
+
+static inline u32 hppa_t5_insn(u8 opcode, u8 tr, u32 val21)
+{
+ return ((opcode << 26) | (tr << 21) | re_assemble_21(val21));
+}
+
+static inline u32 hppa_t6_insn(u8 opcode, u8 r2, u8 r1, u8 c, u8 f, u8 ext6, u16 t)
+{
+ return ((opcode << 26) | (r2 << 21) | (r1 << 16) | (c << 13) | (f << 12) |
+ (ext6 << 6) | t);
+}
+
+/* 7. Arithmetic immediate */
+static inline u32 hppa_t7_insn(u8 opcode, u8 r, u8 t, u32 im11)
+{
+ return ((opcode << 26) | (r << 21) | (t << 16) | low_sign_unext(im11, 11));
+}
+
+/* 10. Shift instructions */
+static inline u32 hppa_t10_insn(u8 opcode, u8 r2, u8 r1, u8 c, u8 ext3, u8 cp, u8 t)
+{
+ return ((opcode << 26) | (r2 << 21) | (r1 << 16) | (c << 13) |
+ (ext3 << 10) | (cp << 5) | t);
+}
+
+/* 11. Conditional branch instructions */
+static inline u32 hppa_t11_insn(u8 opcode, u8 r2, u8 r1, u8 c, u32 w, u8 nop)
+{
+ u32 ra = re_assemble_12(w);
+ // ra = low_sign_unext(w,11) | (w & (1<<10))
+ return ((opcode << 26) | (r2 << 21) | (r1 << 16) | (c << 13) | (nop << 1) | ra);
+}
+
+/* 12. Branch instructions */
+static inline u32 hppa_t12_insn(u8 opcode, u8 rp, u32 w, u8 ext3, u8 nop)
+{
+ return ((opcode << 26) | (rp << 21) | (ext3 << 13) | (nop << 1) | re_assemble_17(w));
+}
+
+static inline u32 hppa_t12_L_insn(u8 opcode, u32 w, u8 nop)
+{
+ return ((opcode << 26) | (0x05 << 13) | (nop << 1) | re_assemble_22(w));
+}
+
+/* 21. Move to control register */
+static inline u32 hppa_t21_insn(u8 opcode, u8 r2, u8 r1, u8 ext8, u8 t)
+{
+ return ((opcode << 26) | (r2 << 21) | (r1 << 16) | (ext8 << 5) | t);
+}
+
+/* Helper functions called by jit code on HPPA32 and HPPA64. */
+
+u64 hppa_div64(u64 div, u64 divisor);
+u64 hppa_div64_rem(u64 div, u64 divisor);
+
+/* Helper functions that emit HPPA instructions when possible. */
+
+void bpf_jit_build_prologue(struct hppa_jit_context *ctx);
+void bpf_jit_build_epilogue(struct hppa_jit_context *ctx);
+
+int bpf_jit_emit_insn(const struct bpf_insn *insn, struct hppa_jit_context *ctx,
+ bool extra_pass);
+
+#endif /* _BPF_JIT_H */
diff --git a/arch/parisc/net/bpf_jit_comp32.c b/arch/parisc/net/bpf_jit_comp32.c
new file mode 100644
index 000000000000..5ff0cf925fe9
--- /dev/null
+++ b/arch/parisc/net/bpf_jit_comp32.c
@@ -0,0 +1,1615 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BPF JIT compiler for PA-RISC (32-bit)
+ *
+ * Copyright (c) 2023 Helge Deller <deller@gmx.de>
+ *
+ * The code is based on the BPF JIT compiler for RV64 by Björn Töpel and
+ * the BPF JIT compiler for 32-bit ARM by Shubham Bansal and Mircea Gherzan.
+ */
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/libgcc.h>
+#include "bpf_jit.h"
+
+/*
+ * Stack layout during BPF program execution (note: stack grows up):
+ *
+ * high
+ * HPPA32 sp => +----------+ <= HPPA32 fp
+ * | saved sp |
+ * | saved rp |
+ * | ... | HPPA32 callee-saved registers
+ * | curr args|
+ * | local var|
+ * +----------+ <= (sp - 4 * NR_SAVED_REGISTERS)
+ * | lo(R9) |
+ * | hi(R9) |
+ * | lo(FP) | JIT scratch space for BPF registers
+ * | hi(FP) |
+ * | ... |
+ * +----------+ <= (sp - 4 * NR_SAVED_REGISTERS
+ * | | - 4 * BPF_JIT_SCRATCH_REGS)
+ * | |
+ * | ... | BPF program stack
+ * | |
+ * | ... | Function call stack
+ * | |
+ * +----------+
+ * low
+ */
+
+enum {
+ /* Stack layout - these are offsets from top of JIT scratch space. */
+ BPF_R8_HI,
+ BPF_R8_LO,
+ BPF_R9_HI,
+ BPF_R9_LO,
+ BPF_FP_HI,
+ BPF_FP_LO,
+ BPF_AX_HI,
+ BPF_AX_LO,
+ BPF_R0_TEMP_HI,
+ BPF_R0_TEMP_LO,
+ BPF_JIT_SCRATCH_REGS,
+};
+
+/* Number of callee-saved registers stored to stack: rp, r3-r18. */
+#define NR_SAVED_REGISTERS (18 - 3 + 1 + 8)
+
+/* Offset from fp for BPF registers stored on stack. */
+#define STACK_OFFSET(k) (- (NR_SAVED_REGISTERS + k + 1))
+#define STACK_ALIGN FRAME_SIZE
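
Worked numbers for the layout above, assuming the 32-bit REG_SIZE of 4 bytes: NR_SAVED_REGISTERS is (18 - 3 + 1) + 8 = 24, so scratch slot k lives REG_SIZE * (24 + k + 1) bytes below the stack pointer (the PA-RISC stack grows up, hence the negative offsets):

    /* STACK_OFFSET(k) = -(24 + k + 1) words:
     *   BPF_R8_HI      (k = 0) -> -25 words -> ldw -100(sp)
     *   BPF_R0_TEMP_LO (k = 9) -> -34 words -> ldw -136(sp)
     */
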
+
+#define EXIT_PTR_LOAD(reg) hppa_ldw(-0x08, HPPA_REG_SP, reg)
+#define EXIT_PTR_STORE(reg) hppa_stw(reg, -0x08, HPPA_REG_SP)
+#define EXIT_PTR_JUMP(reg, nop) hppa_bv(HPPA_REG_ZERO, reg, nop)
+
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
+#define TMP_REG_R0 (MAX_BPF_JIT_REG + 2)
+
+static const s8 regmap[][2] = {
+ /* Return value from in-kernel function, and exit value from eBPF. */
+ [BPF_REG_0] = {HPPA_REG_RET0, HPPA_REG_RET1}, /* HI/LOW */
+
+ /* Arguments from eBPF program to in-kernel function. */
+ [BPF_REG_1] = {HPPA_R(3), HPPA_R(4)},
+ [BPF_REG_2] = {HPPA_R(5), HPPA_R(6)},
+ [BPF_REG_3] = {HPPA_R(7), HPPA_R(8)},
+ [BPF_REG_4] = {HPPA_R(9), HPPA_R(10)},
+ [BPF_REG_5] = {HPPA_R(11), HPPA_R(12)},
+
+ [BPF_REG_6] = {HPPA_R(13), HPPA_R(14)},
+ [BPF_REG_7] = {HPPA_R(15), HPPA_R(16)},
+ /*
+ * Callee-saved registers that in-kernel function will preserve.
+ * Stored on the stack.
+ */
+ [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
+ [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
+
+ /* Read-only frame pointer to access BPF stack. Not needed. */
+ [BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
+
+ /* Temporary register for blinding constants. Stored on the stack. */
+ [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
+ /*
+ * Temporary registers used by the JIT to operate on registers stored
+ * on the stack. Save t0 and t1 to be used as temporaries in generated
+ * code.
+ */
+ [TMP_REG_1] = {HPPA_REG_T3, HPPA_REG_T2},
+ [TMP_REG_2] = {HPPA_REG_T5, HPPA_REG_T4},
+
+ /* temporary space for BPF_R0 during libgcc and millicode calls */
+ [TMP_REG_R0] = {STACK_OFFSET(BPF_R0_TEMP_HI), STACK_OFFSET(BPF_R0_TEMP_LO)},
+};
+
+static s8 hi(const s8 *r)
+{
+ return r[0];
+}
+
+static s8 lo(const s8 *r)
+{
+ return r[1];
+}
+
+static void emit_hppa_copy(const s8 rs, const s8 rd, struct hppa_jit_context *ctx)
+{
+ REG_SET_SEEN(ctx, rd);
+ if (OPTIMIZE_HPPA && (rs == rd))
+ return;
+ REG_SET_SEEN(ctx, rs);
+ emit(hppa_copy(rs, rd), ctx);
+}
+
+static void emit_hppa_xor(const s8 r1, const s8 r2, const s8 r3, struct hppa_jit_context *ctx)
+{
+ REG_SET_SEEN(ctx, r1);
+ REG_SET_SEEN(ctx, r2);
+ REG_SET_SEEN(ctx, r3);
+ if (OPTIMIZE_HPPA && (r1 == r2)) {
+ emit(hppa_copy(HPPA_REG_ZERO, r3), ctx);
+ } else {
+ emit(hppa_xor(r1, r2, r3), ctx);
+ }
+}
+
+static void emit_imm(const s8 rd, s32 imm, struct hppa_jit_context *ctx)
+{
+ u32 lower = im11(imm);
+
+ REG_SET_SEEN(ctx, rd);
+ if (OPTIMIZE_HPPA && relative_bits_ok(imm, 14)) {
+ emit(hppa_ldi(imm, rd), ctx);
+ return;
+ }
+ emit(hppa_ldil(imm, rd), ctx);
+ if (OPTIMIZE_HPPA && (lower == 0))
+ return;
+ emit(hppa_ldo(lower, rd, rd), ctx);
+}
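
emit_imm() mirrors the classic ldil/ldo idiom: ldil deposits the upper 21 bits and ldo adds the low 11 bits, which im11() keeps positive, so the two halves never interact through a borrow. A worked example:

    /* building 0x12345678 in rd, as emit_imm() does:
     *   ldil  L%0x12345678, rd  -> rd = 0x12345000  (top 21 bits)
     *   ldo   0x678(rd), rd     -> rd = 0x12345678  (low 11 bits, positive)
     */
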
+
+static void emit_imm32(const s8 *rd, s32 imm, struct hppa_jit_context *ctx)
+{
+ /* Emit immediate into lower bits. */
+ REG_SET_SEEN(ctx, lo(rd));
+ emit_imm(lo(rd), imm, ctx);
+
+ /* Sign-extend into upper bits. */
+ REG_SET_SEEN(ctx, hi(rd));
+ if (imm >= 0)
+ emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
+ else
+ emit(hppa_ldi(-1, hi(rd)), ctx);
+}
+
+static void emit_imm64(const s8 *rd, s32 imm_hi, s32 imm_lo,
+ struct hppa_jit_context *ctx)
+{
+ emit_imm(hi(rd), imm_hi, ctx);
+ emit_imm(lo(rd), imm_lo, ctx);
+}
+
+static void __build_epilogue(bool is_tail_call, struct hppa_jit_context *ctx)
+{
+ const s8 *r0 = regmap[BPF_REG_0];
+ int i;
+
+ if (is_tail_call) {
+ /*
+ * goto *(t0 + 4);
+ * Skips first instruction of prologue which initializes tail
+ * call counter. Assumes t0 contains address of target program,
+ * see emit_bpf_tail_call.
+ */
+ emit(hppa_ldo(1 * HPPA_INSN_SIZE, HPPA_REG_T0, HPPA_REG_T0), ctx);
+ emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_T0, EXEC_NEXT_INSTR), ctx);
+ /* in delay slot: */
+ emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_IN_INIT), ctx);
+
+ return;
+ }
+
+ /* load epilogue function pointer and jump to it. */
+ /* exit point is either directly below, or the outermost TCC exit function */
+ emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);
+ emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
+
+ /* NOTE: we are 32-bit and big-endian, so return lower 32-bit value */
+ emit_hppa_copy(lo(r0), HPPA_REG_RET0, ctx);
+
+ /* Restore callee-saved registers. */
+ for (i = 3; i <= 18; i++) {
+ if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i)))
+ continue;
+ emit(hppa_ldw(-REG_SIZE * (8 + (i-3)), HPPA_REG_SP, HPPA_R(i)), ctx);
+ }
+
+ /* load original return pointer (stored by outermost TCC function) */
+ emit(hppa_ldw(-0x14, HPPA_REG_SP, HPPA_REG_RP), ctx);
+ emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_RP, EXEC_NEXT_INSTR), ctx);
+ /* in delay slot: */
+ emit(hppa_ldw(-0x04, HPPA_REG_SP, HPPA_REG_SP), ctx);
+}
+
+static bool is_stacked(s8 reg)
+{
+ return reg < 0;
+}
+
+static const s8 *bpf_get_reg64_offset(const s8 *reg, const s8 *tmp,
+ u16 offset_sp, struct hppa_jit_context *ctx)
+{
+ if (is_stacked(hi(reg))) {
+ emit(hppa_ldw(REG_SIZE * hi(reg) - offset_sp, HPPA_REG_SP, hi(tmp)), ctx);
+ emit(hppa_ldw(REG_SIZE * lo(reg) - offset_sp, HPPA_REG_SP, lo(tmp)), ctx);
+ reg = tmp;
+ }
+ REG_SET_SEEN(ctx, hi(reg));
+ REG_SET_SEEN(ctx, lo(reg));
+ return reg;
+}
+
+static const s8 *bpf_get_reg64(const s8 *reg, const s8 *tmp,
+ struct hppa_jit_context *ctx)
+{
+ return bpf_get_reg64_offset(reg, tmp, 0, ctx);
+}
+
+static const s8 *bpf_get_reg64_ref(const s8 *reg, const s8 *tmp,
+ bool must_load, struct hppa_jit_context *ctx)
+{
+ if (!OPTIMIZE_HPPA)
+ return bpf_get_reg64(reg, tmp, ctx);
+
+ if (is_stacked(hi(reg))) {
+ if (must_load)
+ emit(hppa_ldw(REG_SIZE * hi(reg), HPPA_REG_SP, hi(tmp)), ctx);
+ reg = tmp;
+ }
+ REG_SET_SEEN(ctx, hi(reg));
+ REG_SET_SEEN(ctx, lo(reg));
+ return reg;
+}
+
+
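+/*
+ * Write a register pair back to its stack slots. This is a no-op when
+ * the pair lives in physical registers, since operations modify those
+ * in place.
+ */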
+static void bpf_put_reg64(const s8 *reg, const s8 *src,
+ struct hppa_jit_context *ctx)
+{
+ if (is_stacked(hi(reg))) {
+ emit(hppa_stw(hi(src), REG_SIZE * hi(reg), HPPA_REG_SP), ctx);
+ emit(hppa_stw(lo(src), REG_SIZE * lo(reg), HPPA_REG_SP), ctx);
+ }
+}
+
+static void bpf_save_R0(struct hppa_jit_context *ctx)
+{
+ bpf_put_reg64(regmap[TMP_REG_R0], regmap[BPF_REG_0], ctx);
+}
+
+static void bpf_restore_R0(struct hppa_jit_context *ctx)
+{
+ bpf_get_reg64(regmap[TMP_REG_R0], regmap[BPF_REG_0], ctx);
+}
+
+
+static const s8 *bpf_get_reg32(const s8 *reg, const s8 *tmp,
+ struct hppa_jit_context *ctx)
+{
+ if (is_stacked(lo(reg))) {
+ emit(hppa_ldw(REG_SIZE * lo(reg), HPPA_REG_SP, lo(tmp)), ctx);
+ reg = tmp;
+ }
+ REG_SET_SEEN(ctx, lo(reg));
+ return reg;
+}
+
+static const s8 *bpf_get_reg32_ref(const s8 *reg, const s8 *tmp,
+ struct hppa_jit_context *ctx)
+{
+ if (!OPTIMIZE_HPPA)
+ return bpf_get_reg32(reg, tmp, ctx);
+
+ if (is_stacked(hi(reg))) {
+ reg = tmp;
+ }
+ REG_SET_SEEN(ctx, lo(reg));
+ return reg;
+}
+
+static void bpf_put_reg32(const s8 *reg, const s8 *src,
+ struct hppa_jit_context *ctx)
+{
+ if (is_stacked(lo(reg))) {
+ REG_SET_SEEN(ctx, lo(src));
+ emit(hppa_stw(lo(src), REG_SIZE * lo(reg), HPPA_REG_SP), ctx);
+		if (!ctx->prog->aux->verifier_zext) {
+ REG_SET_SEEN(ctx, hi(reg));
+ emit(hppa_stw(HPPA_REG_ZERO, REG_SIZE * hi(reg), HPPA_REG_SP), ctx);
+ }
+	} else if (!ctx->prog->aux->verifier_zext) {
+ REG_SET_SEEN(ctx, hi(reg));
+ emit_hppa_copy(HPPA_REG_ZERO, hi(reg), ctx);
+ }
+}
+
+/* extern hppa millicode functions */
+extern void $$mulI(void);
+extern void $$divU(void);
+extern void $$remU(void);
+
+static void emit_call_millicode(void *func, const s8 arg0,
+ const s8 arg1, u8 opcode, struct hppa_jit_context *ctx)
+{
+ u32 func_addr;
+
+ emit_hppa_copy(arg0, HPPA_REG_ARG0, ctx);
+ emit_hppa_copy(arg1, HPPA_REG_ARG1, ctx);
+
+	/* the millicode function clobbers HPPA_REG_RET0/1, so save R0 temporarily */
+ if (arg0 != HPPA_REG_RET1)
+ bpf_save_R0(ctx);
+
+ func_addr = (uintptr_t) dereference_function_descriptor(func);
+ emit(hppa_ldil(func_addr, HPPA_REG_R31), ctx);
+ /* skip the following be_l instruction if divisor is zero. */
+ if (BPF_OP(opcode) == BPF_DIV || BPF_OP(opcode) == BPF_MOD) {
+ if (BPF_OP(opcode) == BPF_DIV)
+ emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET1, ctx);
+ else
+ emit_hppa_copy(HPPA_REG_ARG0, HPPA_REG_RET1, ctx);
+ emit(hppa_or_cond(HPPA_REG_ARG1, HPPA_REG_ZERO, 1, 0, HPPA_REG_ZERO), ctx);
+ }
+ /* Note: millicode functions use r31 as return pointer instead of rp */
+ emit(hppa_be_l(im11(func_addr) >> 2, HPPA_REG_R31, NOP_NEXT_INSTR), ctx);
+	emit(hppa_nop(), ctx);	/* this nop fills the branch delay slot */
+
+ /* Note: millicode functions return result in RET1, not RET0 */
+ emit_hppa_copy(HPPA_REG_RET1, arg0, ctx);
+
+ /* restore HPPA_REG_RET0/1, temp. save in dest. */
+ if (arg0 != HPPA_REG_RET1)
+ bpf_restore_R0(ctx);
+}
+
+static void emit_call_libgcc_ll(void *func, const s8 *arg0,
+ const s8 *arg1, u8 opcode, struct hppa_jit_context *ctx)
+{
+ u32 func_addr;
+
+ emit_hppa_copy(lo(arg0), HPPA_REG_ARG0, ctx);
+ emit_hppa_copy(hi(arg0), HPPA_REG_ARG1, ctx);
+ emit_hppa_copy(lo(arg1), HPPA_REG_ARG2, ctx);
+ emit_hppa_copy(hi(arg1), HPPA_REG_ARG3, ctx);
+
+	/* libgcc overwrites HPPA_REG_RET0/_RET1, so keep a copy of R0 on the stack */
+ if (hi(arg0) != HPPA_REG_RET0)
+ bpf_save_R0(ctx);
+
+ /* prepare stack */
+ emit(hppa_ldo(2 * FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);
+
+ func_addr = (uintptr_t) dereference_function_descriptor(func);
+ emit(hppa_ldil(func_addr, HPPA_REG_R31), ctx);
+	/* skip the following be_l instruction if divisor is 0 (and set default values) */
+ if (BPF_OP(opcode) == BPF_DIV || BPF_OP(opcode) == BPF_MOD) {
+ emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET0, ctx);
+ if (BPF_OP(opcode) == BPF_DIV)
+ emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET1, ctx);
+ else
+ emit_hppa_copy(HPPA_REG_ARG0, HPPA_REG_RET1, ctx);
+ emit(hppa_or_cond(HPPA_REG_ARG2, HPPA_REG_ARG3, 1, 0, HPPA_REG_ZERO), ctx);
+ }
+ emit(hppa_be_l(im11(func_addr) >> 2, HPPA_REG_R31, EXEC_NEXT_INSTR), ctx);
+ emit_hppa_copy(HPPA_REG_R31, HPPA_REG_RP, ctx);
+
+ /* restore stack */
+ emit(hppa_ldo(-2 * FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);
+
+ emit_hppa_copy(HPPA_REG_RET0, hi(arg0), ctx);
+ emit_hppa_copy(HPPA_REG_RET1, lo(arg0), ctx);
+
+ /* restore HPPA_REG_RET0/_RET1 */
+ if (hi(arg0) != HPPA_REG_RET0)
+ bpf_restore_R0(ctx);
+}
+
+static void emit_jump(s32 paoff, bool force_far,
+ struct hppa_jit_context *ctx)
+{
+ unsigned long pc, addr;
+
+ /* Note: allocate 2 instructions for jumps if force_far is set. */
+ if (relative_bits_ok(paoff - HPPA_BRANCH_DISPLACEMENT, 17)) {
+ /* use BL,short branch followed by nop() */
+ emit(hppa_bl(paoff - HPPA_BRANCH_DISPLACEMENT, HPPA_REG_ZERO), ctx);
+ if (force_far)
+ emit(hppa_nop(), ctx);
+ return;
+ }
+
+ pc = (uintptr_t) &ctx->insns[ctx->ninsns];
+ addr = pc + (paoff * HPPA_INSN_SIZE);
+ emit(hppa_ldil(addr, HPPA_REG_R31), ctx);
+ emit(hppa_be_l(im11(addr) >> 2, HPPA_REG_R31, NOP_NEXT_INSTR), ctx); // be,l,n addr(sr4,r31), %sr0, %r31
+}
+
+static void emit_alu_i64(const s8 *dst, s32 imm,
+ struct hppa_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = regmap[TMP_REG_1];
+ const s8 *rd;
+
+	/* The bpf_get_reg64_ref() fast path for BPF_MOV is disabled. */
+	rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ /* dst = dst OP imm */
+ switch (op) {
+ case BPF_MOV:
+ emit_imm32(rd, imm, ctx);
+ break;
+ case BPF_AND:
+ emit_imm(HPPA_REG_T0, imm, ctx);
+ emit(hppa_and(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
+ if (imm >= 0)
+ emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
+ break;
+ case BPF_OR:
+ emit_imm(HPPA_REG_T0, imm, ctx);
+ emit(hppa_or(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
+ if (imm < 0)
+ emit_imm(hi(rd), -1, ctx);
+ break;
+ case BPF_XOR:
+ emit_imm(HPPA_REG_T0, imm, ctx);
+ emit_hppa_xor(lo(rd), HPPA_REG_T0, lo(rd), ctx);
+ if (imm < 0) {
+ emit_imm(HPPA_REG_T0, -1, ctx);
+ emit_hppa_xor(hi(rd), HPPA_REG_T0, hi(rd), ctx);
+ }
+ break;
+ case BPF_LSH:
+ if (imm == 0)
+ break;
+ if (imm > 32) {
+ imm -= 32;
+ emit(hppa_zdep(lo(rd), imm, imm, hi(rd)), ctx);
+ emit_hppa_copy(HPPA_REG_ZERO, lo(rd), ctx);
+ } else if (imm == 32) {
+ emit_hppa_copy(lo(rd), hi(rd), ctx);
+ emit_hppa_copy(HPPA_REG_ZERO, lo(rd), ctx);
+ } else {
+ emit(hppa_shd(hi(rd), lo(rd), 32 - imm, hi(rd)), ctx);
+ emit(hppa_zdep(lo(rd), imm, imm, lo(rd)), ctx);
+ }
+ break;
+ case BPF_RSH:
+ if (imm == 0)
+ break;
+ if (imm > 32) {
+ imm -= 32;
+ emit(hppa_shr(hi(rd), imm, lo(rd)), ctx);
+ emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
+ } else if (imm == 32) {
+ emit_hppa_copy(hi(rd), lo(rd), ctx);
+ emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
+ } else {
+ emit(hppa_shrpw(hi(rd), lo(rd), imm, lo(rd)), ctx);
+ emit(hppa_shr(hi(rd), imm, hi(rd)), ctx);
+ }
+ break;
+ case BPF_ARSH:
+ if (imm == 0)
+ break;
+ if (imm > 32) {
+ imm -= 32;
+ emit(hppa_extrws(hi(rd), 31 - imm, imm, lo(rd)), ctx);
+ emit(hppa_extrws(hi(rd), 0, 31, hi(rd)), ctx);
+ } else if (imm == 32) {
+ emit_hppa_copy(hi(rd), lo(rd), ctx);
+ emit(hppa_extrws(hi(rd), 0, 31, hi(rd)), ctx);
+ } else {
+ emit(hppa_shrpw(hi(rd), lo(rd), imm, lo(rd)), ctx);
+ emit(hppa_extrws(hi(rd), 31 - imm, imm, hi(rd)), ctx);
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+}
+
+static void emit_alu_i32(const s8 *dst, s32 imm,
+ struct hppa_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = regmap[TMP_REG_1];
+	const s8 *rd;
+
+ if (op == BPF_MOV)
+ rd = bpf_get_reg32_ref(dst, tmp1, ctx);
+ else
+ rd = bpf_get_reg32(dst, tmp1, ctx);
+
+ /* dst = dst OP imm */
+ switch (op) {
+ case BPF_MOV:
+ emit_imm(lo(rd), imm, ctx);
+ break;
+ case BPF_ADD:
+ emit_imm(HPPA_REG_T0, imm, ctx);
+ emit(hppa_add(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
+ break;
+ case BPF_SUB:
+ emit_imm(HPPA_REG_T0, imm, ctx);
+ emit(hppa_sub(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
+ break;
+ case BPF_AND:
+ emit_imm(HPPA_REG_T0, imm, ctx);
+ emit(hppa_and(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
+ break;
+ case BPF_OR:
+ emit_imm(HPPA_REG_T0, imm, ctx);
+ emit(hppa_or(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
+ break;
+ case BPF_XOR:
+ emit_imm(HPPA_REG_T0, imm, ctx);
+ emit_hppa_xor(lo(rd), HPPA_REG_T0, lo(rd), ctx);
+ break;
+ case BPF_LSH:
+ if (imm != 0)
+ emit(hppa_zdep(lo(rd), imm, imm, lo(rd)), ctx);
+ break;
+ case BPF_RSH:
+ if (imm != 0)
+ emit(hppa_shr(lo(rd), imm, lo(rd)), ctx);
+ break;
+ case BPF_ARSH:
+ if (imm != 0)
+ emit(hppa_extrws(lo(rd), 31 - imm, imm, lo(rd)), ctx);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ bpf_put_reg32(dst, rd, ctx);
+}
+
+static void emit_alu_r64(const s8 *dst, const s8 *src,
+ struct hppa_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = regmap[TMP_REG_1];
+ const s8 *tmp2 = regmap[TMP_REG_2];
+ const s8 *rd;
+ const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
+
+ if (op == BPF_MOV)
+ rd = bpf_get_reg64_ref(dst, tmp1, false, ctx);
+ else
+ rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ /* dst = dst OP src */
+ switch (op) {
+ case BPF_MOV:
+ emit_hppa_copy(lo(rs), lo(rd), ctx);
+ emit_hppa_copy(hi(rs), hi(rd), ctx);
+ break;
+ case BPF_ADD:
+ emit(hppa_add(lo(rd), lo(rs), lo(rd)), ctx);
+ emit(hppa_addc(hi(rd), hi(rs), hi(rd)), ctx);
+ break;
+ case BPF_SUB:
+ emit(hppa_sub(lo(rd), lo(rs), lo(rd)), ctx);
+ emit(hppa_subb(hi(rd), hi(rs), hi(rd)), ctx);
+ break;
+ case BPF_AND:
+ emit(hppa_and(lo(rd), lo(rs), lo(rd)), ctx);
+ emit(hppa_and(hi(rd), hi(rs), hi(rd)), ctx);
+ break;
+ case BPF_OR:
+ emit(hppa_or(lo(rd), lo(rs), lo(rd)), ctx);
+ emit(hppa_or(hi(rd), hi(rs), hi(rd)), ctx);
+ break;
+ case BPF_XOR:
+ emit_hppa_xor(lo(rd), lo(rs), lo(rd), ctx);
+ emit_hppa_xor(hi(rd), hi(rs), hi(rd), ctx);
+ break;
+ case BPF_MUL:
+ emit_call_libgcc_ll(__muldi3, rd, rs, op, ctx);
+ break;
+ case BPF_DIV:
+ emit_call_libgcc_ll(&hppa_div64, rd, rs, op, ctx);
+ break;
+ case BPF_MOD:
+ emit_call_libgcc_ll(&hppa_div64_rem, rd, rs, op, ctx);
+ break;
+ case BPF_LSH:
+ emit_call_libgcc_ll(__ashldi3, rd, rs, op, ctx);
+ break;
+ case BPF_RSH:
+ emit_call_libgcc_ll(__lshrdi3, rd, rs, op, ctx);
+ break;
+ case BPF_ARSH:
+ emit_call_libgcc_ll(__ashrdi3, rd, rs, op, ctx);
+ break;
+ case BPF_NEG:
+ emit(hppa_sub(HPPA_REG_ZERO, lo(rd), lo(rd)), ctx);
+ emit(hppa_subb(HPPA_REG_ZERO, hi(rd), hi(rd)), ctx);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+}
+
+static void emit_alu_r32(const s8 *dst, const s8 *src,
+ struct hppa_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = regmap[TMP_REG_1];
+ const s8 *tmp2 = regmap[TMP_REG_2];
+ const s8 *rd;
+ const s8 *rs = bpf_get_reg32(src, tmp2, ctx);
+
+ if (op == BPF_MOV)
+ rd = bpf_get_reg32_ref(dst, tmp1, ctx);
+ else
+ rd = bpf_get_reg32(dst, tmp1, ctx);
+
+ /* dst = dst OP src */
+ switch (op) {
+ case BPF_MOV:
+ emit_hppa_copy(lo(rs), lo(rd), ctx);
+ break;
+ case BPF_ADD:
+ emit(hppa_add(lo(rd), lo(rs), lo(rd)), ctx);
+ break;
+ case BPF_SUB:
+ emit(hppa_sub(lo(rd), lo(rs), lo(rd)), ctx);
+ break;
+ case BPF_AND:
+ emit(hppa_and(lo(rd), lo(rs), lo(rd)), ctx);
+ break;
+ case BPF_OR:
+ emit(hppa_or(lo(rd), lo(rs), lo(rd)), ctx);
+ break;
+ case BPF_XOR:
+ emit_hppa_xor(lo(rd), lo(rs), lo(rd), ctx);
+ break;
+ case BPF_MUL:
+ emit_call_millicode($$mulI, lo(rd), lo(rs), op, ctx);
+ break;
+ case BPF_DIV:
+ emit_call_millicode($$divU, lo(rd), lo(rs), op, ctx);
+ break;
+ case BPF_MOD:
+ emit_call_millicode($$remU, lo(rd), lo(rs), op, ctx);
+ break;
+ case BPF_LSH:
+ emit(hppa_subi(0x1f, lo(rs), HPPA_REG_T0), ctx);
+ emit(hppa_mtsar(HPPA_REG_T0), ctx);
+ emit(hppa_depwz_sar(lo(rd), lo(rd)), ctx);
+ break;
+ case BPF_RSH:
+ emit(hppa_mtsar(lo(rs)), ctx);
+ emit(hppa_shrpw_sar(lo(rd), lo(rd)), ctx);
+ break;
+ case BPF_ARSH: /* sign extending arithmetic shift right */
+ // emit(hppa_beq(lo(rs), HPPA_REG_ZERO, 2), ctx);
+ emit(hppa_subi(0x1f, lo(rs), HPPA_REG_T0), ctx);
+ emit(hppa_mtsar(HPPA_REG_T0), ctx);
+ emit(hppa_extrws_sar(lo(rd), lo(rd)), ctx);
+ break;
+ case BPF_NEG:
+ emit(hppa_sub(HPPA_REG_ZERO, lo(rd), lo(rd)), ctx); // sub r0,rd,rd
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ bpf_put_reg32(dst, rd, ctx);
+}
+
+static int emit_branch_r64(const s8 *src1, const s8 *src2, s32 paoff,
+ struct hppa_jit_context *ctx, const u8 op)
+{
+ int e, s = ctx->ninsns;
+ const s8 *tmp1 = regmap[TMP_REG_1];
+ const s8 *tmp2 = regmap[TMP_REG_2];
+
+ const s8 *rs1 = bpf_get_reg64(src1, tmp1, ctx);
+ const s8 *rs2 = bpf_get_reg64(src2, tmp2, ctx);
+
+ /*
+ * NO_JUMP skips over the rest of the instructions and the
+ * emit_jump, meaning the BPF branch is not taken.
+ * JUMP skips directly to the emit_jump, meaning
+ * the BPF branch is taken.
+ *
+ * The fallthrough case results in the BPF branch being taken.
+ */
+#define NO_JUMP(idx) (2 + (idx) - 1)
+#define JUMP(idx) (0 + (idx) - 1)
+
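+	/*
+	 * Example (BPF_JGT): if hi(rs1) > hi(rs2) branch straight to the
+	 * final jump (taken); if hi(rs1) < hi(rs2) skip it (not taken);
+	 * if the high words are equal, decide on the unsigned low words.
+	 */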
+ switch (op) {
+ case BPF_JEQ:
+ emit(hppa_bne(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(hppa_bne(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JGT:
+ emit(hppa_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(hppa_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(hppa_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JLT:
+ emit(hppa_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(hppa_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(hppa_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JGE:
+ emit(hppa_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(hppa_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(hppa_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JLE:
+ emit(hppa_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(hppa_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(hppa_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JNE:
+ emit(hppa_bne(hi(rs1), hi(rs2), JUMP(1)), ctx);
+ emit(hppa_beq(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSGT:
+ emit(hppa_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(hppa_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(hppa_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSLT:
+ emit(hppa_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(hppa_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(hppa_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSGE:
+ emit(hppa_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(hppa_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(hppa_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSLE:
+ emit(hppa_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(hppa_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(hppa_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSET:
+ emit(hppa_and(hi(rs1), hi(rs2), HPPA_REG_T0), ctx);
+ emit(hppa_and(lo(rs1), lo(rs2), HPPA_REG_T1), ctx);
+ emit(hppa_bne(HPPA_REG_T0, HPPA_REG_ZERO, JUMP(1)), ctx);
+ emit(hppa_beq(HPPA_REG_T1, HPPA_REG_ZERO, NO_JUMP(0)), ctx);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+#undef NO_JUMP
+#undef JUMP
+
+ e = ctx->ninsns;
+ /* Adjust for extra insns. */
+ paoff -= (e - s);
+ emit_jump(paoff, true, ctx);
+ return 0;
+}
+
+static int emit_bcc(u8 op, u8 rd, u8 rs, int paoff, struct hppa_jit_context *ctx)
+{
+ int e, s;
+ bool far = false;
+ int off;
+
+ if (op == BPF_JSET) {
+		/*
+		 * BPF_JSET is a special case: it has no inverse, so translate
+		 * it to an and() and compare the result against zero (BPF_JNE).
+		 */
+ emit(hppa_and(rd, rs, HPPA_REG_T0), ctx);
+ paoff -= 1; /* reduce offset due to hppa_and() above */
+ rd = HPPA_REG_T0;
+ rs = HPPA_REG_ZERO;
+ op = BPF_JNE;
+ }
+
+ s = ctx->ninsns;
+
+ if (!relative_bits_ok(paoff - HPPA_BRANCH_DISPLACEMENT, 12)) {
+ op = invert_bpf_cond(op);
+ far = true;
+ }
+
+ /*
+ * For a far branch, the condition is negated and we jump over the
+ * branch itself, and the three instructions from emit_jump.
+ * For a near branch, just use paoff.
+ */
+ off = far ? (HPPA_BRANCH_DISPLACEMENT - 1) : paoff - HPPA_BRANCH_DISPLACEMENT;
+
+ switch (op) {
+ /* IF (dst COND src) JUMP off */
+ case BPF_JEQ:
+ emit(hppa_beq(rd, rs, off), ctx);
+ break;
+ case BPF_JGT:
+ emit(hppa_bgtu(rd, rs, off), ctx);
+ break;
+ case BPF_JLT:
+ emit(hppa_bltu(rd, rs, off), ctx);
+ break;
+ case BPF_JGE:
+ emit(hppa_bgeu(rd, rs, off), ctx);
+ break;
+ case BPF_JLE:
+ emit(hppa_bleu(rd, rs, off), ctx);
+ break;
+ case BPF_JNE:
+ emit(hppa_bne(rd, rs, off), ctx);
+ break;
+ case BPF_JSGT:
+ emit(hppa_bgt(rd, rs, off), ctx);
+ break;
+ case BPF_JSLT:
+ emit(hppa_blt(rd, rs, off), ctx);
+ break;
+ case BPF_JSGE:
+ emit(hppa_bge(rd, rs, off), ctx);
+ break;
+ case BPF_JSLE:
+ emit(hppa_ble(rd, rs, off), ctx);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ if (far) {
+ e = ctx->ninsns;
+ /* Adjust for extra insns. */
+ paoff -= (e - s);
+ emit_jump(paoff, true, ctx);
+ }
+ return 0;
+}
+
+static int emit_branch_r32(const s8 *src1, const s8 *src2, s32 paoff,
+ struct hppa_jit_context *ctx, const u8 op)
+{
+ int e, s = ctx->ninsns;
+ const s8 *tmp1 = regmap[TMP_REG_1];
+ const s8 *tmp2 = regmap[TMP_REG_2];
+
+ const s8 *rs1 = bpf_get_reg32(src1, tmp1, ctx);
+ const s8 *rs2 = bpf_get_reg32(src2, tmp2, ctx);
+
+ e = ctx->ninsns;
+ /* Adjust for extra insns. */
+ paoff -= (e - s);
+
+ if (emit_bcc(op, lo(rs1), lo(rs2), paoff, ctx))
+ return -1;
+
+ return 0;
+}
+
+static void emit_call(bool fixed, u64 addr, struct hppa_jit_context *ctx)
+{
+ const s8 *tmp = regmap[TMP_REG_1];
+ const s8 *r0 = regmap[BPF_REG_0];
+ const s8 *reg;
+ const int offset_sp = 2 * STACK_ALIGN;
+
+ /* prepare stack */
+ emit(hppa_ldo(offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx);
+
+ /* load R1 & R2 in registers, R3-R5 to stack. */
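+	/*
+	 * Slots -0x34..-0x48 are the outgoing argument words of the 32-bit
+	 * PA-RISC calling convention; arguments beyond the four register
+	 * arguments go on the stack.
+	 */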
+ reg = bpf_get_reg64_offset(regmap[BPF_REG_5], tmp, offset_sp, ctx);
+ emit(hppa_stw(hi(reg), -0x48, HPPA_REG_SP), ctx);
+ emit(hppa_stw(lo(reg), -0x44, HPPA_REG_SP), ctx);
+
+ reg = bpf_get_reg64_offset(regmap[BPF_REG_4], tmp, offset_sp, ctx);
+ emit(hppa_stw(hi(reg), -0x40, HPPA_REG_SP), ctx);
+ emit(hppa_stw(lo(reg), -0x3c, HPPA_REG_SP), ctx);
+
+ reg = bpf_get_reg64_offset(regmap[BPF_REG_3], tmp, offset_sp, ctx);
+ emit(hppa_stw(hi(reg), -0x38, HPPA_REG_SP), ctx);
+ emit(hppa_stw(lo(reg), -0x34, HPPA_REG_SP), ctx);
+
+ reg = bpf_get_reg64_offset(regmap[BPF_REG_2], tmp, offset_sp, ctx);
+ emit_hppa_copy(hi(reg), HPPA_REG_ARG3, ctx);
+ emit_hppa_copy(lo(reg), HPPA_REG_ARG2, ctx);
+
+ reg = bpf_get_reg64_offset(regmap[BPF_REG_1], tmp, offset_sp, ctx);
+ emit_hppa_copy(hi(reg), HPPA_REG_ARG1, ctx);
+ emit_hppa_copy(lo(reg), HPPA_REG_ARG0, ctx);
+
+ /* backup TCC */
+ if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
+ emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_SAVED), ctx);
+
+ /*
+ * Use ldil() to load absolute address. Don't use emit_imm as the
+ * number of emitted instructions should not depend on the value of
+ * addr.
+ */
+ emit(hppa_ldil(addr, HPPA_REG_R31), ctx);
+ emit(hppa_be_l(im11(addr) >> 2, HPPA_REG_R31, EXEC_NEXT_INSTR), ctx);
+ /* set return address in delay slot */
+ emit_hppa_copy(HPPA_REG_R31, HPPA_REG_RP, ctx);
+
+ /* restore TCC */
+ if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
+ emit(hppa_copy(HPPA_REG_TCC_SAVED, HPPA_REG_TCC), ctx);
+
+ /* restore stack */
+ emit(hppa_ldo(-offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx);
+
+ /* set return value. */
+ emit_hppa_copy(HPPA_REG_RET0, hi(r0), ctx);
+ emit_hppa_copy(HPPA_REG_RET1, lo(r0), ctx);
+}
+
+static int emit_bpf_tail_call(int insn, struct hppa_jit_context *ctx)
+{
+ /*
+ * R1 -> &ctx
+ * R2 -> &array
+ * R3 -> index
+ */
+ int off;
+ const s8 *arr_reg = regmap[BPF_REG_2];
+ const s8 *idx_reg = regmap[BPF_REG_3];
+ struct bpf_array bpfa;
+ struct bpf_prog bpfp;
+
+ /* get address of TCC main exit function for error case into rp */
+ emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);
+
+ /* max_entries = array->map.max_entries; */
+ off = offsetof(struct bpf_array, map.max_entries);
+ BUILD_BUG_ON(sizeof(bpfa.map.max_entries) != 4);
+ emit(hppa_ldw(off, lo(arr_reg), HPPA_REG_T1), ctx);
+
+ /*
+ * if (index >= max_entries)
+ * goto out;
+ */
+ emit(hppa_bltu(lo(idx_reg), HPPA_REG_T1, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
+ emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
+
+ /*
+ * if (--tcc < 0)
+ * goto out;
+ */
+ REG_FORCE_SEEN(ctx, HPPA_REG_TCC);
+ emit(hppa_ldo(-1, HPPA_REG_TCC, HPPA_REG_TCC), ctx);
+ emit(hppa_bge(HPPA_REG_TCC, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
+ emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
+
+ /*
+ * prog = array->ptrs[index];
+ * if (!prog)
+ * goto out;
+ */
+ BUILD_BUG_ON(sizeof(bpfa.ptrs[0]) != 4);
+ emit(hppa_sh2add(lo(idx_reg), lo(arr_reg), HPPA_REG_T0), ctx);
+ off = offsetof(struct bpf_array, ptrs);
+ BUILD_BUG_ON(!relative_bits_ok(off, 11));
+ emit(hppa_ldw(off, HPPA_REG_T0, HPPA_REG_T0), ctx);
+ emit(hppa_bne(HPPA_REG_T0, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
+ emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
+
+ /*
+ * tcc = temp_tcc;
+ * goto *(prog->bpf_func + 4);
+ */
+ off = offsetof(struct bpf_prog, bpf_func);
+ BUILD_BUG_ON(!relative_bits_ok(off, 11));
+ BUILD_BUG_ON(sizeof(bpfp.bpf_func) != 4);
+ emit(hppa_ldw(off, HPPA_REG_T0, HPPA_REG_T0), ctx);
+ /* Epilogue jumps to *(t0 + 4). */
+ __build_epilogue(true, ctx);
+ return 0;
+}
+
+static int emit_load_r64(const s8 *dst, const s8 *src, s16 off,
+ struct hppa_jit_context *ctx, const u8 size)
+{
+ const s8 *tmp1 = regmap[TMP_REG_1];
+ const s8 *tmp2 = regmap[TMP_REG_2];
+ const s8 *rd = bpf_get_reg64_ref(dst, tmp1, ctx->prog->aux->verifier_zext, ctx);
+ const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
+ s8 srcreg;
+
+ /* need to calculate address since offset does not fit in 14 bits? */
+ if (relative_bits_ok(off, 14))
+ srcreg = lo(rs);
+ else {
+ /* need to use R1 here, since addil puts result into R1 */
+ srcreg = HPPA_REG_R1;
+ emit(hppa_addil(off, lo(rs)), ctx);
+ off = im11(off);
+ }
+
+ /* LDX: dst = *(size *)(src + off) */
+ switch (size) {
+ case BPF_B:
+ emit(hppa_ldb(off + 0, srcreg, lo(rd)), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
+ break;
+ case BPF_H:
+ emit(hppa_ldh(off + 0, srcreg, lo(rd)), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
+ break;
+ case BPF_W:
+ emit(hppa_ldw(off + 0, srcreg, lo(rd)), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
+ break;
+ case BPF_DW:
+ emit(hppa_ldw(off + 0, srcreg, hi(rd)), ctx);
+ emit(hppa_ldw(off + 4, srcreg, lo(rd)), ctx);
+ break;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+ return 0;
+}
+
+static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
+ struct hppa_jit_context *ctx, const u8 size,
+ const u8 mode)
+{
+ const s8 *tmp1 = regmap[TMP_REG_1];
+ const s8 *tmp2 = regmap[TMP_REG_2];
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+ const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
+ s8 dstreg;
+
+ /* need to calculate address since offset does not fit in 14 bits? */
+ if (relative_bits_ok(off, 14))
+ dstreg = lo(rd);
+ else {
+ /* need to use R1 here, since addil puts result into R1 */
+ dstreg = HPPA_REG_R1;
+ emit(hppa_addil(off, lo(rd)), ctx);
+ off = im11(off);
+ }
+
+	/* ST(X): *(size *)(dst + off) = src */
+ switch (size) {
+ case BPF_B:
+ emit(hppa_stb(lo(rs), off + 0, dstreg), ctx);
+ break;
+ case BPF_H:
+ emit(hppa_sth(lo(rs), off + 0, dstreg), ctx);
+ break;
+ case BPF_W:
+ emit(hppa_stw(lo(rs), off + 0, dstreg), ctx);
+ break;
+ case BPF_DW:
+ emit(hppa_stw(hi(rs), off + 0, dstreg), ctx);
+ emit(hppa_stw(lo(rs), off + 4, dstreg), ctx);
+ break;
+ }
+
+ return 0;
+}
+
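+/* Byte-swap the two low bytes of rd and zero-extend to 32 bits (swab16). */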
+static void emit_rev16(const s8 rd, struct hppa_jit_context *ctx)
+{
+ emit(hppa_extru(rd, 23, 8, HPPA_REG_T1), ctx);
+ emit(hppa_depwz(rd, 23, 8, HPPA_REG_T1), ctx);
+ emit(hppa_extru(HPPA_REG_T1, 31, 16, rd), ctx);
+}
+
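+/* Byte-swap a 32-bit word from rs into rd (swab32). */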
+static void emit_rev32(const s8 rs, const s8 rd, struct hppa_jit_context *ctx)
+{
+ emit(hppa_shrpw(rs, rs, 16, HPPA_REG_T1), ctx);
+ emit(hppa_depwz(HPPA_REG_T1, 15, 8, HPPA_REG_T1), ctx);
+ emit(hppa_shrpw(rs, HPPA_REG_T1, 8, rd), ctx);
+}
+
+static void emit_zext64(const s8 *dst, struct hppa_jit_context *ctx)
+{
+ const s8 *rd;
+ const s8 *tmp1 = regmap[TMP_REG_1];
+
+ rd = bpf_get_reg64(dst, tmp1, ctx);
+ emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
+ bpf_put_reg64(dst, rd, ctx);
+}
+
+int bpf_jit_emit_insn(const struct bpf_insn *insn, struct hppa_jit_context *ctx,
+ bool extra_pass)
+{
+ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
+ BPF_CLASS(insn->code) == BPF_JMP;
+ int s, e, paoff, i = insn - ctx->prog->insnsi;
+ u8 code = insn->code;
+ s16 off = insn->off;
+ s32 imm = insn->imm;
+
+ const s8 *dst = regmap[insn->dst_reg];
+ const s8 *src = regmap[insn->src_reg];
+ const s8 *tmp1 = regmap[TMP_REG_1];
+ const s8 *tmp2 = regmap[TMP_REG_2];
+
+ if (0) printk("CLASS %03d CODE %#02x ALU64:%d BPF_SIZE %#02x "
+ "BPF_CODE %#02x src_reg %d dst_reg %d\n",
+ BPF_CLASS(code), code, (code & BPF_ALU64) ? 1:0, BPF_SIZE(code),
+ BPF_OP(code), insn->src_reg, insn->dst_reg);
+
+ switch (code) {
+ /* dst = src */
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_K:
+
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_K:
+
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ case BPF_ALU64 | BPF_RSH | BPF_X:
+ case BPF_ALU64 | BPF_ARSH | BPF_X:
+ if (BPF_SRC(code) == BPF_K) {
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ }
+ emit_alu_r64(dst, src, ctx, BPF_OP(code));
+ break;
+
+ /* dst = -dst */
+ case BPF_ALU64 | BPF_NEG:
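+		/* src is ignored; tmp2 is just a dummy argument. */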
+ emit_alu_r64(dst, tmp2, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ emit_alu_i64(dst, imm, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU | BPF_MOV | BPF_X:
+ if (imm == 1) {
+ /* Special mov32 for zext. */
+ emit_zext64(dst, ctx);
+ break;
+ }
+ fallthrough;
+ /* dst = dst OP src */
+ case BPF_ALU | BPF_ADD | BPF_X:
+ case BPF_ALU | BPF_SUB | BPF_X:
+ case BPF_ALU | BPF_AND | BPF_X:
+ case BPF_ALU | BPF_OR | BPF_X:
+ case BPF_ALU | BPF_XOR | BPF_X:
+
+ case BPF_ALU | BPF_MUL | BPF_X:
+ case BPF_ALU | BPF_MUL | BPF_K:
+
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU | BPF_DIV | BPF_K:
+
+ case BPF_ALU | BPF_MOD | BPF_X:
+ case BPF_ALU | BPF_MOD | BPF_K:
+
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case BPF_ALU | BPF_RSH | BPF_X:
+ case BPF_ALU | BPF_ARSH | BPF_X:
+ if (BPF_SRC(code) == BPF_K) {
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ }
+ emit_alu_r32(dst, src, ctx, BPF_OP(code));
+ break;
+
+ /* dst = dst OP imm */
+ case BPF_ALU | BPF_MOV | BPF_K:
+ case BPF_ALU | BPF_ADD | BPF_K:
+ case BPF_ALU | BPF_SUB | BPF_K:
+ case BPF_ALU | BPF_AND | BPF_K:
+ case BPF_ALU | BPF_OR | BPF_K:
+ case BPF_ALU | BPF_XOR | BPF_K:
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ /*
+ * mul,div,mod are handled in the BPF_X case.
+ */
+ emit_alu_i32(dst, imm, ctx, BPF_OP(code));
+ break;
+
+ /* dst = -dst */
+ case BPF_ALU | BPF_NEG:
+		/*
+		 * src is ignored; use tmp2 as a dummy register, since it
+		 * is not stored on the stack.
+		 */
+ emit_alu_r32(dst, tmp2, ctx, BPF_OP(code));
+ break;
+
+ /* dst = BSWAP##imm(dst) */
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ {
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ switch (imm) {
+ case 16:
+ /* zero-extend 16 bits into 64 bits */
+ emit(hppa_extru(lo(rd), 31, 16, lo(rd)), ctx);
+ fallthrough;
+ case 32:
+ /* zero-extend 32 bits into 64 bits */
+ if (!ctx->prog->aux->verifier_zext)
+ emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
+ break;
+ case 64:
+ /* Do nothing. */
+ break;
+ default:
+ pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
+ return -1;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+ break;
+ }
+
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ {
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ switch (imm) {
+ case 16:
+ emit_rev16(lo(rd), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
+ break;
+ case 32:
+ emit_rev32(lo(rd), lo(rd), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
+ break;
+ case 64:
+ /* Swap upper and lower halves, then each half. */
+ emit_hppa_copy(hi(rd), HPPA_REG_T0, ctx);
+ emit_rev32(lo(rd), hi(rd), ctx);
+ emit_rev32(HPPA_REG_T0, lo(rd), ctx);
+ break;
+ default:
+ pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
+ return -1;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+ break;
+ }
+ /* JUMP off */
+ case BPF_JMP | BPF_JA:
+ paoff = hppa_offset(i, off, ctx);
+ emit_jump(paoff, false, ctx);
+ break;
+ /* function call */
+ case BPF_JMP | BPF_CALL:
+ {
+ bool fixed;
+ int ret;
+ u64 addr;
+
+ ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
+ &fixed);
+ if (ret < 0)
+ return ret;
+ emit_call(fixed, addr, ctx);
+ break;
+ }
+ /* tail call */
+ case BPF_JMP | BPF_TAIL_CALL:
+ REG_SET_SEEN_ALL(ctx);
+ if (emit_bpf_tail_call(i, ctx))
+ return -1;
+ break;
+ /* IF (dst COND imm) JUMP off */
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ paoff = hppa_offset(i, off, ctx);
+ if (BPF_SRC(code) == BPF_K) {
+ s = ctx->ninsns;
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ e = ctx->ninsns;
+ paoff -= (e - s);
+ }
+ if (is64)
+ emit_branch_r64(dst, src, paoff, ctx, BPF_OP(code));
+ else
+ emit_branch_r32(dst, src, paoff, ctx, BPF_OP(code));
+ break;
+ /* function return */
+ case BPF_JMP | BPF_EXIT:
+ if (i == ctx->prog->len - 1)
+ break;
+ /* load epilogue function pointer and jump to it. */
+ emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);
+ emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
+ break;
+
+ /* dst = imm64 */
+ case BPF_LD | BPF_IMM | BPF_DW:
+ {
+ struct bpf_insn insn1 = insn[1];
+ u32 upper = insn1.imm;
+ u32 lower = imm;
+ const s8 *rd = bpf_get_reg64_ref(dst, tmp1, false, ctx);
+
+ if (0 && bpf_pseudo_func(insn)) {
+ WARN_ON(upper); /* we are 32-bit! */
+ upper = 0;
+			lower = (uintptr_t) dereference_function_descriptor((void *)lower);
+ }
+
+ emit_imm64(rd, upper, lower, ctx);
+ bpf_put_reg64(dst, rd, ctx);
+ return 1;
+ }
+
+ /* LDX: dst = *(size *)(src + off) */
+ case BPF_LDX | BPF_MEM | BPF_B:
+ case BPF_LDX | BPF_MEM | BPF_H:
+ case BPF_LDX | BPF_MEM | BPF_W:
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ if (emit_load_r64(dst, src, off, ctx, BPF_SIZE(code)))
+ return -1;
+ break;
+
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
+ /* ST: *(size *)(dst + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_B:
+ case BPF_ST | BPF_MEM | BPF_H:
+ case BPF_ST | BPF_MEM | BPF_W:
+ case BPF_ST | BPF_MEM | BPF_DW:
+
+ case BPF_STX | BPF_MEM | BPF_B:
+ case BPF_STX | BPF_MEM | BPF_H:
+ case BPF_STX | BPF_MEM | BPF_W:
+ case BPF_STX | BPF_MEM | BPF_DW:
+ if (BPF_CLASS(code) == BPF_ST) {
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ }
+
+ if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
+ BPF_MODE(code)))
+ return -1;
+ break;
+
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+		pr_info_once(
+			"bpf-jit: not supported: atomic operation %02x\n",
+			insn->imm);
+ return -EFAULT;
+
+ default:
+ pr_err("bpf-jit: unknown opcode %02x\n", code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void bpf_jit_build_prologue(struct hppa_jit_context *ctx)
+{
+ const s8 *tmp = regmap[TMP_REG_1];
+ const s8 *dst, *reg;
+ int stack_adjust = 0;
+ int i;
+ unsigned long addr;
+ int bpf_stack_adjust;
+
+ /*
+ * stack on hppa grows up, so if tail calls are used we need to
+ * allocate the maximum stack size
+ */
+ if (REG_ALL_SEEN(ctx))
+ bpf_stack_adjust = MAX_BPF_STACK;
+ else
+ bpf_stack_adjust = ctx->prog->aux->stack_depth;
+ bpf_stack_adjust = round_up(bpf_stack_adjust, STACK_ALIGN);
+
+ /* make space for callee-saved registers. */
+ stack_adjust += NR_SAVED_REGISTERS * REG_SIZE;
+ /* make space for BPF registers on stack. */
+ stack_adjust += BPF_JIT_SCRATCH_REGS * REG_SIZE;
+ /* make space for BPF stack. */
+ stack_adjust += bpf_stack_adjust;
+ /* round up for stack alignment. */
+ stack_adjust = round_up(stack_adjust, STACK_ALIGN);
+
+	/*
+	 * The first instruction sets the tail-call-counter (TCC) register.
+	 * This instruction is skipped by tail calls.
+	 * The counter is kept in a temporary register for now, because the
+	 * callee-saved registers have not been saved yet.
+	 */
+ emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC_IN_INIT), ctx);
+
+	/*
+	 * Skip all initializations when called as a BPF tail call.
+	 */
+ emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_R1), ctx);
+ emit(hppa_bne(HPPA_REG_TCC_IN_INIT, HPPA_REG_R1, ctx->prologue_len - 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
+
+ /* set up hppa stack frame. */
+ emit_hppa_copy(HPPA_REG_SP, HPPA_REG_R1, ctx); // copy sp,r1 (=prev_sp)
+ emit(hppa_ldo(stack_adjust, HPPA_REG_SP, HPPA_REG_SP), ctx); // ldo stack_adjust(sp),sp (increase stack)
+ emit(hppa_stw(HPPA_REG_R1, -REG_SIZE, HPPA_REG_SP), ctx); // stw prev_sp,-0x04(sp)
+ emit(hppa_stw(HPPA_REG_RP, -0x14, HPPA_REG_SP), ctx); // stw rp,-0x14(sp)
+
+ REG_FORCE_SEEN(ctx, HPPA_REG_T0);
+ REG_FORCE_SEEN(ctx, HPPA_REG_T1);
+ REG_FORCE_SEEN(ctx, HPPA_REG_T2);
+ REG_FORCE_SEEN(ctx, HPPA_REG_T3);
+ REG_FORCE_SEEN(ctx, HPPA_REG_T4);
+ REG_FORCE_SEEN(ctx, HPPA_REG_T5);
+
+ /* save callee-save registers. */
+ for (i = 3; i <= 18; i++) {
+ if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i)))
+ continue;
+ emit(hppa_stw(HPPA_R(i), -REG_SIZE * (8 + (i-3)), HPPA_REG_SP), ctx); // stw ri,-save_area(sp)
+ }
+
+ /*
+ * now really set the tail call counter (TCC) register.
+ */
+ if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
+ emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC), ctx);
+
+ /*
+ * save epilogue function pointer for outer TCC call chain.
+ * The main TCC call stores the final RP on stack.
+ */
+ addr = (uintptr_t) &ctx->insns[ctx->epilogue_offset];
+ /* skip first two instructions of exit function, which jump to exit */
+ addr += 2 * HPPA_INSN_SIZE;
+ emit(hppa_ldil(addr, HPPA_REG_T2), ctx);
+ emit(hppa_ldo(im11(addr), HPPA_REG_T2, HPPA_REG_T2), ctx);
+ emit(EXIT_PTR_STORE(HPPA_REG_T2), ctx);
+
+ /* load R1 & R2 from registers, R3-R5 from stack. */
+ /* use HPPA_REG_R1 which holds the old stack value */
+ dst = regmap[BPF_REG_5];
+ reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
+	if (REG_WAS_SEEN(ctx, lo(reg)) || REG_WAS_SEEN(ctx, hi(reg))) {
+ if (REG_WAS_SEEN(ctx, hi(reg)))
+ emit(hppa_ldw(-0x48, HPPA_REG_R1, hi(reg)), ctx);
+ if (REG_WAS_SEEN(ctx, lo(reg)))
+ emit(hppa_ldw(-0x44, HPPA_REG_R1, lo(reg)), ctx);
+ bpf_put_reg64(dst, tmp, ctx);
+ }
+
+ dst = regmap[BPF_REG_4];
+ reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
+	if (REG_WAS_SEEN(ctx, lo(reg)) || REG_WAS_SEEN(ctx, hi(reg))) {
+ if (REG_WAS_SEEN(ctx, hi(reg)))
+ emit(hppa_ldw(-0x40, HPPA_REG_R1, hi(reg)), ctx);
+ if (REG_WAS_SEEN(ctx, lo(reg)))
+ emit(hppa_ldw(-0x3c, HPPA_REG_R1, lo(reg)), ctx);
+ bpf_put_reg64(dst, tmp, ctx);
+ }
+
+ dst = regmap[BPF_REG_3];
+ reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
+	if (REG_WAS_SEEN(ctx, lo(reg)) || REG_WAS_SEEN(ctx, hi(reg))) {
+ if (REG_WAS_SEEN(ctx, hi(reg)))
+ emit(hppa_ldw(-0x38, HPPA_REG_R1, hi(reg)), ctx);
+ if (REG_WAS_SEEN(ctx, lo(reg)))
+ emit(hppa_ldw(-0x34, HPPA_REG_R1, lo(reg)), ctx);
+ bpf_put_reg64(dst, tmp, ctx);
+ }
+
+ dst = regmap[BPF_REG_2];
+ reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
+	if (REG_WAS_SEEN(ctx, lo(reg)) || REG_WAS_SEEN(ctx, hi(reg))) {
+ if (REG_WAS_SEEN(ctx, hi(reg)))
+ emit_hppa_copy(HPPA_REG_ARG3, hi(reg), ctx);
+ if (REG_WAS_SEEN(ctx, lo(reg)))
+ emit_hppa_copy(HPPA_REG_ARG2, lo(reg), ctx);
+ bpf_put_reg64(dst, tmp, ctx);
+ }
+
+ dst = regmap[BPF_REG_1];
+ reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
+	if (REG_WAS_SEEN(ctx, lo(reg)) || REG_WAS_SEEN(ctx, hi(reg))) {
+ if (REG_WAS_SEEN(ctx, hi(reg)))
+ emit_hppa_copy(HPPA_REG_ARG1, hi(reg), ctx);
+ if (REG_WAS_SEEN(ctx, lo(reg)))
+ emit_hppa_copy(HPPA_REG_ARG0, lo(reg), ctx);
+ bpf_put_reg64(dst, tmp, ctx);
+ }
+
+ /* Set up BPF frame pointer. */
+ dst = regmap[BPF_REG_FP];
+ reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
+	if (REG_WAS_SEEN(ctx, lo(reg)) || REG_WAS_SEEN(ctx, hi(reg))) {
+ if (REG_WAS_SEEN(ctx, lo(reg)))
+ emit(hppa_ldo(-REG_SIZE * (NR_SAVED_REGISTERS + BPF_JIT_SCRATCH_REGS),
+ HPPA_REG_SP, lo(reg)), ctx);
+ if (REG_WAS_SEEN(ctx, hi(reg)))
+ emit_hppa_copy(HPPA_REG_ZERO, hi(reg), ctx);
+ bpf_put_reg64(dst, tmp, ctx);
+ }
+
+ emit(hppa_nop(), ctx);
+}
+
+void bpf_jit_build_epilogue(struct hppa_jit_context *ctx)
+{
+ __build_epilogue(false, ctx);
+}
diff --git a/arch/parisc/net/bpf_jit_comp64.c b/arch/parisc/net/bpf_jit_comp64.c
new file mode 100644
index 000000000000..54b0d5e25e02
--- /dev/null
+++ b/arch/parisc/net/bpf_jit_comp64.c
@@ -0,0 +1,1209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BPF JIT compiler for PA-RISC (64-bit)
+ *
+ * Copyright(c) 2023 Helge Deller <deller@gmx.de>
+ *
+ * The code is based on the BPF JIT compiler for RV64 by Björn Töpel.
+ *
+ * TODO:
+ * - check if bpf_jit_needs_zext() is needed (currently enabled)
+ * - implement arch_prepare_bpf_trampoline(), poke(), ...
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/libgcc.h>
+#include "bpf_jit.h"
+
+static const int regmap[] = {
+ [BPF_REG_0] = HPPA_REG_RET0,
+ [BPF_REG_1] = HPPA_R(5),
+ [BPF_REG_2] = HPPA_R(6),
+ [BPF_REG_3] = HPPA_R(7),
+ [BPF_REG_4] = HPPA_R(8),
+ [BPF_REG_5] = HPPA_R(9),
+ [BPF_REG_6] = HPPA_R(10),
+ [BPF_REG_7] = HPPA_R(11),
+ [BPF_REG_8] = HPPA_R(12),
+ [BPF_REG_9] = HPPA_R(13),
+ [BPF_REG_FP] = HPPA_R(14),
+ [BPF_REG_AX] = HPPA_R(15),
+};
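+
+/*
+ * Unlike the 32-bit JIT, the 64-bit JIT maps every BPF register
+ * directly to a physical register; nothing is spilled to the stack.
+ */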
+
+/*
+ * Stack layout during BPF program execution (note: stack grows up):
+ *
+ * high
+ * HPPA64 sp => +----------+ <= HPPA64 fp
+ * | saved sp |
+ * | saved rp |
+ * | ... | HPPA64 callee-saved registers
+ * | curr args|
+ * | local var|
+ * +----------+ <= (BPF FP)
+ * | |
+ * | ... | BPF program stack
+ * | |
+ * | ... | Function call stack
+ * | |
+ * +----------+
+ * low
+ */
+
+/* Stack alignment used for the BPF frame. */
+#define STACK_ALIGN	FRAME_SIZE
+
+#define EXIT_PTR_LOAD(reg) hppa64_ldd_im16(-FRAME_SIZE, HPPA_REG_SP, reg)
+#define EXIT_PTR_STORE(reg) hppa64_std_im16(reg, -FRAME_SIZE, HPPA_REG_SP)
+#define EXIT_PTR_JUMP(reg, nop) hppa_bv(HPPA_REG_ZERO, reg, nop)
+
+static u8 bpf_to_hppa_reg(int bpf_reg, struct hppa_jit_context *ctx)
+{
+ u8 reg = regmap[bpf_reg];
+
+ REG_SET_SEEN(ctx, reg);
+ return reg;
+}
+
+static void emit_hppa_copy(const s8 rs, const s8 rd, struct hppa_jit_context *ctx)
+{
+ REG_SET_SEEN(ctx, rd);
+ if (OPTIMIZE_HPPA && (rs == rd))
+ return;
+ REG_SET_SEEN(ctx, rs);
+ emit(hppa_copy(rs, rd), ctx);
+}
+
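+/*
+ * Emit a 64-bit deposit (depd): place the low "len" bits of src into
+ * target, with "pos" naming the rightmost bit of the field in PA bit
+ * numbering (bit 0 is the MSB). no_zero selects the variant that
+ * preserves the target bits outside the field.
+ */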
+static void emit_hppa64_depd(u8 src, u8 pos, u8 len, u8 target, bool no_zero, struct hppa_jit_context *ctx)
+{
+ int c;
+
+ pos &= (BITS_PER_LONG - 1);
+ pos = 63 - pos;
+ len = 64 - len;
+ c = (len < 32) ? 0x4 : 0;
+ c |= (pos >= 32) ? 0x2 : 0;
+ c |= (no_zero) ? 0x1 : 0;
+ emit(hppa_t10_insn(0x3c, target, src, 0, c, pos & 0x1f, len & 0x1f), ctx);
+}
+
+static void emit_hppa64_shld(u8 src, int num, u8 target, struct hppa_jit_context *ctx)
+{
+ emit_hppa64_depd(src, 63-num, 64-num, target, 0, ctx);
+}
+
+static void emit_hppa64_extrd(u8 src, u8 pos, u8 len, u8 target, bool signed_op, struct hppa_jit_context *ctx)
+{
+ int c;
+
+ pos &= (BITS_PER_LONG - 1);
+ len = 64 - len;
+ c = (len < 32) ? 0x4 : 0;
+ c |= (pos >= 32) ? 0x2 : 0;
+ c |= signed_op ? 0x1 : 0;
+ emit(hppa_t10_insn(0x36, src, target, 0, c, pos & 0x1f, len & 0x1f), ctx);
+}
+
+static void emit_hppa64_extrw(u8 src, u8 pos, u8 len, u8 target, bool signed_op, struct hppa_jit_context *ctx)
+{
+ int c;
+
+ pos &= (32 - 1);
+ len = 32 - len;
+ c = 0x06 | (signed_op ? 1 : 0);
+ emit(hppa_t10_insn(0x34, src, target, 0, c, pos, len), ctx);
+}
+
+#define emit_hppa64_zext32(r, target, ctx) \
+ emit_hppa64_extrd(r, 63, 32, target, false, ctx)
+#define emit_hppa64_sext32(r, target, ctx) \
+ emit_hppa64_extrd(r, 63, 32, target, true, ctx)
+
+static void emit_hppa64_shrd(u8 src, int num, u8 target, bool signed_op, struct hppa_jit_context *ctx)
+{
+ emit_hppa64_extrd(src, 63-num, 64-num, target, signed_op, ctx);
+}
+
+static void emit_hppa64_shrw(u8 src, int num, u8 target, bool signed_op, struct hppa_jit_context *ctx)
+{
+ emit_hppa64_extrw(src, 31-num, 32-num, target, signed_op, ctx);
+}
+
+/* Emit variable-length instructions for 32-bit imm */
+static void emit_imm32(u8 rd, s32 imm, struct hppa_jit_context *ctx)
+{
+ u32 lower = im11(imm);
+
+ REG_SET_SEEN(ctx, rd);
+ if (OPTIMIZE_HPPA && relative_bits_ok(imm, 14)) {
+ emit(hppa_ldi(imm, rd), ctx);
+ return;
+ }
+ if (OPTIMIZE_HPPA && lower == imm) {
+ emit(hppa_ldo(lower, HPPA_REG_ZERO, rd), ctx);
+ return;
+ }
+ emit(hppa_ldil(imm, rd), ctx);
+ if (OPTIMIZE_HPPA && (lower == 0))
+ return;
+ emit(hppa_ldo(lower, rd, rd), ctx);
+}
+
+static bool is_32b_int(s64 val)
+{
+ return val == (s32) val;
+}
+
+/* Emit variable-length instructions for 64-bit imm */
+static void emit_imm(u8 rd, s64 imm, u8 tmpreg, struct hppa_jit_context *ctx)
+{
+ u32 upper32;
+
+ /* get lower 32-bits into rd, sign extended */
+ emit_imm32(rd, imm, ctx);
+
+ /* do we have upper 32-bits too ? */
+ if (OPTIMIZE_HPPA && is_32b_int(imm))
+ return;
+
+ /* load upper 32-bits into lower tmpreg and deposit into rd */
+ upper32 = imm >> 32;
+	if (upper32 || !OPTIMIZE_HPPA) {
+		emit_imm32(tmpreg, upper32, ctx);
+		emit_hppa64_depd(tmpreg, 31, 32, rd, 1, ctx);
+	} else {
+		emit_hppa64_depd(HPPA_REG_ZERO, 31, 32, rd, 1, ctx);
+	}
+}
+
+static int emit_jump(signed long paoff, bool force_far,
+ struct hppa_jit_context *ctx)
+{
+ unsigned long pc, addr;
+
+ /* Note: Use 2 instructions for jumps if force_far is set. */
+ if (relative_bits_ok(paoff - HPPA_BRANCH_DISPLACEMENT, 22)) {
+ /* use BL,long branch followed by nop() */
+ emit(hppa64_bl_long(paoff - HPPA_BRANCH_DISPLACEMENT), ctx);
+ if (force_far)
+ emit(hppa_nop(), ctx);
+ return 0;
+ }
+
+ pc = (uintptr_t) &ctx->insns[ctx->ninsns];
+ addr = pc + (paoff * HPPA_INSN_SIZE);
+ /* even the 64-bit kernel runs in memory below 4GB */
+ if (WARN_ON_ONCE(addr >> 32))
+ return -E2BIG;
+ emit(hppa_ldil(addr, HPPA_REG_R31), ctx);
+ emit(hppa_be_l(im11(addr) >> 2, HPPA_REG_R31, NOP_NEXT_INSTR), ctx);
+ return 0;
+}
+
+static void __build_epilogue(bool is_tail_call, struct hppa_jit_context *ctx)
+{
+ int i;
+
+ if (is_tail_call) {
+ /*
+ * goto *(t0 + 4);
+ * Skips first instruction of prologue which initializes tail
+ * call counter. Assumes t0 contains address of target program,
+ * see emit_bpf_tail_call.
+ */
+ emit(hppa_ldo(1 * HPPA_INSN_SIZE, HPPA_REG_T0, HPPA_REG_T0), ctx);
+ emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_T0, EXEC_NEXT_INSTR), ctx);
+ /* in delay slot: */
+ emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_IN_INIT), ctx);
+
+ return;
+ }
+
+ /* load epilogue function pointer and jump to it. */
+	/* exit point is either at next instruction, or the outermost TCC exit function */
+ emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);
+ emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
+
+ /* NOTE: we are 64-bit and big-endian, so return lower sign-extended 32-bit value */
+ emit_hppa64_sext32(regmap[BPF_REG_0], HPPA_REG_RET0, ctx);
+
+ /* Restore callee-saved registers. */
+ for (i = 3; i <= 15; i++) {
+ if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i)))
+ continue;
+ emit(hppa64_ldd_im16(-REG_SIZE * i, HPPA_REG_SP, HPPA_R(i)), ctx);
+ }
+
+	/* load original return pointer (stored by outermost TCC function) */
+ emit(hppa64_ldd_im16(-2*REG_SIZE, HPPA_REG_SP, HPPA_REG_RP), ctx);
+ emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_RP, EXEC_NEXT_INSTR), ctx);
+ /* in delay slot: */
+ emit(hppa64_ldd_im5(-REG_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);
+
+	emit(hppa_nop(), ctx);	// XXX why is one nop too few ??
+}
+
+static int emit_branch(u8 op, u8 rd, u8 rs, signed long paoff,
+ struct hppa_jit_context *ctx)
+{
+ int e, s;
+ bool far = false;
+ int off;
+
+ if (op == BPF_JSET) {
+ /*
+ * BPF_JSET is a special case: it has no inverse so translate
+ * to and() function and compare against zero
+ */
+ emit(hppa_and(rd, rs, HPPA_REG_T0), ctx);
+ paoff -= 1; /* reduce offset due to hppa_and() above */
+ rd = HPPA_REG_T0;
+ rs = HPPA_REG_ZERO;
+ op = BPF_JNE;
+ }
+
+ /* set start after BPF_JSET */
+ s = ctx->ninsns;
+
+ if (!relative_branch_ok(paoff - HPPA_BRANCH_DISPLACEMENT + 1, 12)) {
+ op = invert_bpf_cond(op);
+ far = true;
+ }
+
+ /*
+ * For a far branch, the condition is negated and we jump over the
+ * branch itself, and the two instructions from emit_jump.
+ * For a near branch, just use paoff.
+ */
+ off = far ? (2 - HPPA_BRANCH_DISPLACEMENT) : paoff - HPPA_BRANCH_DISPLACEMENT;
+
+ switch (op) {
+ /* IF (dst COND src) JUMP off */
+ case BPF_JEQ:
+ emit(hppa_beq(rd, rs, off), ctx);
+ break;
+ case BPF_JGT:
+ emit(hppa_bgtu(rd, rs, off), ctx);
+ break;
+ case BPF_JLT:
+ emit(hppa_bltu(rd, rs, off), ctx);
+ break;
+ case BPF_JGE:
+ emit(hppa_bgeu(rd, rs, off), ctx);
+ break;
+ case BPF_JLE:
+ emit(hppa_bleu(rd, rs, off), ctx);
+ break;
+ case BPF_JNE:
+ emit(hppa_bne(rd, rs, off), ctx);
+ break;
+ case BPF_JSGT:
+ emit(hppa_bgt(rd, rs, off), ctx);
+ break;
+ case BPF_JSLT:
+ emit(hppa_blt(rd, rs, off), ctx);
+ break;
+ case BPF_JSGE:
+ emit(hppa_bge(rd, rs, off), ctx);
+ break;
+ case BPF_JSLE:
+ emit(hppa_ble(rd, rs, off), ctx);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ if (far) {
+ int ret;
+ e = ctx->ninsns;
+ /* Adjust for extra insns. */
+ paoff -= (e - s);
+ ret = emit_jump(paoff, true, ctx);
+ if (ret)
+ return ret;
+ } else {
+		/*
+		 * Always emit 2 nops in place of the far branch so the
+		 * instruction count is identical in both cases, which reduces
+		 * the number of JIT translation passes.
+		 */
+ emit(hppa_nop(), ctx);
+ emit(hppa_nop(), ctx);
+ }
+ return 0;
+}
+
+static void emit_zext_32(u8 reg, struct hppa_jit_context *ctx)
+{
+ emit_hppa64_zext32(reg, reg, ctx);
+}
+
+static void emit_bpf_tail_call(int insn, struct hppa_jit_context *ctx)
+{
+ /*
+ * R1 -> &ctx
+ * R2 -> &array
+ * R3 -> index
+ */
+ int off;
+ const s8 arr_reg = regmap[BPF_REG_2];
+ const s8 idx_reg = regmap[BPF_REG_3];
+ struct bpf_array bpfa;
+ struct bpf_prog bpfp;
+
+ /* if there is any tail call, we need to save & restore all registers */
+ REG_SET_SEEN_ALL(ctx);
+
+ /* get address of TCC main exit function for error case into rp */
+ emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);
+
+ /* max_entries = array->map.max_entries; */
+ off = offsetof(struct bpf_array, map.max_entries);
+ BUILD_BUG_ON(sizeof(bpfa.map.max_entries) != 4);
+ emit(hppa_ldw(off, arr_reg, HPPA_REG_T1), ctx);
+
+ /*
+ * if (index >= max_entries)
+ * goto out;
+ */
+ emit(hppa_bltu(idx_reg, HPPA_REG_T1, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
+ emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
+
+ /*
+ * if (--tcc < 0)
+ * goto out;
+ */
+ REG_FORCE_SEEN(ctx, HPPA_REG_TCC);
+ emit(hppa_ldo(-1, HPPA_REG_TCC, HPPA_REG_TCC), ctx);
+ emit(hppa_bge(HPPA_REG_TCC, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
+ emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
+
+ /*
+ * prog = array->ptrs[index];
+ * if (!prog)
+ * goto out;
+ */
+ BUILD_BUG_ON(sizeof(bpfa.ptrs[0]) != 8);
+ emit(hppa64_shladd(idx_reg, 3, arr_reg, HPPA_REG_T0), ctx);
+ off = offsetof(struct bpf_array, ptrs);
+ BUILD_BUG_ON(off < 16);
+ emit(hppa64_ldd_im16(off, HPPA_REG_T0, HPPA_REG_T0), ctx);
+ emit(hppa_bne(HPPA_REG_T0, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
+ emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
+
+ /*
+ * tcc = temp_tcc;
+ * goto *(prog->bpf_func + 4);
+ */
+ off = offsetof(struct bpf_prog, bpf_func);
+ BUILD_BUG_ON(off < 16);
+ BUILD_BUG_ON(sizeof(bpfp.bpf_func) != 8);
+ emit(hppa64_ldd_im16(off, HPPA_REG_T0, HPPA_REG_T0), ctx);
+ /* Epilogue jumps to *(t0 + 4). */
+ __build_epilogue(true, ctx);
+}
+
+static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
+ struct hppa_jit_context *ctx)
+{
+ u8 code = insn->code;
+
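+	/* These opcodes have no dst_reg, so don't map a destination register. */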
+ switch (code) {
+ case BPF_JMP | BPF_JA:
+ case BPF_JMP | BPF_CALL:
+ case BPF_JMP | BPF_EXIT:
+ case BPF_JMP | BPF_TAIL_CALL:
+ break;
+ default:
+ *rd = bpf_to_hppa_reg(insn->dst_reg, ctx);
+ }
+
+ if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) ||
+ code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) ||
+ code & BPF_LDX || code & BPF_STX)
+ *rs = bpf_to_hppa_reg(insn->src_reg, ctx);
+}
+
+static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct hppa_jit_context *ctx)
+{
+ emit_hppa64_zext32(*rd, HPPA_REG_T2, ctx);
+ *rd = HPPA_REG_T2;
+ emit_hppa64_zext32(*rs, HPPA_REG_T1, ctx);
+ *rs = HPPA_REG_T1;
+}
+
+static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct hppa_jit_context *ctx)
+{
+ emit_hppa64_sext32(*rd, HPPA_REG_T2, ctx);
+ *rd = HPPA_REG_T2;
+ emit_hppa64_sext32(*rs, HPPA_REG_T1, ctx);
+ *rs = HPPA_REG_T1;
+}
+
+static void emit_zext_32_rd_t1(u8 *rd, struct hppa_jit_context *ctx)
+{
+ emit_hppa64_zext32(*rd, HPPA_REG_T2, ctx);
+ *rd = HPPA_REG_T2;
+ emit_zext_32(HPPA_REG_T1, ctx);
+}
+
+static void emit_sext_32_rd(u8 *rd, struct hppa_jit_context *ctx)
+{
+ emit_hppa64_sext32(*rd, HPPA_REG_T2, ctx);
+ *rd = HPPA_REG_T2;
+}
+
+static bool is_signed_bpf_cond(u8 cond)
+{
+ return cond == BPF_JSGT || cond == BPF_JSLT ||
+ cond == BPF_JSGE || cond == BPF_JSLE;
+}
+
+static void emit_call(u64 addr, bool fixed, struct hppa_jit_context *ctx)
+{
+ const int offset_sp = 2*FRAME_SIZE;
+
+ emit(hppa_ldo(offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx);
+
+ emit_hppa_copy(regmap[BPF_REG_1], HPPA_REG_ARG0, ctx);
+ emit_hppa_copy(regmap[BPF_REG_2], HPPA_REG_ARG1, ctx);
+ emit_hppa_copy(regmap[BPF_REG_3], HPPA_REG_ARG2, ctx);
+ emit_hppa_copy(regmap[BPF_REG_4], HPPA_REG_ARG3, ctx);
+ emit_hppa_copy(regmap[BPF_REG_5], HPPA_REG_ARG4, ctx);
+
+ /* Backup TCC. */
+ REG_FORCE_SEEN(ctx, HPPA_REG_TCC_SAVED);
+ if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
+ emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_SAVED), ctx);
+
+ /*
+ * Use ldil() to load absolute address. Don't use emit_imm as the
+ * number of emitted instructions should not depend on the value of
+ * addr.
+ */
+ WARN_ON(addr >> 32);
+ /* load function address and gp from Elf64_Fdesc descriptor */
+ emit(hppa_ldil(addr, HPPA_REG_R31), ctx);
+ emit(hppa_ldo(im11(addr), HPPA_REG_R31, HPPA_REG_R31), ctx);
+ emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, addr),
+ HPPA_REG_R31, HPPA_REG_RP), ctx);
+ emit(hppa64_bve_l_rp(HPPA_REG_RP), ctx);
+ emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, gp),
+ HPPA_REG_R31, HPPA_REG_GP), ctx);
+
+ /* Restore TCC. */
+ if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
+ emit(hppa_copy(HPPA_REG_TCC_SAVED, HPPA_REG_TCC), ctx);
+
+ emit(hppa_ldo(-offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx);
+
+ /* Set return value. */
+ emit_hppa_copy(HPPA_REG_RET0, regmap[BPF_REG_0], ctx);
+}
+
+static void emit_call_libgcc_ll(void *func, const s8 arg0,
+ const s8 arg1, u8 opcode, struct hppa_jit_context *ctx)
+{
+ u64 func_addr;
+
+ if (BPF_CLASS(opcode) == BPF_ALU) {
+ emit_hppa64_zext32(arg0, HPPA_REG_ARG0, ctx);
+ emit_hppa64_zext32(arg1, HPPA_REG_ARG1, ctx);
+ } else {
+ emit_hppa_copy(arg0, HPPA_REG_ARG0, ctx);
+ emit_hppa_copy(arg1, HPPA_REG_ARG1, ctx);
+ }
+
+	/* libgcc overwrites HPPA_REG_RET0, so keep a copy in HPPA_REG_TCC_SAVED */
+ if (arg0 != HPPA_REG_RET0) {
+ REG_SET_SEEN(ctx, HPPA_REG_TCC_SAVED);
+ emit(hppa_copy(HPPA_REG_RET0, HPPA_REG_TCC_SAVED), ctx);
+ }
+
+ /* set up stack */
+ emit(hppa_ldo(FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);
+
+ func_addr = (uintptr_t) func;
+ /* load function func_address and gp from Elf64_Fdesc descriptor */
+ emit_imm(HPPA_REG_R31, func_addr, arg0, ctx);
+ emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, addr),
+ HPPA_REG_R31, HPPA_REG_RP), ctx);
+ /* skip the following bve_l instruction if divisor is 0. */
+ if (BPF_OP(opcode) == BPF_DIV || BPF_OP(opcode) == BPF_MOD) {
+ if (BPF_OP(opcode) == BPF_DIV)
+ emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET0, ctx);
+		else
+			emit_hppa_copy(HPPA_REG_ARG0, HPPA_REG_RET0, ctx);
+ emit(hppa_beq(HPPA_REG_ARG1, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
+ }
+ emit(hppa64_bve_l_rp(HPPA_REG_RP), ctx);
+ emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, gp),
+ HPPA_REG_R31, HPPA_REG_GP), ctx);
+
+ emit(hppa_ldo(-FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);
+
+ emit_hppa_copy(HPPA_REG_RET0, arg0, ctx);
+
+ /* restore HPPA_REG_RET0 */
+ if (arg0 != HPPA_REG_RET0)
+ emit(hppa_copy(HPPA_REG_TCC_SAVED, HPPA_REG_RET0), ctx);
+}
+
+static void emit_store(const s8 rd, const s8 rs, s16 off,
+ struct hppa_jit_context *ctx, const u8 size,
+ const u8 mode)
+{
+ s8 dstreg;
+
+ /* need to calculate address since offset does not fit in 14 bits? */
+ if (relative_bits_ok(off, 14))
+ dstreg = rd;
+ else {
+ /* need to use R1 here, since addil puts result into R1 */
+ dstreg = HPPA_REG_R1;
+ emit(hppa_addil(off, rd), ctx);
+ off = im11(off);
+ }
+
+ switch (size) {
+ case BPF_B:
+ emit(hppa_stb(rs, off, dstreg), ctx);
+ break;
+ case BPF_H:
+ emit(hppa_sth(rs, off, dstreg), ctx);
+ break;
+ case BPF_W:
+ emit(hppa_stw(rs, off, dstreg), ctx);
+ break;
+ case BPF_DW:
+ if (off & 7) {
+ emit(hppa_ldo(off, dstreg, HPPA_REG_R1), ctx);
+ emit(hppa64_std_im5(rs, 0, HPPA_REG_R1), ctx);
+		} else if (off >= -16 && off <= 15) {
+			emit(hppa64_std_im5(rs, off, dstreg), ctx);
+		} else {
+			emit(hppa64_std_im16(rs, off, dstreg), ctx);
+		}
+ break;
+ }
+}
+
+int bpf_jit_emit_insn(const struct bpf_insn *insn, struct hppa_jit_context *ctx,
+ bool extra_pass)
+{
+ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
+ BPF_CLASS(insn->code) == BPF_JMP;
+ int s, e, ret, i = insn - ctx->prog->insnsi;
+ s64 paoff;
+ struct bpf_prog_aux *aux = ctx->prog->aux;
+ u8 rd = -1, rs = -1, code = insn->code;
+ s16 off = insn->off;
+ s32 imm = insn->imm;
+
+ init_regs(&rd, &rs, insn, ctx);
+
+ switch (code) {
+ /* dst = src */
+ case BPF_ALU | BPF_MOV | BPF_X:
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+ if (imm == 1) {
+ /* Special mov32 for zext */
+ emit_zext_32(rd, ctx);
+ break;
+ }
+ if (!is64 && !aux->verifier_zext)
+ emit_hppa64_zext32(rs, rd, ctx);
+ else
+ emit_hppa_copy(rs, rd, ctx);
+ break;
+
+ /* dst = dst OP src */
+ case BPF_ALU | BPF_ADD | BPF_X:
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ emit(hppa_add(rd, rs, rd), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_X:
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ emit(hppa_sub(rd, rs, rd), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_X:
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ emit(hppa_and(rd, rs, rd), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_X:
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ emit(hppa_or(rd, rs, rd), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_X:
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+ emit(hppa_xor(rd, rs, rd), ctx);
+ if (!is64 && !aux->verifier_zext && rs != rd)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K:
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+ emit_imm(HPPA_REG_T1, is64 ? (s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx);
+ rs = HPPA_REG_T1;
+ fallthrough;
+ case BPF_ALU | BPF_MUL | BPF_X:
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ emit_call_libgcc_ll(__muldi3, rd, rs, code, ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_K:
+ case BPF_ALU64 | BPF_DIV | BPF_K:
+ emit_imm(HPPA_REG_T1, is64 ? (s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx);
+ rs = HPPA_REG_T1;
+ fallthrough;
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+ emit_call_libgcc_ll(&hppa_div64, rd, rs, code, ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K:
+ case BPF_ALU64 | BPF_MOD | BPF_K:
+ emit_imm(HPPA_REG_T1, is64 ? (s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx);
+ rs = HPPA_REG_T1;
+ fallthrough;
+ case BPF_ALU | BPF_MOD | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ emit_call_libgcc_ll(&hppa_div64_rem, rd, rs, code, ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ emit_hppa64_sext32(rs, HPPA_REG_T0, ctx);
+ emit(hppa64_mtsarcm(HPPA_REG_T0), ctx);
+ if (is64)
+ emit(hppa64_depdz_sar(rd, rd), ctx);
+ else
+ emit(hppa_depwz_sar(rd, rd), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_X:
+ case BPF_ALU64 | BPF_RSH | BPF_X:
+ emit(hppa_mtsar(rs), ctx);
+ if (is64)
+ emit(hppa64_shrpd_sar(rd, rd), ctx);
+ else
+ emit(hppa_shrpw_sar(rd, rd), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_X:
+ case BPF_ALU64 | BPF_ARSH | BPF_X:
+ emit_hppa64_sext32(rs, HPPA_REG_T0, ctx);
+ emit(hppa64_mtsarcm(HPPA_REG_T0), ctx);
+ if (is64)
+ emit(hppa_extrd_sar(rd, rd, 1), ctx);
+ else
+ emit(hppa_extrws_sar(rd, rd), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+
+ /* dst = -dst */
+ case BPF_ALU | BPF_NEG:
+ case BPF_ALU64 | BPF_NEG:
+ emit(hppa_sub(HPPA_REG_ZERO, rd, rd), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+
+ /* dst = BSWAP##imm(dst) */
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ switch (imm) {
+ case 16:
+ /* zero-extend the lower 16 bits to 64 bits */
+ emit_hppa64_depd(HPPA_REG_ZERO, 63-16, 64-16, rd, 1, ctx);
+ break;
+ case 32:
+ if (!aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case 64:
+ /* Do nothing */
+ break;
+ }
+ break;
+
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ switch (imm) {
+ case 16:
+ emit(hppa_extru(rd, 31 - 8, 8, HPPA_REG_T1), ctx);
+ emit(hppa_depwz(rd, 23, 8, HPPA_REG_T1), ctx);
+ emit(hppa_extru(HPPA_REG_T1, 31, 16, rd), ctx);
+ emit_hppa64_extrd(HPPA_REG_T1, 63, 16, rd, 0, ctx);
+ break;
+ case 32:
+ emit(hppa_shrpw(rd, rd, 16, HPPA_REG_T1), ctx);
+ emit_hppa64_depd(HPPA_REG_T1, 63-16, 8, HPPA_REG_T1, 1, ctx);
+ emit(hppa_shrpw(rd, HPPA_REG_T1, 8, HPPA_REG_T1), ctx);
+ emit_hppa64_extrd(HPPA_REG_T1, 63, 32, rd, 0, ctx);
+ break;
+ case 64:
+ emit(hppa64_permh_3210(rd, HPPA_REG_T1), ctx);
+ emit(hppa64_hshl(HPPA_REG_T1, 8, HPPA_REG_T2), ctx);
+ emit(hppa64_hshr_u(HPPA_REG_T1, 8, HPPA_REG_T1), ctx);
+ emit(hppa_or(HPPA_REG_T2, HPPA_REG_T1, rd), ctx);
+ break;
+ default:
+ pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
+ return -1;
+ }
+ break;
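+ /*
+ * The shift/extract sequences above are byte swaps; as a sketch,
+ * they compute the equivalent of the kernel helpers:
+ *
+ *	imm 16: rd = swab16((u16)rd);
+ *	imm 32: rd = swab32((u32)rd);
+ *	imm 64: rd = swab64(rd);
+ *
+ * hppa is big-endian, which is also why BPF_FROM_BE above only
+ * needs to truncate/zero-extend.
+ */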
+
+ /* dst = imm */
+ case BPF_ALU | BPF_MOV | BPF_K:
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ emit_imm(rd, imm, HPPA_REG_T2, ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+
+ /* dst = dst OP imm */
+ case BPF_ALU | BPF_ADD | BPF_K:
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+ if (relative_bits_ok(imm, 14)) {
+ emit(hppa_ldo(imm, rd, rd), ctx);
+ } else {
+ emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
+ emit(hppa_add(rd, HPPA_REG_T1, rd), ctx);
+ }
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_K:
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+ if (relative_bits_ok(-imm, 14)) {
+ emit(hppa_ldo(-imm, rd, rd), ctx);
+ } else {
+ emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
+ emit(hppa_sub(rd, HPPA_REG_T1, rd), ctx);
+ }
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_K:
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
+ emit(hppa_and(rd, HPPA_REG_T1, rd), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_K:
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
+ emit(hppa_or(rd, HPPA_REG_T1, rd), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_K:
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
+ emit(hppa_xor(rd, HPPA_REG_T1, rd), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ if (imm != 0)
+ emit_hppa64_shld(rd, imm, rd, ctx);
+
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ if (imm != 0) {
+ if (is64)
+ emit_hppa64_shrd(rd, imm, rd, false, ctx);
+ else
+ emit_hppa64_shrw(rd, imm, rd, false, ctx);
+ }
+
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ if (imm != 0) {
+ if (is64)
+ emit_hppa64_shrd(rd, imm, rd, true, ctx);
+ else
+ emit_hppa64_shrw(rd, imm, rd, true, ctx);
+ }
+
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+
+ /* JUMP off */
+ case BPF_JMP | BPF_JA:
+ paoff = hppa_offset(i, off, ctx);
+ ret = emit_jump(paoff, false, ctx);
+ if (ret)
+ return ret;
+ break;
+
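+ /*
+ * Note: hppa_offset(i, off, ctx) converts a jump offset counted in
+ * BPF instructions into one counted in emitted hppa instructions,
+ * using the per-insn offset table that the convergence loop in
+ * bpf_int_jit_compile() maintains. Every helper instruction emitted
+ * after that conversion but before the branch itself (widening, the
+ * JSET 'and') must therefore be subtracted from paoff again, as done
+ * below.
+ */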
+ /* IF (dst COND src) JUMP off */
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ paoff = hppa_offset(i, off, ctx);
+ if (!is64) {
+ s = ctx->ninsns;
+ if (is_signed_bpf_cond(BPF_OP(code)))
+ emit_sext_32_rd_rs(&rd, &rs, ctx);
+ else
+ emit_zext_32_rd_rs(&rd, &rs, ctx);
+ e = ctx->ninsns;
+
+ /* Adjust for extra insns */
+ paoff -= (e - s);
+ }
+ if (BPF_OP(code) == BPF_JSET) {
+ /* Adjust for and */
+ paoff -= 1;
+ emit(hppa_and(rs, rd, HPPA_REG_T1), ctx);
+ emit_branch(BPF_JNE, HPPA_REG_T1, HPPA_REG_ZERO, paoff,
+ ctx);
+ } else {
+ emit_branch(BPF_OP(code), rd, rs, paoff, ctx);
+ }
+ break;
+
+ /* IF (dst COND imm) JUMP off */
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ paoff = hppa_offset(i, off, ctx);
+ s = ctx->ninsns;
+ if (imm) {
+ emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
+ rs = HPPA_REG_T1;
+ } else {
+ rs = HPPA_REG_ZERO;
+ }
+ if (!is64) {
+ if (is_signed_bpf_cond(BPF_OP(code)))
+ emit_sext_32_rd(&rd, ctx);
+ else
+ emit_zext_32_rd_t1(&rd, ctx);
+ }
+ e = ctx->ninsns;
+
+ /* Adjust for extra insns */
+ paoff -= (e - s);
+ emit_branch(BPF_OP(code), rd, rs, paoff, ctx);
+ break;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ paoff = hppa_offset(i, off, ctx);
+ s = ctx->ninsns;
+ emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
+ emit(hppa_and(HPPA_REG_T1, rd, HPPA_REG_T1), ctx);
+ /* For jset32, we should clear the upper 32 bits of t1, but
+ * sign-extension is sufficient here and saves one instruction,
+ * as t1 is used only in comparison against zero.
+ */
+ if (!is64 && imm < 0)
+ emit_hppa64_sext32(HPPA_REG_T1, HPPA_REG_T1, ctx);
+ e = ctx->ninsns;
+ paoff -= (e - s);
+ emit_branch(BPF_JNE, HPPA_REG_T1, HPPA_REG_ZERO, paoff, ctx);
+ break;
+ /* function call */
+ case BPF_JMP | BPF_CALL:
+ {
+ bool fixed_addr;
+ u64 addr;
+
+ ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
+ &addr, &fixed_addr);
+ if (ret < 0)
+ return ret;
+
+ REG_SET_SEEN_ALL(ctx);
+ emit_call(addr, fixed_addr, ctx);
+ break;
+ }
+ /* tail call */
+ case BPF_JMP | BPF_TAIL_CALL:
+ emit_bpf_tail_call(i, ctx);
+ break;
+
+ /* function return */
+ case BPF_JMP | BPF_EXIT:
+ /* no jump needed; the epilogue directly follows the last insn */
+ if (i == ctx->prog->len - 1)
+ break;
+
+ paoff = epilogue_offset(ctx);
+ ret = emit_jump(paoff, false, ctx);
+ if (ret)
+ return ret;
+ break;
+
+ /* dst = imm64 */
+ case BPF_LD | BPF_IMM | BPF_DW:
+ {
+ struct bpf_insn insn1 = insn[1];
+ u64 imm64 = (u64)insn1.imm << 32 | (u32)imm;
+ if (bpf_pseudo_func(insn))
+ imm64 = (uintptr_t)dereference_function_descriptor((void*)imm64);
+ emit_imm(rd, imm64, HPPA_REG_T2, ctx);
+
+ return 1;
+ }
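+ /*
+ * The 64-bit immediate spans two BPF instruction slots:
+ * imm64 = ((u64)insn[1].imm << 32) | (u32)insn[0].imm. Returning 1
+ * makes build_body() skip the second slot. For BPF pseudo functions
+ * the descriptor is dereferenced first, since on 64-bit hppa a
+ * function pointer is an Elf64_Fdesc, not a code address.
+ */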
+
+ /* LDX: dst = *(size *)(src + off) */
+ case BPF_LDX | BPF_MEM | BPF_B:
+ case BPF_LDX | BPF_MEM | BPF_H:
+ case BPF_LDX | BPF_MEM | BPF_W:
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEM | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ {
+ u8 srcreg;
+
+ /* calculate the address separately when the offset does not fit in 14 bits */
+ if (relative_bits_ok(off, 14)) {
+ srcreg = rs;
+ } else {
+ /* need to use R1 here, since addil puts its result into R1 */
+ srcreg = HPPA_REG_R1;
+ BUG_ON(rs == HPPA_REG_R1);
+ BUG_ON(rd == HPPA_REG_R1);
+ emit(hppa_addil(off, rs), ctx);
+ off = im11(off);
+ }
+
+ switch (BPF_SIZE(code)) {
+ case BPF_B:
+ emit(hppa_ldb(off, srcreg, rd), ctx);
+ if (insn_is_zext(&insn[1]))
+ return 1;
+ break;
+ case BPF_H:
+ emit(hppa_ldh(off, srcreg, rd), ctx);
+ if (insn_is_zext(&insn[1]))
+ return 1;
+ break;
+ case BPF_W:
+ emit(hppa_ldw(off, srcreg, rd), ctx);
+ if (insn_is_zext(&insn[1]))
+ return 1;
+ break;
+ case BPF_DW:
+ if (off & 7) {
+ emit(hppa_ldo(off, srcreg, HPPA_REG_R1), ctx);
+ emit(hppa64_ldd_reg(HPPA_REG_ZERO, HPPA_REG_R1, rd), ctx);
+ } else if (off >= -16 && off <= 15)
+ emit(hppa64_ldd_im5(off, srcreg, rd), ctx);
+ else
+ emit(hppa64_ldd_im16(off, srcreg, rd), ctx);
+ break;
+ }
+ break;
+ }
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
+ /* ST: *(size *)(dst + off) = imm */
+ /* STX: *(size *)(dst + off) = src */
+ case BPF_ST | BPF_MEM | BPF_B:
+ case BPF_ST | BPF_MEM | BPF_H:
+ case BPF_ST | BPF_MEM | BPF_W:
+ case BPF_ST | BPF_MEM | BPF_DW:
+
+ case BPF_STX | BPF_MEM | BPF_B:
+ case BPF_STX | BPF_MEM | BPF_H:
+ case BPF_STX | BPF_MEM | BPF_W:
+ case BPF_STX | BPF_MEM | BPF_DW:
+ if (BPF_CLASS(code) == BPF_ST) {
+ emit_imm(HPPA_REG_T2, imm, HPPA_REG_T1, ctx);
+ rs = HPPA_REG_T2;
+ }
+
+ emit_store(rd, rs, off, ctx, BPF_SIZE(code), BPF_MODE(code));
+ break;
+
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ pr_info_once("bpf-jit: not supported: atomic operation %02x\n",
+ insn->imm);
+ return -EFAULT;
+
+ default:
+ pr_err("bpf-jit: unknown opcode %02x\n", code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void bpf_jit_build_prologue(struct hppa_jit_context *ctx)
+{
+ int bpf_stack_adjust, stack_adjust, i;
+ unsigned long addr;
+ s8 reg;
+
+ /*
+ * The stack on hppa grows upwards. Tail calls reuse the current
+ * frame, so if tail calls are used we must allocate the maximum
+ * possible stack size up front.
+ */
+ if (REG_ALL_SEEN(ctx))
+ bpf_stack_adjust = MAX_BPF_STACK;
+ else
+ bpf_stack_adjust = ctx->prog->aux->stack_depth;
+ bpf_stack_adjust = round_up(bpf_stack_adjust, STACK_ALIGN);
+
+ stack_adjust = FRAME_SIZE + bpf_stack_adjust;
+ stack_adjust = round_up(stack_adjust, STACK_ALIGN);
+
+ /*
+ * NOTE: We construct an Elf64_Fdesc descriptor here.
+ * The first 4 words initialize the TCC and compare it.
+ * Then follow the virtual address of the eBPF function
+ * and the gp for this function.
+ *
+ * The first instruction sets the tail-call-counter (TCC) register.
+ * This instruction is skipped by tail calls.
+ * Since the TCC proper lives in a caller-saved register, a
+ * temporary register (HPPA_REG_TCC_IN_INIT) is used initially.
+ */
+ REG_FORCE_SEEN(ctx, HPPA_REG_TCC_IN_INIT);
+ emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC_IN_INIT), ctx);
+
+ /*
+ * Skip all initialization when called as a BPF tail call.
+ */
+ emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_R1), ctx);
+ emit(hppa_beq(HPPA_REG_TCC_IN_INIT, HPPA_REG_R1, 6 - HPPA_BRANCH_DISPLACEMENT), ctx);
+ emit(hppa64_bl_long(ctx->prologue_len - 3 - HPPA_BRANCH_DISPLACEMENT), ctx);
+
+ /* store entry address of this eBPF function */
+ addr = (uintptr_t) &ctx->insns[0];
+ emit(addr >> 32, ctx);
+ emit(addr & 0xffffffff, ctx);
+
+ /* store gp of this eBPF function */
+ asm("copy %%r27,%0" : "=r" (addr) );
+ emit(addr >> 32, ctx);
+ emit(addr & 0xffffffff, ctx);
+
+ /* Set up hppa stack frame. */
+ emit_hppa_copy(HPPA_REG_SP, HPPA_REG_R1, ctx);
+ emit(hppa_ldo(stack_adjust, HPPA_REG_SP, HPPA_REG_SP), ctx);
+ emit(hppa64_std_im5 (HPPA_REG_R1, -REG_SIZE, HPPA_REG_SP), ctx);
+ emit(hppa64_std_im16(HPPA_REG_RP, -2*REG_SIZE, HPPA_REG_SP), ctx);
+
+ /* Save callee-save registers. */
+ for (i = 3; i <= 15; i++) {
+ if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i)))
+ continue;
+ emit(hppa64_std_im16(HPPA_R(i), -REG_SIZE * i, HPPA_REG_SP), ctx);
+ }
+
+ /* load function parameters; load all if we use tail calls */
+ #define LOAD_PARAM(arg, dst) \
+ if (REG_WAS_SEEN(ctx, regmap[dst]) || \
+ REG_WAS_SEEN(ctx, HPPA_REG_TCC)) \
+ emit_hppa_copy(arg, regmap[dst], ctx)
+ LOAD_PARAM(HPPA_REG_ARG0, BPF_REG_1);
+ LOAD_PARAM(HPPA_REG_ARG1, BPF_REG_2);
+ LOAD_PARAM(HPPA_REG_ARG2, BPF_REG_3);
+ LOAD_PARAM(HPPA_REG_ARG3, BPF_REG_4);
+ LOAD_PARAM(HPPA_REG_ARG4, BPF_REG_5);
+ #undef LOAD_PARAM
+
+ REG_FORCE_SEEN(ctx, HPPA_REG_T0);
+ REG_FORCE_SEEN(ctx, HPPA_REG_T1);
+ REG_FORCE_SEEN(ctx, HPPA_REG_T2);
+
+ /*
+ * Now set the real tail-call counter (TCC) register; the
+ * initialization above used HPPA_REG_TCC_IN_INIT instead.
+ */
+ if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
+ emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC), ctx);
+
+ /*
+ * Save epilogue function pointer for outer TCC call chain.
+ * The main TCC call stores the final RP on stack.
+ */
+ addr = (uintptr_t) &ctx->insns[ctx->epilogue_offset];
+ /* skip first two instructions which jump to exit */
+ addr += 2 * HPPA_INSN_SIZE;
+ emit_imm(HPPA_REG_T2, addr, HPPA_REG_T1, ctx);
+ emit(EXIT_PTR_STORE(HPPA_REG_T2), ctx);
+
+ /* Set up BPF frame pointer. */
+ reg = regmap[BPF_REG_FP]; /* -> HPPA_REG_FP */
+ if (REG_WAS_SEEN(ctx, reg))
+ emit(hppa_ldo(-FRAME_SIZE, HPPA_REG_SP, reg), ctx);
+}
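+
+/*
+ * Shape of the emitted prologue (a sketch derived from the emits above;
+ * displacement details omitted):
+ *
+ *	ldi	MAX_TAIL_CALL_CNT, TCC_IN_INIT	- skipped by tail calls
+ *	ldi	MAX_TAIL_CALL_CNT, %r1
+ *	beq	TCC_IN_INIT, %r1, init		- fresh call: counter still at max
+ *	b,l	body				- tail call: bypass initialization
+ *	.word	entry address (hi, lo)		- Elf64_Fdesc.addr of this program
+ *	.word	gp (hi, lo)			- Elf64_Fdesc.gp
+ * init:	frame setup, callee-saves, parameter loads, real TCC
+ */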
+
+void bpf_jit_build_epilogue(struct hppa_jit_context *ctx)
+{
+ __build_epilogue(false, ctx);
+}
+
+bool bpf_jit_supports_kfunc_call(void)
+{
+ return true;
+}
diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c
new file mode 100644
index 000000000000..06cbcd6fe87b
--- /dev/null
+++ b/arch/parisc/net/bpf_jit_core.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common functionality for HPPA32 and HPPA64 BPF JIT compilers
+ *
+ * Copyright (c) 2023 Helge Deller <deller@gmx.de>
+ *
+ */
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include "bpf_jit.h"
+
+/* Number of iterations to try until offsets converge. */
+#define NR_JIT_ITERATIONS 35
+
+static int build_body(struct hppa_jit_context *ctx, bool extra_pass, int *offset)
+{
+ const struct bpf_prog *prog = ctx->prog;
+ int i;
+
+ ctx->reg_seen_collect = true;
+ for (i = 0; i < prog->len; i++) {
+ const struct bpf_insn *insn = &prog->insnsi[i];
+ int ret;
+
+ ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
+ /* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
+ if (ret > 0)
+ i++;
+ if (offset)
+ offset[i] = ctx->ninsns;
+ if (ret < 0)
+ return ret;
+ }
+ ctx->reg_seen_collect = false;
+ return 0;
+}
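+
+/*
+ * build_body() is run repeatedly by bpf_int_jit_compile(): each pass
+ * records in offset[] how many hppa instructions have been emitted up
+ * to each BPF instruction, so branch displacements computed from that
+ * table converge to a fixed point over the iterations.
+ */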
+
+bool bpf_jit_needs_zext(void)
+{
+ return true;
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ unsigned int prog_size = 0, extable_size = 0;
+ bool tmp_blinded = false, extra_pass = false;
+ struct bpf_prog *tmp, *orig_prog = prog;
+ int pass = 0, prev_ninsns = 0, prologue_len, i;
+ struct hppa_jit_data *jit_data;
+ struct hppa_jit_context *ctx;
+
+ if (!prog->jit_requested)
+ return orig_prog;
+
+ tmp = bpf_jit_blind_constants(prog);
+ if (IS_ERR(tmp))
+ return orig_prog;
+ if (tmp != prog) {
+ tmp_blinded = true;
+ prog = tmp;
+ }
+
+ jit_data = prog->aux->jit_data;
+ if (!jit_data) {
+ jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+ if (!jit_data) {
+ prog = orig_prog;
+ goto out;
+ }
+ prog->aux->jit_data = jit_data;
+ }
+
+ ctx = &jit_data->ctx;
+
+ if (ctx->offset) {
+ extra_pass = true;
+ prog_size = sizeof(*ctx->insns) * ctx->ninsns;
+ goto skip_init_ctx;
+ }
+
+ ctx->prog = prog;
+ ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+ if (!ctx->offset) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+ for (i = 0; i < prog->len; i++) {
+ prev_ninsns += 20;
+ ctx->offset[i] = prev_ninsns;
+ }
+
+ for (i = 0; i < NR_JIT_ITERATIONS; i++) {
+ pass++;
+ ctx->ninsns = 0;
+ if (build_body(ctx, extra_pass, ctx->offset)) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+ ctx->body_len = ctx->ninsns;
+ bpf_jit_build_prologue(ctx);
+ ctx->prologue_len = ctx->ninsns - ctx->body_len;
+ ctx->epilogue_offset = ctx->ninsns;
+ bpf_jit_build_epilogue(ctx);
+
+ if (ctx->ninsns == prev_ninsns) {
+ if (jit_data->header)
+ break;
+ /* obtain the actual image size */
+ extable_size = prog->aux->num_exentries *
+ sizeof(struct exception_table_entry);
+ prog_size = sizeof(*ctx->insns) * ctx->ninsns;
+
+ jit_data->header =
+ bpf_jit_binary_alloc(prog_size + extable_size,
+ &jit_data->image,
+ sizeof(long),
+ bpf_fill_ill_insns);
+ if (!jit_data->header) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+
+ ctx->insns = (u32 *)jit_data->image;
+ /*
+ * Now that the image is allocated, the code can
+ * potentially shrink further, as long jump
+ * sequences may collapse into shorter branches.
+ */
+ }
+ prev_ninsns = ctx->ninsns;
+ }
+
+ if (i == NR_JIT_ITERATIONS) {
+ pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
+ if (jit_data->header)
+ bpf_jit_binary_free(jit_data->header);
+ prog = orig_prog;
+ goto out_offset;
+ }
+
+ if (extable_size)
+ prog->aux->extable = (void *)ctx->insns + prog_size;
+
+skip_init_ctx:
+ pass++;
+ ctx->ninsns = 0;
+
+ bpf_jit_build_prologue(ctx);
+ if (build_body(ctx, extra_pass, NULL)) {
+ bpf_jit_binary_free(jit_data->header);
+ prog = orig_prog;
+ goto out_offset;
+ }
+ bpf_jit_build_epilogue(ctx);
+
+ if (HPPA_JIT_DEBUG || bpf_jit_enable > 1) {
+ if (HPPA_JIT_DUMP)
+ bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);
+ if (HPPA_JIT_REBOOT) {
+ extern int machine_restart(char *);
+
+ machine_restart("");
+ }
+ }
+
+ prog->bpf_func = (void *)ctx->insns;
+ prog->jited = 1;
+ prog->jited_len = prog_size;
+
+ bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
+
+ if (!prog->is_func || extra_pass) {
+ if (bpf_jit_binary_lock_ro(jit_data->header)) {
+ bpf_jit_binary_free(jit_data->header);
+ prog->bpf_func = NULL;
+ prog->jited = 0;
+ prog->jited_len = 0;
+ goto out_offset;
+ }
+ prologue_len = ctx->epilogue_offset - ctx->body_len;
+ for (i = 0; i < prog->len; i++)
+ ctx->offset[i] += prologue_len;
+ bpf_prog_fill_jited_linfo(prog, ctx->offset);
+out_offset:
+ kfree(ctx->offset);
+ kfree(jit_data);
+ prog->aux->jit_data = NULL;
+ }
+out:
+ if (HPPA_JIT_REBOOT) {
+ extern int machine_restart(char *);
+
+ machine_restart("");
+ }
+
+ if (tmp_blinded)
+ bpf_jit_prog_release_other(prog, prog == orig_prog ?
+ tmp : orig_prog);
+ return prog;
+}
+
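+/*
+ * Runtime helpers branched to from JITed code via emit_call_libgcc_ll()
+ * for BPF_DIV and BPF_MOD; they wrap the generic kernel 64-bit division
+ * routines from <linux/math64.h> rather than open-coding a PA-RISC
+ * divide sequence.
+ */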
+u64 hppa_div64(u64 div, u64 divisor)
+{
+ return div64_u64(div, divisor);
+}
+
+u64 hppa_div64_rem(u64 div, u64 divisor)
+{
+ u64 rem;
+ div64_u64_rem(div, divisor, &rem);
+ return rem;
+}
diff --git a/arch/parisc/video/Makefile b/arch/parisc/video/Makefile
new file mode 100644
index 000000000000..b5db5b42880f
--- /dev/null
+++ b/arch/parisc/video/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_STI_CORE) += video-sti.o
diff --git a/arch/parisc/video/video-sti.c b/arch/parisc/video/video-sti.c
new file mode 100644
index 000000000000..564661e87093
--- /dev/null
+++ b/arch/parisc/video/video-sti.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2001-2020 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2001-2002 Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+ */
+
+#include <linux/module.h>
+
+#include <video/sticore.h>
+
+#include <asm/video.h>
+
+bool video_is_primary_device(struct device *dev)
+{
+ struct sti_struct *sti;
+
+ sti = sti_get_rom(0);
+
+ /* if no built-in graphics card is found, allow any fb driver as default */
+ if (!sti)
+ return true;
+
+ /* return true if it's the default built-in framebuffer driver */
+ return (sti->dev == dev);
+}
+EXPORT_SYMBOL(video_is_primary_device);