Diffstat (limited to 'arch/parisc')
276 files changed, 13361 insertions, 8877 deletions
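The diffstat above summarizes the generated patch that follows. As a rough sketch (the OLD and NEW refs below are placeholders, since this page does not name the two endpoints being compared), an equivalent summary and patch body can be regenerated from a local kernel tree with git:

    # Hypothetical refs -- substitute the actual endpoints of this comparison.
    OLD=v5.x
    NEW=v6.x
    git diff --stat "$OLD..$NEW" -- arch/parisc    # per-file summary plus a total like the one above
    git diff "$OLD..$NEW" -- arch/parisc           # full patch body, as reproduced below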
diff --git a/arch/parisc/Kbuild b/arch/parisc/Kbuild
new file mode 100644
index 000000000000..749b195f2894
--- /dev/null
+++ b/arch/parisc/Kbuild
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y   += mm/ kernel/ math-emu/ net/
+
+# for cleaning
+subdir- += boot
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 7ca2c3ebad64..47fd9662d800 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -1,25 +1,39 @@
 # SPDX-License-Identifier: GPL-2.0
 config PARISC
        def_bool y
+       select ALTERNATE_USER_ADDRESS_SPACE
+       select ARCH_32BIT_OFF_T if !64BIT
        select ARCH_MIGHT_HAVE_PC_PARPORT
-       select HAVE_IDE
-       select HAVE_OPROFILE
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_SYSCALL_TRACEPOINTS
        select ARCH_WANT_FRAME_POINTERS
+       select ARCH_HAS_CPU_CACHE_ALIASING
+       select ARCH_HAS_DMA_ALLOC if PA11
+       select ARCH_HAS_DMA_OPS
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_STRICT_KERNEL_RWX
-       select ARCH_HAS_UBSAN_SANITIZE_ALL
+       select ARCH_HAS_STRICT_MODULE_RWX
+       select ARCH_HAS_UBSAN
+       select ARCH_HAS_PTE_SPECIAL
        select ARCH_NO_SG_CHAIN
+       select ARCH_SPLIT_ARG64 if !64BIT
+       select ARCH_SUPPORTS_HUGETLBFS if PA20
        select ARCH_SUPPORTS_MEMORY_FAILURE
+       select ARCH_STACKWALK
+       select ARCH_HAS_CACHE_LINE_SIZE
+       select ARCH_HAS_DEBUG_VM_PGTABLE
+       select HAVE_RELIABLE_STACKTRACE
        select RTC_CLASS
        select RTC_DRV_GENERIC
        select INIT_ALL_POSSIBLE
        select BUG
-       select BUILDTIME_EXTABLE_SORT
+       select HAVE_KERNEL_UNCOMPRESSED
        select HAVE_PCI
        select HAVE_PERF_EVENTS
+       select HAVE_PERF_REGS
+       select HAVE_PERF_USER_STACK_DUMP
+       select PERF_USE_VMALLOC
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZ4
@@ -29,36 +43,61 @@ config PARISC
        select GENERIC_ATOMIC64 if !64BIT
        select GENERIC_IRQ_PROBE
        select GENERIC_PCI_IOMAP
+       select GENERIC_IOREMAP
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select GENERIC_SMP_IDLE_THREAD
-       select GENERIC_CPU_DEVICES
-       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_ARCH_TOPOLOGY if SMP
+       select ARCH_SUPPORTS_SCHED_MC if SMP && PA8X00
+       select GENERIC_CPU_DEVICES if !SMP
+       select GENERIC_LIB_DEVMEM_IS_ALLOWED
        select SYSCTL_ARCH_UNALIGN_ALLOW
+       select SYSCTL_ARCH_UNALIGN_NO_WARN
        select SYSCTL_EXCEPTION_TRACE
        select HAVE_MOD_ARCH_SPECIFIC
-       select VIRT_TO_BUS
        select MODULES_USE_ELF_RELA
        select CLONE_BACKWARDS
        select TTY # Needed for pdc_cons.c
+       select HAS_IOPORT if PCI || EISA
        select HAVE_DEBUG_STACKOVERFLOW
+       select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+       select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+       select HAVE_ARCH_MMAP_RND_BITS
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_HASH
+       # select HAVE_ARCH_JUMP_LABEL
+       # select HAVE_ARCH_JUMP_LABEL_RELATIVE
+       select HAVE_ARCH_KFENCE
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_EBPF_JIT
+       select ARCH_WANT_DEFAULT_BPF_JIT
        select HAVE_REGS_AND_STACK_ACCESS_API
+       select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
        select GENERIC_SCHED_CLOCK
+       select GENERIC_IRQ_MIGRATION if SMP
        select HAVE_UNSTABLE_SCHED_CLOCK if SMP
        select GENERIC_CLOCKEVENTS
-       select ARCH_NO_COHERENT_DMA_MMAP
        select CPU_NO_EFFICIENT_FFS
+       select THREAD_INFO_IN_TASK
        select NEED_DMA_MAP_STATE
        select NEED_SG_DMA_LENGTH
+       select HAVE_ARCH_KGDB
+       select HAVE_KPROBES
+       select HAVE_KRETPROBES
+       select HAVE_DYNAMIC_FTRACE if $(cc-option,-fpatchable-function-entry=1,1)
+       select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
+       select HAVE_KPROBES_ON_FTRACE
+       select HAVE_DYNAMIC_FTRACE_WITH_REGS
+       select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
+       select TRACE_IRQFLAGS_SUPPORT
+       select HAVE_FUNCTION_DESCRIPTORS if 64BIT
+       select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
        help
          The PA-RISC microprocessor is designed by Hewlett-Packard and used
          in many of their workstations & servers (HP9000 700 and 800 series,
          and later HP3000 series).  The PA-RISC Linux project home page is
-         at <http://www.parisc-linux.org/>.
+         at <https://parisc.wiki.kernel.org>.
 
 config CPU_BIG_ENDIAN
        def_bool y
@@ -72,13 +111,7 @@ config STACK_GROWSUP
 config GENERIC_LOCKBREAK
        bool
        default y
-       depends on SMP && PREEMPT
-
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
+       depends on SMP && PREEMPTION
 
 config ARCH_HAS_ILOG2_U32
        bool
@@ -89,9 +122,12 @@ config ARCH_HAS_ILOG2_U64
        default n
 
 config GENERIC_BUG
-       bool
-       default y
+       def_bool y
        depends on BUG
+       select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
+
+config GENERIC_BUG_RELATIVE_POINTERS
+       bool
 
 config GENERIC_HWEIGHT
        bool
@@ -106,6 +142,20 @@ config TIME_LOW_RES
        depends on SMP
        default y
 
+config ARCH_MMAP_RND_BITS_MIN
+       default 18 if 64BIT
+       default 8
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+       default 8
+
+config ARCH_MMAP_RND_BITS_MAX
+       default 18 if 64BIT
+       default 13
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+       default 13
+
 # unless you want to implement ACPI on PA-RISC ... ;-)
 config PM
        bool
@@ -113,6 +163,10 @@ config PM
 config STACKTRACE_SUPPORT
        def_bool y
 
+config LOCKDEP_SUPPORT
+       bool
+       default y
+
 config ISA_DMA_API
        bool
@@ -126,19 +180,15 @@ config PGTABLE_LEVELS
        default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
        default 2
 
-config SYS_SUPPORTS_HUGETLBFS
-       def_bool y if PA20
-
-
 menu "Processor type and features"
 
 choice
        prompt "Processor type"
-       default PA7000
+       default PA7000 if "$(ARCH)" = "parisc"
 
 config PA7000
-       bool "PA7000/PA7100"
-       ---help---
+       bool "PA7000/PA7100" if "$(ARCH)" = "parisc"
+       help
          This is the processor type of your CPU.  This information is
          used for optimizing purposes.  In order to compile a kernel
          that can run on all 32-bit PA CPUs (albeit not optimally fast),
@@ -148,21 +198,21 @@ config PA7000
          which is required on some machines.
 
 config PA7100LC
-       bool "PA7100LC"
+       bool "PA7100LC" if "$(ARCH)" = "parisc"
        help
          Select this option for the PCX-L processor, as used in the
          712, 715/64, 715/80, 715/100, 715/100XC, 725/100, 743, 748,
          D200, D210, D300, D310 and E-class
 
 config PA7200
-       bool "PA7200"
+       bool "PA7200" if "$(ARCH)" = "parisc"
        help
          Select this option for the PCX-T' processor, as used in the
          C100, C110, J100, J110, J210XC, D250, D260, D350, D360,
          K100, K200, K210, K220, K400, K410 and K420
 
 config PA7300LC
-       bool "PA7300LC"
+       bool "PA7300LC" if "$(ARCH)" = "parisc"
        help
          Select this option for the PCX-L2 processor, as used in the
          744, A180, B132L, B160L, B180L, C132L, C160L, C180L,
@@ -186,16 +236,18 @@ config PA11
        depends on PA7000 || PA7100LC || PA7200 || PA7300LC
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       select DMA_NONCOHERENT_CACHE_SYNC
 
 config PREFETCH
        def_bool y
        depends on PA8X00 || PA7200
 
+config PARISC_HUGE_KERNEL
+       def_bool y if !MODULES || UBSAN || FTRACE || COMPILE_TEST
+
 config MLONGCALLS
-       bool "Enable the -mlong-calls compiler option for big kernels"
-       default y
+       bool "Enable the -mlong-calls compiler option for big kernels" if !PARISC_HUGE_KERNEL
        depends on PA8X00
+       default PARISC_HUGE_KERNEL
        help
          If you configure the kernel to include many drivers built-in instead
          as modules, the kernel executable may become too big, so that the
@@ -210,8 +262,9 @@ config MLONGCALLS
          Enabling this option will probably slow down your kernel.
 
 config 64BIT
-       bool "64-bit kernel"
+       bool "64-bit kernel" if "$(ARCH)" = "parisc"
        depends on PA8X00
+       default "$(ARCH)" = "parisc64"
        help
          Enable this if you want to support 64bit kernel on PA-RISC platform.
 
@@ -228,6 +281,7 @@ choice
 config PARISC_PAGE_SIZE_4KB
        bool "4KB"
+       select HAVE_PAGE_SIZE_4KB
        help
          This lets you select the page size of the kernel.  For best
          performance, a page size of 16KB is recommended.  For best
@@ -243,29 +297,19 @@ config PARISC_PAGE_SIZE_4KB
 config PARISC_PAGE_SIZE_16KB
        bool "16KB"
-       depends on PA8X00 && BROKEN
+       select HAVE_PAGE_SIZE_16KB
+       depends on PA8X00 && BROKEN && !KFENCE
 
 config PARISC_PAGE_SIZE_64KB
        bool "64KB"
-       depends on PA8X00 && BROKEN
+       select HAVE_PAGE_SIZE_64KB
+       depends on PA8X00 && BROKEN && !KFENCE
 
 endchoice
 
-config PARISC_SELF_EXTRACT
-       bool "Build kernel as self-extracting executable"
-       default y
-       help
-         Say Y if you want to build the parisc kernel as a kind of
-         self-extracting executable.
-
-         If you say N here, the kernel will be compressed with gzip
-         which can be loaded by the palo bootloader directly too.
-
-         If you don't know what to do here, say Y.
-
 config SMP
        bool "Symmetric multi-processing support"
-       ---help---
+       help
          This enables support for systems with more than one CPU. If you have
          a system with only one CPU, say N. If you have a system with more
          than one CPU, say Y.
@@ -274,26 +318,11 @@ config SMP
          machines, but will use only one CPU of a multiprocessor machine.
          On a uniprocessor machine, the kernel will run faster if you say N.
 
-         See also <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO
-         available at <http://www.tldp.org/docs.html#howto>.
+         See also <file:Documentation/admin-guide/lockup-watchdogs.rst> and the SMP-HOWTO
+         available at <https://www.tldp.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
 
-config PARISC_CPU_TOPOLOGY
-       bool "Support cpu topology definition"
-       depends on SMP
-       default y
-       help
-         Support PARISC cpu topology definition.
-
-config SCHED_MC
-       bool "Multi-core scheduler support"
-       depends on PARISC_CPU_TOPOLOGY && PA8X00
-       help
-         Multi-core scheduler support improves the CPU scheduler's decision
-         making when dealing with multi-core CPU chips at a cost of slightly
-         increased overhead in some places. If unsure say N here.
-
 config IRQSTACKS
        bool "Use separate kernel stacks when processing interrupts"
        default y
@@ -310,32 +339,22 @@ config ARCH_SELECT_MEMORY_MODEL
        def_bool y
        depends on 64BIT
 
-config ARCH_DISCONTIGMEM_ENABLE
+config ARCH_SPARSEMEM_ENABLE
        def_bool y
        depends on 64BIT
 
 config ARCH_FLATMEM_ENABLE
        def_bool y
 
-config ARCH_DISCONTIGMEM_DEFAULT
+config ARCH_SPARSEMEM_DEFAULT
        def_bool y
-       depends on ARCH_DISCONTIGMEM_ENABLE
-
-config NODES_SHIFT
-       int
-       default "3"
-       depends on NEED_MULTIPLE_NODES
+       depends on ARCH_SPARSEMEM_ENABLE
 
 source "kernel/Kconfig.hz"
 
 config COMPAT
        def_bool y
        depends on 64BIT
-       select COMPAT_BINFMT_ELF if BINFMT_ELF
-
-config SYSVIPC_COMPAT
-       def_bool y
-       depends on COMPAT && SYSVIPC
 
 config AUDIT_ARCH
        def_bool y
@@ -344,25 +363,20 @@ config NR_CPUS
        int "Maximum number of CPUs (2-32)"
        range 2 32
        depends on SMP
-       default "4"
+       default "8" if 64BIT
+       default "16"
 
 endmenu
 
+config ARCH_SUPPORTS_KEXEC
+       def_bool y
 
-source "drivers/parisc/Kconfig"
+config ARCH_SUPPORTS_KEXEC_FILE
+       def_bool y
 
-config SECCOMP
+config ARCH_SELECTS_KEXEC_FILE
        def_bool y
-       prompt "Enable seccomp to safely compute untrusted bytecode"
-       ---help---
-         This kernel feature is useful for number crunching applications
-         that may need to compute untrusted bytecode during their
-         execution. By using pipes or other transports made available to
-         the process as file descriptors supporting the read/write
-         syscalls, it's possible to isolate those applications in
-         their own address space using seccomp. Once seccomp is
-         enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
-         and the task is only allowed to execute a few safe syscalls
-         defined by each seccomp mode.
-
-         If unsure, say Y. Only embedded should say N here.
+       depends on KEXEC_FILE
+       select KEXEC_ELF
+
+source "drivers/parisc/Kconfig"
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index 1478ded0e247..f4f164eb12df 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -1,4 +1,23 @@
 # SPDX-License-Identifier: GPL-2.0
+#
+config LIGHTWEIGHT_SPINLOCK_CHECK
+       bool "Enable lightweight spinlock checks"
+       depends on DEBUG_KERNEL && SMP && !DEBUG_SPINLOCK
+       default y
+       help
+         Add checks with low performance impact to the spinlock functions
+         to catch memory overwrites at runtime. For more advanced
+         spinlock debugging you should choose the DEBUG_SPINLOCK option
+         which will detect unitialized spinlocks too.
+         If unsure say Y here.
+
+config TLB_PTLOCK
+       bool "Use page table locks in TLB fault handler"
+       depends on DEBUG_KERNEL && SMP
+       default n
+       help
+         Select this option to enable page table locking in the TLB
+         fault handler. This ensures that page table entries are
+         updated consistently on SMP machines at the expense of some
+         loss in performance.
 
-config TRACE_IRQFLAGS_SUPPORT
-       def_bool y
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index c19af26febe6..48ae3c79557a 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -2,9 +2,7 @@
 # parisc/Makefile
 #
 # This file is included by the global makefile so that you can add your own
-# architecture-specific flags and dependencies. Remember to do have actions
-# for "archclean" and "archdep" for cleaning up and making dependencies for
-# this architecture
+# architecture-specific flags and dependencies.
 #
 # This file is subject to the terms and conditions of the GNU General Public
 # License.
See the file "COPYING" in the main directory of this archive @@ -13,38 +11,70 @@ # Copyright (C) 1994 by Linus Torvalds # Portions Copyright (C) 1999 The Puffin Group # -# Modified for PA-RISC Linux by Paul Lahaie, Alex deVries, +# Modified for PA-RISC Linux by Paul Lahaie, Alex deVries, # Mike Shaver, Helge Deller and Martin K. Petersen # -KBUILD_IMAGE := vmlinuz +boot := arch/parisc/boot +KBUILD_IMAGE := $(boot)/bzImage -KBUILD_DEFCONFIG := default_defconfig - -NM = sh $(srctree)/arch/parisc/nm CHECKFLAGS += -D__hppa__=1 -LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) -export LIBGCC ifdef CONFIG_64BIT UTS_MACHINE := parisc64 CHECKFLAGS += -D__LP64__=1 -CC_ARCHES = hppa64 LD_BFD := elf64-hppa-linux else # 32-bit -CC_ARCHES = hppa hppa2.0 hppa1.1 LD_BFD := elf32-hppa-linux endif +# select defconfig based on actual architecture +ifeq ($(ARCH),parisc64) + KBUILD_DEFCONFIG := generic-64bit_defconfig + CC_ARCHES := hppa64 +else + KBUILD_DEFCONFIG := generic-32bit_defconfig + CC_ARCHES := hppa hppa2.0 hppa1.1 +endif + export LD_BFD -ifneq ($(SUBARCH),$(UTS_MACHINE)) - ifeq ($(CROSS_COMPILE),) - CC_SUFFIXES = linux linux-gnu unknown-linux-gnu +# Set default 32 bits cross compilers for vdso. +# This means that for 64BIT, both the 64-bit tools and the 32-bit tools +# need to be in the path. +CC_ARCHES_32 = hppa hppa2.0 hppa1.1 +CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux +CROSS32_COMPILE := $(call cc-cross-prefix, \ + $(foreach a,$(CC_ARCHES_32), \ + $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-))) +CROSS32CC := $(CROSS32_COMPILE)gcc +export CROSS32CC + +# Set default cross compiler for kernel build +ifdef cross_compiling + ifeq ($(CROSS_COMPILE),) + CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux CROSS_COMPILE := $(call cc-cross-prefix, \ $(foreach a,$(CC_ARCHES), \ $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-))) - endif + endif +endif + +ifdef CONFIG_DYNAMIC_FTRACE +ifdef CONFIG_64BIT +NOP_COUNT := 8 +else +NOP_COUNT := 5 +endif + +export CC_USING_RECORD_MCOUNT:=1 +export CC_USING_PATCHABLE_FUNCTION_ENTRY:=1 + +KBUILD_AFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY=1 +KBUILD_CFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY=1 \ + -DFTRACE_PATCHABLE_FUNCTION_SIZE=$(NOP_COUNT) + +CC_FLAGS_FTRACE := -fpatchable-function-entry=$(NOP_COUNT),$(shell echo $$(($(NOP_COUNT)-1))) endif OBJCOPY_FLAGS =-O binary -R .note -R .comment -S @@ -85,16 +115,13 @@ cflags-$(CONFIG_PA7100LC) += -march=1.1 -mschedule=7100LC cflags-$(CONFIG_PA7300LC) += -march=1.1 -mschedule=7300 cflags-$(CONFIG_PA8X00) += -march=2.0 -mschedule=8000 -head-y := arch/parisc/kernel/head.o - KBUILD_CFLAGS += $(cflags-y) +LIBGCC := $(shell $(CC) -print-libgcc-file-name) +export LIBGCC -kernel-y := mm/ kernel/ math-emu/ - -core-y += $(addprefix arch/parisc/, $(kernel-y)) libs-y += arch/parisc/lib/ $(LIBGCC) -drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ +drivers-$(CONFIG_VIDEO) += arch/parisc/video/ boot := arch/parisc/boot @@ -102,8 +129,8 @@ PALO := $(shell if (which palo 2>&1); then : ; \ elif [ -x /sbin/palo ]; then echo /sbin/palo; \ fi) -PALOCONF := $(shell if [ -f $(src)/palo.conf ]; then echo $(src)/palo.conf; \ - else echo $(obj)/palo.conf; \ +PALOCONF := $(shell if [ -f $(srctree)/palo.conf ]; then echo $(srctree)/palo.conf; \ + else echo $(objtree)/palo.conf; \ fi) palo lifimage: vmlinuz @@ -113,8 +140,8 @@ palo lifimage: vmlinuz false; \ fi @if test ! 
-f "$(PALOCONF)"; then \ - cp $(src)/arch/parisc/defpalo.conf $(obj)/palo.conf; \ - echo 'A generic palo config file ($(obj)/palo.conf) has been created for you.'; \ + cp $(srctree)/arch/parisc/defpalo.conf $(objtree)/palo.conf; \ + echo 'A generic palo config file ($(objtree)/palo.conf) has been created for you.'; \ echo 'You should check it and re-run "make palo".'; \ echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \ false; \ @@ -135,20 +162,30 @@ Image: vmlinux bzImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ -ifdef CONFIG_PARISC_SELF_EXTRACT vmlinuz: bzImage $(OBJCOPY) $(boot)/bzImage $@ -else -vmlinuz: vmlinux - @gzip -cf -9 $< > $@ + +ifeq ($(KBUILD_EXTMOD),) +# We need to generate vdso-offsets.h before compiling certain files in kernel/. +# In order to do that, we should use the archprepare target, but we can't since +# asm-offsets.h is included in some files used to generate vdso-offsets.h, and +# asm-offsets.h is built in prepare0, for which archprepare is a dependency. +# Therefore we need to generate the header after prepare0 has been made, hence +# this hack. +prepare: vdso_prepare +vdso_prepare: prepare0 + $(if $(CONFIG_64BIT),$(Q)$(MAKE) \ + $(build)=arch/parisc/kernel/vdso64 include/generated/vdso64-offsets.h) + $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 include/generated/vdso32-offsets.h endif -install: - $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \ - $(KERNELRELEASE) vmlinux System.map "$(INSTALL_PATH)" -zinstall: - $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \ - $(KERNELRELEASE) vmlinuz System.map "$(INSTALL_PATH)" +vdso-install-y += arch/parisc/kernel/vdso32/vdso32.so +vdso-install-$(CONFIG_64BIT) += arch/parisc/kernel/vdso64/vdso64.so + +install: KBUILD_IMAGE := vmlinux +zinstall: KBUILD_IMAGE := vmlinuz +install zinstall: + $(call cmd,install) CLEAN_FILES += lifimage MRPROPER_FILES += palo.conf diff --git a/arch/parisc/boot/.gitignore b/arch/parisc/boot/.gitignore index 017d5912ad2d..adf2ae0e7eda 100644 --- a/arch/parisc/boot/.gitignore +++ b/arch/parisc/boot/.gitignore @@ -1,2 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only image bzImage diff --git a/arch/parisc/boot/Makefile b/arch/parisc/boot/Makefile index cad68a584884..657f967240ee 100644 --- a/arch/parisc/boot/Makefile +++ b/arch/parisc/boot/Makefile @@ -1,13 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only # # Makefile for the linux parisc-specific parts of the boot image creator. # -COMPILE_VERSION := __linux_compile_version_id__`hostname | \ - tr -c '[0-9A-Za-z]' '_'`__`date | \ - tr -c '[0-9A-Za-z]' '_'`_t - -ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. 
- targets := image targets += bzImage subdir- := compressed @@ -15,12 +10,8 @@ subdir- := compressed $(obj)/image: vmlinux FORCE $(call if_changed,objcopy) -$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE +$(obj)/bzImage: $(if $(CONFIG_KERNEL_UNCOMPRESSED),$(objtree)/vmlinux,$(obj)/compressed/vmlinux) FORCE $(call if_changed,objcopy) $(obj)/compressed/vmlinux: FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ - -install: $(CONFIGURE) $(obj)/bzImage - sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \ - System.map "$(INSTALL_PATH)" diff --git a/arch/parisc/boot/compressed/.gitignore b/arch/parisc/boot/compressed/.gitignore index ae06b9b4c02f..a5839aa16706 100644 --- a/arch/parisc/boot/compressed/.gitignore +++ b/arch/parisc/boot/compressed/.gitignore @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only sizes.h vmlinux vmlinux.lds diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile index 777533cdea31..f8481e4e9d21 100644 --- a/arch/parisc/boot/compressed/Makefile +++ b/arch/parisc/boot/compressed/Makefile @@ -1,29 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0-only # # linux/arch/parisc/boot/compressed/Makefile # # create a compressed self-extracting vmlinux image from the original vmlinux # -KCOV_INSTRUMENT := n -GCOV_PROFILE := n -UBSAN_SANITIZE := n - +OBJECTS := head.o real2.o firmware.o misc.o piggy.o targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 -targets += misc.o piggy.o sizes.h head.o real2.o firmware.o +targets += $(OBJECTS) sizes.h KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING +KBUILD_CFLAGS += -fno-strict-aliasing KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -fno-builtin-printf KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os ifndef CONFIG_64BIT KBUILD_CFLAGS += -mfast-indirect-calls endif - -OBJECTS += $(obj)/head.o $(obj)/real2.o $(obj)/firmware.o $(obj)/misc.o $(obj)/piggy.o +KBUILD_CFLAGS += -std=gnu11 -fms-extensions LDFLAGS_vmlinux := -X -e startup --as-needed -T -$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(LIBGCC) +$(obj)/vmlinux: $(obj)/vmlinux.lds $(addprefix $(obj)/, $(OBJECTS)) $(LIBGCC) FORCE $(call if_changed,ld) sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\|parisc_kernel_start\)$$/\#define SZ\2 0x\1/p' @@ -31,7 +29,7 @@ sed-sizes := -e 's/^\([0-9a-fA-F]*\) . 
\(__bss_start\|_end\|parisc_kernel_start\ quiet_cmd_sizes = GEN $@ cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@ -$(obj)/sizes.h: vmlinux +$(obj)/sizes.h: vmlinux FORCE $(call if_changed,sizes) AFLAGS_head.o += -I$(objtree)/$(obj) -DBOOTLOADER @@ -40,25 +38,15 @@ $(obj)/head.o: $(obj)/sizes.h CFLAGS_misc.o += -I$(objtree)/$(obj) $(obj)/misc.o: $(obj)/sizes.h -$(obj)/firmware.o: $(obj)/firmware.c -$(obj)/firmware.c: $(srctree)/arch/$(SRCARCH)/kernel/firmware.c - $(call cmd,shipped) - AFLAGS_real2.o += -DBOOTLOADER -$(obj)/real2.o: $(obj)/real2.S -$(obj)/real2.S: $(srctree)/arch/$(SRCARCH)/kernel/real2.S - $(call cmd,shipped) - -$(obj)/misc.o: $(obj)/sizes.h CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER $(obj)/vmlinux.lds: $(obj)/sizes.h -$(obj)/vmlinux.bin: vmlinux +OBJCOPYFLAGS_vmlinux.bin := -R .comment -R .note -S +$(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) -vmlinux.bin.all-y := $(obj)/vmlinux.bin - suffix-$(CONFIG_KERNEL_GZIP) := gz suffix-$(CONFIG_KERNEL_BZIP2) := bz2 suffix-$(CONFIG_KERNEL_LZ4) := lz4 @@ -66,19 +54,19 @@ suffix-$(CONFIG_KERNEL_LZMA) := lzma suffix-$(CONFIG_KERNEL_LZO) := lzo suffix-$(CONFIG_KERNEL_XZ) := xz -$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) -$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) - $(call if_changed,bzip2) -$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) - $(call if_changed,lz4) -$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) - $(call if_changed,lzma) -$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) - $(call if_changed,lzo) -$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) - $(call if_changed,xzkern) +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE + $(call if_changed,bzip2_with_size) +$(obj)/vmlinux.bin.lz4: $(obj)/vmlinux.bin FORCE + $(call if_changed,lz4_with_size) +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzma_with_size) +$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzo_with_size) +$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE + $(call if_changed,xzkern_with_size) LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T -$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) +$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE $(call if_changed,ld) diff --git a/arch/parisc/boot/compressed/firmware.c b/arch/parisc/boot/compressed/firmware.c new file mode 100644 index 000000000000..16a07137fe92 --- /dev/null +++ b/arch/parisc/boot/compressed/firmware.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "../../kernel/firmware.c" diff --git a/arch/parisc/boot/compressed/head.S b/arch/parisc/boot/compressed/head.S index 5aba20fa48aa..e8b798fd0cf0 100644 --- a/arch/parisc/boot/compressed/head.S +++ b/arch/parisc/boot/compressed/head.S @@ -22,7 +22,7 @@ __HEAD ENTRY(startup) - .level LEVEL + .level PA_ASM_LEVEL #define PSW_W_SM 0x200 #define PSW_W_BIT 36 @@ -63,7 +63,7 @@ $bss_loop: load32 BOOTADDR(decompress_kernel),%r3 #ifdef CONFIG_64BIT - .level LEVEL + .level PA_ASM_LEVEL ssm PSW_W_SM, %r0 /* set W-bit */ depdi 0, 31, 32, %r3 #endif @@ -72,7 +72,7 @@ $bss_loop: startup_continue: #ifdef CONFIG_64BIT - .level LEVEL + .level PA_ASM_LEVEL rsm PSW_W_SM, %r0 /* clear W-bit */ #endif diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c index 2556bb181813..9c83bd06ef15 100644 --- a/arch/parisc/boot/compressed/misc.c +++ b/arch/parisc/boot/compressed/misc.c @@ -6,7 +6,7 @@ #include <linux/uaccess.h> #include <linux/elf.h> 
-#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <asm/page.h> #include "sizes.h" @@ -26,7 +26,7 @@ extern char input_data[]; extern int input_len; /* output_len is inserted by the linker possibly at an unaligned address */ -extern __le32 output_len __aligned(1); +extern char output_len; extern char _text, _end; extern char _bss, _ebss; extern char _startcode_end; @@ -117,7 +117,7 @@ char *strchr(const char *s, int c) return NULL; } -int puts(const char *s) +static int puts(const char *s) { const char *nuline = s; @@ -145,14 +145,13 @@ static int putchar(int c) void __noreturn error(char *x) { - puts("\n\n"); - puts(x); - puts("\n\n -- System halted"); + if (x) puts(x); + puts("\n -- System halted\n"); while (1) /* wait forever */ ; } -static int print_hex(unsigned long num) +static int print_num(unsigned long num, int base) { const char hex[] = "0123456789abcdef"; char str[40]; @@ -160,18 +159,20 @@ static int print_hex(unsigned long num) str[i--] = '\0'; do { - str[i--] = hex[num & 0x0f]; - num >>= 4; + str[i--] = hex[num % base]; + num = num / base; } while (num); - str[i--] = 'x'; - str[i] = '0'; + if (base == 16) { + str[i--] = 'x'; + str[i] = '0'; + } else i++; puts(&str[i]); return 0; } -int printf(const char *fmt, ...) +static int printf(const char *fmt, ...) { va_list args; int i = 0; @@ -187,8 +188,9 @@ put: if (fmt[++i] == '%') goto put; + print_num(va_arg(args, unsigned long), + fmt[i] == 'x' ? 16:10); ++i; - print_hex(va_arg(args, unsigned long)); } va_end(args); @@ -202,13 +204,13 @@ void abort(void) } #undef malloc -void *malloc(size_t size) +static void *malloc(size_t size) { return malloc_gzip(size); } #undef free -void free(void *ptr) +static void free(void *ptr) { return free_gzip(ptr); } @@ -276,7 +278,7 @@ static void parse_elf(void *output) free(phdrs); } -unsigned long decompress_kernel(unsigned int started_wide, +asmlinkage unsigned long __visible decompress_kernel(unsigned int started_wide, unsigned int command_line, const unsigned int rd_start, const unsigned int rd_end) @@ -327,8 +329,15 @@ unsigned long decompress_kernel(unsigned int started_wide, free_mem_end_ptr = rd_start; #endif - if (free_mem_ptr >= free_mem_end_ptr) - error("Kernel too big for machine."); + if (free_mem_ptr >= free_mem_end_ptr) { + int free_ram; + free_ram = (free_mem_ptr >> 20) + 1; + if (free_ram < 32) + free_ram = 32; + printf("\nKernel requires at least %d MB RAM.\n", + free_ram); + error(NULL); + } #ifdef DEBUG printf("\n"); diff --git a/arch/parisc/boot/compressed/real2.S b/arch/parisc/boot/compressed/real2.S new file mode 100644 index 000000000000..cdc6a4da3240 --- /dev/null +++ b/arch/parisc/boot/compressed/real2.S @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#include "../../kernel/real2.S" diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S index bfd7872739a3..ab7b43990857 100644 --- a/arch/parisc/boot/compressed/vmlinux.lds.S +++ b/arch/parisc/boot/compressed/vmlinux.lds.S @@ -48,8 +48,8 @@ SECTIONS *(.rodata.compressed) } - /* bootloader code and data starts behind area of extracted kernel */ - . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START); + /* bootloader code and data starts at least behind area of extracted kernel */ + . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START)); /* align on next page boundary */ . 
= ALIGN(4096); @@ -84,6 +84,7 @@ SECTIONS } STABS_DEBUG + ELF_DETAILS .note 0 : { *(.note) } /* Sections to be discarded */ diff --git a/arch/parisc/boot/install.sh b/arch/parisc/boot/install.sh deleted file mode 100644 index 8f7c365fad83..000000000000 --- a/arch/parisc/boot/install.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/sh -# -# arch/parisc/install.sh, derived from arch/i386/boot/install.sh -# -# This file is subject to the terms and conditions of the GNU General Public -# License. See the file "COPYING" in the main directory of this archive -# for more details. -# -# Copyright (C) 1995 by Linus Torvalds -# -# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin -# -# "make install" script for i386 architecture -# -# Arguments: -# $1 - kernel version -# $2 - kernel image file -# $3 - kernel map file -# $4 - default install path (blank if root directory) -# - -verify () { - if [ ! -f "$1" ]; then - echo "" 1>&2 - echo " *** Missing file: $1" 1>&2 - echo ' *** You need to run "make" before "make install".' 1>&2 - echo "" 1>&2 - exit 1 - fi -} - -# Make sure the files actually exist - -verify "$2" -verify "$3" - -# User may have a custom install script - -if [ -n "${INSTALLKERNEL}" ]; then - if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi - if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi -fi - -# Default install - -if [ "$(basename $2)" = "zImage" ]; then -# Compressed install - echo "Installing compressed kernel" - base=vmlinuz -else -# Normal install - echo "Installing normal kernel" - base=vmlinux -fi - -if [ -f $4/$base-$1 ]; then - mv $4/$base-$1 $4/$base-$1.old -fi -cat $2 > $4/$base-$1 - -# Install system map file -if [ -f $4/System.map-$1 ]; then - mv $4/System.map-$1 $4/System.map-$1.old -fi -cp $3 $4/System.map-$1 diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig deleted file mode 100644 index ccc109761f44..000000000000 --- a/arch/parisc/configs/712_defconfig +++ /dev/null @@ -1,182 +0,0 @@ -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=16 -CONFIG_BLK_DEV_INITRD=y -CONFIG_KALLSYMS_ALL=y -CONFIG_SLAB=y -CONFIG_PROFILING=y -CONFIG_OPROFILE=m -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_PA7100LC=y -CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_GSC_LASI=y -# CONFIG_PDC_CHASSIS is not set -CONFIG_BINFMT_MISC=m -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_NET_KEY=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_DIAG=m -# CONFIG_IPV6 is not set -CONFIG_NETFILTER=y -CONFIG_LLC2=m -CONFIG_NET_PKTGEN=m -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_STANDALONE is not set -# CONFIG_PREVENT_FIRMWARE_BUILD is not set -CONFIG_PARPORT=y -CONFIG_PARPORT_PC=m -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_CRYPTOLOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=6144 -CONFIG_ATA_OVER_ETH=m -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_LASI700=y -CONFIG_SCSI_DEBUG=m -CONFIG_MD=y -CONFIG_BLK_DEV_MD=m -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_NETDEVICES=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_TUN=m -CONFIG_LASI_82596=y -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m 
-CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -# CONFIG_KEYBOARD_HIL_OLD is not set -CONFIG_MOUSE_SERIAL=m -CONFIG_LEGACY_PTY_COUNT=64 -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=17 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_MUX is not set -CONFIG_PDC_CONSOLE=y -CONFIG_PRINTER=m -CONFIG_PPDEV=m -# CONFIG_HW_RANDOM is not set -CONFIG_RAW_DRIVER=y -# CONFIG_HWMON is not set -CONFIG_FB=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y -CONFIG_DUMMY_CONSOLE_COLUMNS=128 -CONFIG_DUMMY_CONSOLE_ROWS=48 -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -# CONFIG_LOGO_LINUX_CLUT224 is not set -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_SEQUENCER=y -CONFIG_SND_HARMONY=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -CONFIG_JFS_FS=m -CONFIG_XFS_FS=m -CONFIG_AUTOFS4_FS=y -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_UFS_FS=m -CONFIG_NFS_FS=y -CONFIG_NFS_V4=y -CONFIG_ROOT_NFS=y -CONFIG_NFSD=m -CONFIG_NFSD_V4=y -CONFIG_CIFS=m -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_UTF8=m -CONFIG_DEBUG_FS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MUTEXES=y -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_DEFLATE=m -# CONFIG_CRYPTO_HW is not set -CONFIG_FONTS=y -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig deleted file mode 100644 index 5acb93dcaabf..000000000000 --- a/arch/parisc/configs/a500_defconfig +++ /dev/null @@ -1,177 +0,0 @@ -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=16 -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -CONFIG_SLAB=y -CONFIG_PROFILING=y -CONFIG_OPROFILE=m -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_PA8X00=y -CONFIG_64BIT=y -CONFIG_SMP=y -CONFIG_NR_CPUS=8 -# CONFIG_GSC is not set -CONFIG_PCI=y -CONFIG_PCI_LBA=y -CONFIG_PCCARD=m -# CONFIG_PCMCIA_LOAD_CIS is not set -CONFIG_YENTA=m -CONFIG_PD6729=m -CONFIG_I82092=m -# CONFIG_SUPERIO is not set -# CONFIG_CHASSIS_LCD_LED is not set 
-CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_NET_KEY=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_TUNNEL=m -CONFIG_NETFILTER=y -# CONFIG_NETFILTER_XT_MATCH_DCCP is not set -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP_DCCP=m -# CONFIG_IP_DCCP_CCID3 is not set -CONFIG_LLC2=m -CONFIG_NET_PKTGEN=m -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_STANDALONE is not set -# CONFIG_PREVENT_FIRMWARE_BUILD is not set -CONFIG_BLK_DEV_UMEM=m -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=6144 -CONFIG_RAID_ATTRS=m -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SYM53C8XX_2=y -CONFIG_SCSI_QLOGIC_1280=m -CONFIG_SCSI_DEBUG=m -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_LINEAR=y -CONFIG_MD_RAID0=y -CONFIG_MD_RAID1=y -CONFIG_FUSION=y -CONFIG_FUSION_SPI=m -CONFIG_FUSION_CTL=m -CONFIG_NETDEVICES=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_TUN=m -CONFIG_PCMCIA_3C574=m -CONFIG_PCMCIA_3C589=m -CONFIG_VORTEX=m -CONFIG_TYPHOON=m -CONFIG_ACENIC=m -CONFIG_ACENIC_OMIT_TIGON_I=y -CONFIG_PCNET32=m -CONFIG_TIGON3=m -CONFIG_NET_TULIP=y -CONFIG_DE2104X=m -CONFIG_TULIP=y -CONFIG_TULIP_MMIO=y -CONFIG_PCMCIA_XIRCOM=m -CONFIG_HP100=m -CONFIG_E100=m -CONFIG_E1000=m -CONFIG_PCMCIA_SMC91C92=m -CONFIG_PCMCIA_XIRC2PS=m -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_SERIO is not set -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_CS=m -CONFIG_SERIAL_8250_NR_UARTS=17 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_PDC_CONSOLE=y -# CONFIG_HW_RANDOM is not set -CONFIG_RAW_DRIVER=y -# CONFIG_HWMON is not set -CONFIG_AGP=y -CONFIG_AGP_PARISC=y -# CONFIG_STI_CONSOLE is not set -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -CONFIG_JFS_FS=m -CONFIG_XFS_FS=m -CONFIG_AUTOFS4_FS=y -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_UFS_FS=m -CONFIG_NFS_FS=m -CONFIG_NFS_V4=m -CONFIG_NFSD=m -CONFIG_NFSD_V4=y -CONFIG_CIFS=m -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_UTF8=m -CONFIG_DEBUG_FS=y -CONFIG_HEADERS_CHECK=y -CONFIG_MAGIC_SYSRQ=y -# CONFIG_DEBUG_BUGVERBOSE is not set -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_BLOWFISH=m -# CONFIG_CRYPTO_HW is not set diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig deleted file mode 100644 index 83ffd161aec5..000000000000 --- a/arch/parisc/configs/b180_defconfig +++ 
/dev/null @@ -1,97 +0,0 @@ -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_SYSVIPC=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=16 -CONFIG_BLK_DEV_INITRD=y -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODVERSIONS=y -CONFIG_PA7100LC=y -CONFIG_HPPB=y -CONFIG_IOMMU_CCIO=y -CONFIG_GSC_LASI=y -CONFIG_GSC_WAX=y -CONFIG_EISA=y -CONFIG_ISA=y -CONFIG_PCI=y -CONFIG_GSC_DINO=y -# CONFIG_PDC_CHASSIS is not set -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_PREVENT_FIRMWARE_BUILD is not set -CONFIG_PARPORT=y -CONFIG_PARPORT_PC=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_CRYPTOLOOP=y -CONFIG_CDROM_PKTCDVD=m -CONFIG_ATA_OVER_ETH=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_LASI700=y -CONFIG_SCSI_SYM53C8XX_2=y -CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 -CONFIG_SCSI_ZALON=y -CONFIG_SCSI_NCR53C8XX_SYNC=40 -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_LINEAR=y -CONFIG_MD_RAID0=y -CONFIG_MD_RAID1=y -CONFIG_NETDEVICES=y -CONFIG_NET_TULIP=y -CONFIG_TULIP=y -CONFIG_LASI_82596=y -CONFIG_PPP=y -CONFIG_INPUT_EVDEV=y -# CONFIG_KEYBOARD_HIL_OLD is not set -CONFIG_INPUT_MISC=y -# CONFIG_SERIO_SERPORT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=13 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_PRINTER=y -# CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set -CONFIG_FB=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_LOGO=y -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_SEQUENCER=y -CONFIG_SND_HARMONY=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -CONFIG_AUTOFS4_FS=y -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_NFSD=y -CONFIG_NFSD_V3=y -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_UTF8=m -CONFIG_HEADERS_CHECK=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_SECURITY=y diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig deleted file mode 100644 index 8d41a73bd71b..000000000000 --- a/arch/parisc/configs/c3000_defconfig +++ /dev/null @@ -1,151 +0,0 @@ -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_SYSVIPC=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=16 -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -CONFIG_SLAB=y -CONFIG_PROFILING=y -CONFIG_OPROFILE=m -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_PA8X00=y -CONFIG_PREEMPT_VOLUNTARY=y -# CONFIG_GSC is not set -CONFIG_PCI=y -CONFIG_PCI_LBA=y -# CONFIG_PDC_CHASSIS is not set -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_NET_KEY=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_BOOTP=y -# CONFIG_INET_DIAG is not set -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_TUNNEL=m -CONFIG_NETFILTER=y -CONFIG_NET_PKTGEN=m -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_STANDALONE is not set -# CONFIG_PREVENT_FIRMWARE_BUILD is not set -CONFIG_BLK_DEV_UMEM=m -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_NS87415=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y 
-CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SYM53C8XX_2=y -CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 -CONFIG_SCSI_DEBUG=m -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_LINEAR=y -CONFIG_MD_RAID0=y -CONFIG_MD_RAID1=y -CONFIG_BLK_DEV_DM=m -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_MIRROR=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_FUSION=y -CONFIG_FUSION_SPI=m -CONFIG_FUSION_CTL=m -CONFIG_NETDEVICES=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_TUN=m -CONFIG_ACENIC=m -CONFIG_TIGON3=m -CONFIG_NET_TULIP=y -CONFIG_DE2104X=m -CONFIG_TULIP=y -CONFIG_TULIP_MMIO=y -CONFIG_E100=m -CONFIG_E1000=m -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPPOE=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -# CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_MOUSE_PS2 is not set -CONFIG_SERIO=m -CONFIG_SERIO_LIBPS2=m -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=13 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_HW_RANDOM is not set -CONFIG_RAW_DRIVER=y -# CONFIG_HWMON is not set -CONFIG_FB=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -# CONFIG_LOGO_LINUX_CLUT224 is not set -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_SEQUENCER=y -CONFIG_SND_AD1889=y -CONFIG_USB_HIDDEV=y -CONFIG_USB=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_PRINTER=m -CONFIG_USB_STORAGE=m -CONFIG_USB_STORAGE_USBAT=m -CONFIG_USB_STORAGE_SDDR09=m -CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_MDC800=m -CONFIG_USB_MICROTEK=m -CONFIG_USB_LEGOTOWER=m -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -CONFIG_XFS_FS=m -CONFIG_AUTOFS4_FS=y -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_NFSD=y -CONFIG_NFSD_V3=y -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_UTF8=m -CONFIG_DEBUG_FS=y -CONFIG_HEADERS_CHECK=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_MUTEXES=y -# CONFIG_DEBUG_BUGVERBOSE is not set -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_MD5=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_DES=m -# CONFIG_CRYPTO_HW is not set diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig deleted file mode 100644 index 088ab948a5ca..000000000000 --- a/arch/parisc/configs/c8000_defconfig +++ /dev/null @@ -1,237 +0,0 @@ -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -# CONFIG_CROSS_MEMORY_ATTACH is not set -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_PA8X00=y -CONFIG_64BIT=y -CONFIG_SMP=y -CONFIG_PREEMPT=y -CONFIG_IOMMU_CCIO=y -CONFIG_PCI=y -CONFIG_PCI_LBA=y -# CONFIG_SUPERIO is not set -# CONFIG_CHASSIS_LCD_LED is not set -# CONFIG_PDC_CHASSIS is not set -# CONFIG_PDC_CHASSIS_WARN is not set -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_MISC=m -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_XFRM_SUB_POLICY=y -CONFIG_NET_KEY=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y -CONFIG_NET_IPIP=m -CONFIG_IP_MROUTE=y -CONFIG_IP_PIMSM_V1=y 
-CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_INET_DIAG=m -# CONFIG_IPV6 is not set -CONFIG_IP_DCCP=m -# CONFIG_IP_DCCP_CCID3 is not set -CONFIG_TIPC=m -CONFIG_LLC2=m -CONFIG_DNS_RESOLVER=y -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_STANDALONE is not set -CONFIG_PARPORT=y -CONFIG_PARPORT_PC=y -CONFIG_PARPORT_PC_FIFO=y -CONFIG_BLK_DEV_UMEM=m -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_SX8=m -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=6144 -CONFIG_CDROM_PKTCDVD=m -CONFIG_CDROM_PKTCDVD_WCACHE=y -CONFIG_ATA_OVER_ETH=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_PLATFORM=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_FC_ATTRS=y -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -CONFIG_ATA=y -CONFIG_PATA_SIL680=y -CONFIG_FUSION=y -CONFIG_FUSION_SPI=y -CONFIG_FUSION_SAS=y -CONFIG_NETDEVICES=y -CONFIG_DUMMY=m -CONFIG_NETCONSOLE=m -CONFIG_TUN=y -CONFIG_E1000=y -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -# CONFIG_WLAN is not set -CONFIG_INPUT_FF_MEMLESS=m -# CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_KEYBOARD_HIL_OLD is not set -# CONFIG_KEYBOARD_HIL is not set -# CONFIG_MOUSE_PS2 is not set -CONFIG_INPUT_MISC=y -CONFIG_SERIO_SERPORT=m -CONFIG_SERIO_PARKBD=m -CONFIG_SERIO_GSCPS2=m -# CONFIG_HP_SDC is not set -CONFIG_SERIO_PCIPS2=m -CONFIG_SERIO_LIBPS2=y -CONFIG_SERIO_RAW=m -CONFIG_SERIAL_8250=y -# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=8 -CONFIG_SERIAL_8250_RUNTIME_UARTS=8 -CONFIG_SERIAL_8250_EXTENDED=y -# CONFIG_SERIAL_MUX is not set -CONFIG_SERIAL_JSM=m -CONFIG_PRINTER=y -CONFIG_HW_RANDOM=y -CONFIG_RAW_DRIVER=m -CONFIG_PTP_1588_CLOCK=y -CONFIG_SSB=m -CONFIG_SSB_DRIVER_PCICORE=y -CONFIG_AGP=y -CONFIG_AGP_PARISC=y -CONFIG_DRM=y -CONFIG_DRM_RADEON=y -CONFIG_FIRMWARE_EDID=y -CONFIG_FB_FOREIGN_ENDIAN=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y -# CONFIG_FB_STI is not set -# CONFIG_LCD_CLASS_DEVICE is not set -# CONFIG_BACKLIGHT_GENERIC is not set -CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_STI_CONSOLE is not set -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -# CONFIG_LOGO_LINUX_CLUT224 is not set -CONFIG_SOUND=m -CONFIG_SND=m -CONFIG_SND_VERBOSE_PRINTK=y -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m -CONFIG_SND_AD1889=m -# CONFIG_SND_USB is not set -# CONFIG_SND_GSC is not set -CONFIG_USB=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_STORAGE=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_REISERFS_FS=m -CONFIG_REISERFS_PROC_INFO=y -CONFIG_XFS_FS=m -CONFIG_XFS_POSIX_ACL=y -CONFIG_QUOTA=y -CONFIG_QFMT_V1=m -CONFIG_QFMT_V2=m -CONFIG_AUTOFS4_FS=m -CONFIG_FUSE_FS=m -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_TMPFS_XATTR=y -CONFIG_NFS_FS=m -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m 
-CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_UTF8=m -CONFIG_UNUSED_SYMBOLS=y -CONFIG_DEBUG_FS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_SLAB=y -CONFIG_DEBUG_SLAB_LEAK=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_PANIC_ON_OOPS=y -CONFIG_DEBUG_RT_MUTEXES=y -CONFIG_DEBUG_BLOCK_EXT_DEVT=y -CONFIG_LATENCYTOP=y -CONFIG_KEYS=y -# CONFIG_CRYPTO_HW is not set -CONFIG_FONTS=y diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig deleted file mode 100644 index 52c9050a7c5c..000000000000 --- a/arch/parisc/configs/default_defconfig +++ /dev/null @@ -1,206 +0,0 @@ -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=16 -CONFIG_BLK_DEV_INITRD=y -CONFIG_KALLSYMS_ALL=y -CONFIG_SLAB=y -CONFIG_PROFILING=y -CONFIG_OPROFILE=m -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PA7100LC=y -CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_IOMMU_CCIO=y -CONFIG_GSC_LASI=y -CONFIG_GSC_WAX=y -CONFIG_EISA=y -CONFIG_PCI=y -CONFIG_GSC_DINO=y -CONFIG_PCI_LBA=y -CONFIG_PCCARD=y -CONFIG_YENTA=y -CONFIG_PD6729=y -CONFIG_I82092=y -CONFIG_BINFMT_MISC=m -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_NET_KEY=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_DIAG=m -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_LLC2=m -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_STANDALONE is not set -# CONFIG_PREVENT_FIRMWARE_BUILD is not set -CONFIG_PARPORT=y -CONFIG_PARPORT_PC=m -CONFIG_PARPORT_PC_PCMCIA=m -CONFIG_PARPORT_1284=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_CRYPTOLOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=6144 -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECS=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_NS87415=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_LASI700=y -CONFIG_SCSI_SYM53C8XX_2=y -CONFIG_SCSI_ZALON=y -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_LINEAR=y -CONFIG_MD_RAID0=y -CONFIG_MD_RAID1=y -CONFIG_MD_RAID10=y -CONFIG_BLK_DEV_DM=y -CONFIG_NETDEVICES=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_TUN=m -CONFIG_ACENIC=y -CONFIG_TIGON3=y -CONFIG_NET_TULIP=y -CONFIG_TULIP=y -CONFIG_LASI_82596=y -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPPOE=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -# CONFIG_KEYBOARD_HIL_OLD is not set -CONFIG_MOUSE_SERIAL=y -CONFIG_LEGACY_PTY_COUNT=64 -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_CS=y -CONFIG_SERIAL_8250_NR_UARTS=17 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_PRINTER=m 
-CONFIG_PPDEV=m -# CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set -CONFIG_FB=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y -CONFIG_DUMMY_CONSOLE_COLUMNS=128 -CONFIG_DUMMY_CONSOLE_ROWS=48 -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -# CONFIG_LOGO_LINUX_CLUT224 is not set -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_DYNAMIC_MINORS=y -CONFIG_SND_SEQUENCER=y -CONFIG_SND_AD1889=y -CONFIG_SND_HARMONY=y -CONFIG_HID_GYRATION=y -CONFIG_HID_NTRIG=y -CONFIG_HID_PANTHERLORD=y -CONFIG_HID_PETALYNX=y -CONFIG_HID_SAMSUNG=y -CONFIG_HID_SUNPLUS=y -CONFIG_HID_TOPSEED=y -CONFIG_USB=y -CONFIG_USB_MON=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_UHCI_HCD=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_VFAT_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_NFSD=y -CONFIG_NFSD_V4=y -CONFIG_CIFS=m -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=y -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_UTF8=y -CONFIG_DEBUG_FS=y -CONFIG_HEADERS_CHECK=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MUTEXES=y -CONFIG_KEYS=y -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -# CONFIG_CRYPTO_HW is not set -CONFIG_LIBCRC32C=m -CONFIG_FONTS=y diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig index 37ae4b57c001..5444ce6405f3 100644 --- a/arch/parisc/configs/generic-32bit_defconfig +++ b/arch/parisc/configs/generic-32bit_defconfig @@ -6,29 +6,24 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 +CONFIG_CGROUPS=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y -CONFIG_SYSCTL_SYSCALL=y CONFIG_PERF_EVENTS=y -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_LBDAF is not set -# CONFIG_BLK_DEV_BSG is not set CONFIG_PA7100LC=y CONFIG_SMP=y CONFIG_HZ_100=y CONFIG_IOMMU_CCIO=y CONFIG_GSC_LASI=y CONFIG_GSC_WAX=y -CONFIG_EISA=y -CONFIG_PCI=y CONFIG_GSC_DINO=y CONFIG_PCI_LBA=y -CONFIG_PCCARD=m -CONFIG_YENTA=m # CONFIG_PDC_CHASSIS is not set +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m CONFIG_NET=y @@ -37,18 +32,15 @@ CONFIG_UNIX=y CONFIG_XFRM_USER=m CONFIG_NET_KEY=m CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y 
-CONFIG_IP_PNP_BOOTP=y CONFIG_INET_AH=m CONFIG_INET_ESP=m -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set CONFIG_INET_DIAG=m CONFIG_LLC2=m # CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_EISA=y +CONFIG_PCI=y +CONFIG_PCCARD=m +CONFIG_YENTA=m CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y # CONFIG_STANDALONE is not set @@ -57,22 +49,20 @@ CONFIG_PARPORT=y CONFIG_PARPORT_PC=m CONFIG_PARPORT_1284=y CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=6144 -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_NS87415=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y CONFIG_CHR_DEV_SG=y +# CONFIG_BLK_DEV_BSG is not set CONFIG_SCSI_LASI700=y CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_ZALON=y CONFIG_SCSI_DH=y CONFIG_ATA=y +CONFIG_PATA_NS87415=y +CONFIG_ATA_GENERIC=y CONFIG_MD=y CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m @@ -91,17 +81,13 @@ CONFIG_TUN=m # CONFIG_NET_VENDOR_ALTEON is not set # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_ATHEROS is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_CHELSIO is not set # CONFIG_NET_VENDOR_CISCO is not set CONFIG_NET_TULIP=y CONFIG_TULIP=y # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EXAR is not set -# CONFIG_NET_VENDOR_HP is not set CONFIG_LASI_82596=y # CONFIG_NET_VENDOR_MELLANOX is not set # CONFIG_NET_VENDOR_MICREL is not set @@ -109,10 +95,10 @@ CONFIG_LASI_82596=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set -# CONFIG_NET_PACKET_ENGINE is not set # CONFIG_NET_VENDOR_QLOGIC is not set -# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SILAN is not set # CONFIG_NET_VENDOR_SIS is not set @@ -126,7 +112,6 @@ CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m CONFIG_PPPOE=m # CONFIG_WLAN is not set -CONFIG_INPUT_POLLDEV=y CONFIG_KEYBOARD_HIL_OLD=m CONFIG_KEYBOARD_HIL=m CONFIG_MOUSE_SERIAL=y @@ -134,7 +119,6 @@ CONFIG_INPUT_MISC=y CONFIG_INPUT_UINPUT=m CONFIG_LEGACY_PTY_COUNT=64 CONFIG_SERIAL_8250=y -# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=8 CONFIG_SERIAL_8250_EXTENDED=y @@ -144,18 +128,53 @@ CONFIG_PRINTER=m CONFIG_PPDEV=m # CONFIG_HW_RANDOM is not set CONFIG_I2C=y -# CONFIG_HWMON is not set -CONFIG_AGP=y +CONFIG_HWMON=m +CONFIG_DRM=m +CONFIG_DRM_DISPLAY_DP_AUX_CEC=y +CONFIG_DRM_RADEON=m +CONFIG_DRM_NOUVEAU=m +# CONFIG_DRM_NOUVEAU_BACKLIGHT is not set +# CONFIG_DRM_NOUVEAU_CH7006 is not set +# CONFIG_DRM_NOUVEAU_SIL164 is not set +CONFIG_DRM_VGEM=m +CONFIG_DRM_UDL=m +CONFIG_DRM_MGAG200=m CONFIG_FB=y CONFIG_FB_FOREIGN_ENDIAN=y -CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_PM2=m +CONFIG_FB_PM2_FIFO_DISCONNECT=y +CONFIG_FB_NVIDIA=m +CONFIG_FB_NVIDIA_I2C=y +# CONFIG_FB_NVIDIA_BACKLIGHT is not set +CONFIG_FB_RIVA=m +CONFIG_FB_RIVA_I2C=y +# CONFIG_FB_RIVA_BACKLIGHT is not set CONFIG_FB_MATROX=m +CONFIG_FB_MATROX_MILLENIUM=y +CONFIG_FB_MATROX_MYSTIQUE=y CONFIG_FB_MATROX_G=y +CONFIG_FB_MATROX_I2C=m +CONFIG_FB_MATROX_MAVEN=m +CONFIG_FB_ATY128=m +# CONFIG_FB_ATY128_BACKLIGHT is not set +CONFIG_FB_ATY=m +CONFIG_FB_ATY_CT=y +CONFIG_FB_ATY_GX=y +# 
CONFIG_FB_ATY_BACKLIGHT is not set +CONFIG_FB_S3=m +CONFIG_FB_SAVAGE=m +CONFIG_FB_SAVAGE_I2C=y +CONFIG_FB_SAVAGE_ACCEL=y +CONFIG_FB_SIS=m +CONFIG_FB_SIS_300=y +CONFIG_FB_SIS_315=y CONFIG_FB_VOODOO1=m +CONFIG_FB_TRIDENT=m +CONFIG_FB_SMSCUFX=m +CONFIG_FB_UDL=m CONFIG_DUMMY_CONSOLE_COLUMNS=128 CONFIG_DUMMY_CONSOLE_ROWS=48 CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set @@ -212,12 +231,12 @@ CONFIG_AUXDISPLAY=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y -CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_VFAT_FS=y @@ -227,53 +246,27 @@ CONFIG_TMPFS_XATTR=y CONFIG_NFS_FS=m # CONFIG_NFS_V2 is not set CONFIG_NFSD=m -CONFIG_NFSD_V3=y CONFIG_CIFS=m -CONFIG_CIFS_WEAK_PW_HASH=y CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y # CONFIG_CIFS_DEBUG is not set -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=y -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_UTF8=y -CONFIG_UNUSED_SYMBOLS=y -CONFIG_DEBUG_FS=y +CONFIG_CRYPTO_BENCHMARK=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_DEFLATE=y +CONFIG_FONTS=y +CONFIG_PRINTK_TIME=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DEBUG_SHIRQ=y @@ -283,25 +276,3 @@ CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y CONFIG_LATENCYTOP=y CONFIG_LKDTM=m -CONFIG_KEYS=y -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=y -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRC_CCITT=m -CONFIG_CRC_T10DIF=y -CONFIG_FONTS=y diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig index d39e7f821aba..ce91f9d1fdbf 100644 --- a/arch/parisc/configs/generic-64bit_defconfig +++ b/arch/parisc/configs/generic-64bit_defconfig @@ -1,43 +1,42 @@ CONFIG_LOCALVERSION="-64bit" # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_KERNEL_LZ4=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y 
CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y CONFIG_CGROUPS=y CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y CONFIG_CGROUP_PIDS=y CONFIG_CPUSETS=y +CONFIG_USER_NS=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y -# CONFIG_COMPAT_BRK is not set -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_BLK_DEV_INTEGRITY=y -# CONFIG_IOSCHED_DEADLINE is not set -CONFIG_PA8X00=y -CONFIG_64BIT=y CONFIG_SMP=y -# CONFIG_COMPACTION is not set CONFIG_HPPB=y CONFIG_IOMMU_CCIO=y CONFIG_GSC_LASI=y CONFIG_GSC_WAX=y -CONFIG_PCI=y -CONFIG_PCI_STUB=m -CONFIG_PCI_IOV=y CONFIG_GSC_DINO=y CONFIG_PCI_LBA=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BINFMT_MISC=m +# CONFIG_COMPAT_BRK is not set +# CONFIG_COMPACTION is not set +CONFIG_MEMORY_FAILURE=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -47,26 +46,20 @@ CONFIG_XFRM_MIGRATE=y CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_PNP=y -CONFIG_IP_PNP_BOOTP=y CONFIG_INET_AH=m CONFIG_INET_ESP=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m CONFIG_INET_DIAG=m CONFIG_NETFILTER=y # CONFIG_NETFILTER_ADVANCED is not set CONFIG_NETFILTER_NETLINK_LOG=y CONFIG_DCB=y # CONFIG_WIRELESS is not set +CONFIG_PCI=y +CONFIG_PCI_STUB=m +CONFIG_PCI_IOV=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_BLK_DEV_LOOP=y -CONFIG_IDE=y -CONFIG_IDE_GD=m -CONFIG_IDE_GD_ATAPI=y -CONFIG_BLK_DEV_IDECD=m -CONFIG_BLK_DEV_NS87415=y # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y @@ -74,12 +67,16 @@ CONFIG_SCSI_ISCSI_ATTRS=y CONFIG_SCSI_SRP_ATTRS=y CONFIG_ISCSI_BOOT_SYSFS=y CONFIG_SCSI_MPT2SAS=y -CONFIG_SCSI_LASI700=m +CONFIG_SCSI_LASI700=y CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_ZALON=y CONFIG_SCSI_QLA_ISCSI=m CONFIG_SCSI_DH=y CONFIG_ATA=y +CONFIG_SATA_SIL=y +CONFIG_SATA_SIS=y +CONFIG_SATA_VIA=y +CONFIG_PATA_NS87415=y CONFIG_PATA_SIL680=y CONFIG_ATA_GENERIC=y CONFIG_MD=y @@ -87,6 +84,7 @@ CONFIG_MD_LINEAR=m CONFIG_BLK_DEV_DM=m CONFIG_DM_RAID=m CONFIG_DM_UEVENT=y +CONFIG_DM_AUDIT=y CONFIG_FUSION=y CONFIG_FUSION_SPI=y CONFIG_FUSION_SAS=y @@ -102,19 +100,15 @@ CONFIG_TUN=y # CONFIG_NET_VENDOR_ALTEON is not set # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_ATHEROS is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_CHELSIO is not set # CONFIG_NET_VENDOR_CISCO is not set CONFIG_NET_TULIP=y CONFIG_TULIP=y # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EXAR is not set -CONFIG_HP100=m -CONFIG_E1000=y CONFIG_LASI_82596=y +CONFIG_E1000=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MELLANOX is not set # CONFIG_NET_VENDOR_MICREL is not set @@ -124,9 +118,9 @@ CONFIG_LASI_82596=y # CONFIG_NET_VENDOR_OKI is not set CONFIG_QLA3XXX=m CONFIG_QLCNIC=m -CONFIG_QLGE=m -# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SILAN is not set # CONFIG_NET_VENDOR_SIS is not set @@ -137,14 +131,13 @@ CONFIG_QLGE=m # CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_VIA is not set # 
CONFIG_NET_VENDOR_WIZNET is not set -CONFIG_MDIO_BITBANG=m CONFIG_PHYLIB=y CONFIG_BROADCOM_PHY=m CONFIG_CICADA_PHY=m CONFIG_DAVICOM_PHY=m CONFIG_ICPLUS_PHY=m -CONFIG_LSI_ET1011C_PHY=m CONFIG_LXT_PHY=m +CONFIG_LSI_ET1011C_PHY=m CONFIG_MARVELL_PHY=m CONFIG_NATIONAL_PHY=m CONFIG_QSEMI_PHY=m @@ -152,10 +145,8 @@ CONFIG_REALTEK_PHY=m CONFIG_SMSC_PHY=m CONFIG_STE10XP=m CONFIG_VITESSE_PHY=m +CONFIG_MDIO_BITBANG=m CONFIG_SLIP=m -CONFIG_SLIP_COMPRESSED=y -CONFIG_SLIP_SMART=y -CONFIG_SLIP_MODE_SLIP6=y # CONFIG_WLAN is not set CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_HIL_OLD is not set @@ -166,9 +157,7 @@ CONFIG_SERIO_SERPORT=m # CONFIG_HP_SDC is not set CONFIG_SERIO_RAW=m # CONFIG_LEGACY_PTYS is not set -CONFIG_NOZOMI=m CONFIG_SERIAL_8250=y -# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=8 CONFIG_SERIAL_8250_RUNTIME_UARTS=8 @@ -176,6 +165,7 @@ CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_JSM=m +CONFIG_NOZOMI=m CONFIG_IPMI_HANDLER=y CONFIG_IPMI_DEVICE_INTERFACE=y CONFIG_IPMI_SI=y @@ -193,7 +183,6 @@ CONFIG_WATCHDOG=y CONFIG_SOFT_WATCHDOG=m CONFIG_SSB=m CONFIG_SSB_DRIVER_PCICORE=y -CONFIG_HTC_PASIC3=m CONFIG_LPC_SCH=m CONFIG_MFD_SM501=m CONFIG_REGULATOR=y @@ -204,32 +193,54 @@ CONFIG_AGP=y CONFIG_AGP_PARISC=y CONFIG_DRM=y CONFIG_DRM_RADEON=y -CONFIG_FIRMWARE_EDID=y -CONFIG_FB_MODE_HELPERS=y -# CONFIG_BACKLIGHT_GENERIC is not set -CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_DRM_NOUVEAU=m +# CONFIG_DRM_NOUVEAU_BACKLIGHT is not set +# CONFIG_DRM_NOUVEAU_CH7006 is not set +# CONFIG_DRM_NOUVEAU_SIL164 is not set +CONFIG_DRM_MGAG200=m +CONFIG_FB=y +CONFIG_FB_PM2=m +CONFIG_FB_PM2_FIFO_DISCONNECT=y +CONFIG_FB_NVIDIA=m +CONFIG_FB_NVIDIA_I2C=y +# CONFIG_FB_NVIDIA_BACKLIGHT is not set +CONFIG_FB_RIVA=m +CONFIG_FB_RIVA_I2C=y +# CONFIG_FB_RIVA_BACKLIGHT is not set +CONFIG_FB_MATROX=m +CONFIG_FB_MATROX_MILLENIUM=y +CONFIG_FB_MATROX_MYSTIQUE=y +CONFIG_FB_MATROX_G=y +CONFIG_FB_MATROX_I2C=m +CONFIG_FB_MATROX_MAVEN=m +CONFIG_FB_RADEON=y +# CONFIG_FB_RADEON_BACKLIGHT is not set +CONFIG_FB_ATY128=m +# CONFIG_FB_ATY128_BACKLIGHT is not set +CONFIG_FB_ATY=m +CONFIG_FB_ATY_CT=y +CONFIG_FB_ATY_GX=y +# CONFIG_FB_ATY_BACKLIGHT is not set +CONFIG_FB_S3=m +CONFIG_FB_SAVAGE=m +CONFIG_FB_SAVAGE_I2C=y +CONFIG_FB_SAVAGE_ACCEL=y +CONFIG_FB_SIS=m +CONFIG_FB_SIS_300=y +CONFIG_FB_SIS_315=y +CONFIG_FB_VOODOO1=m +CONFIG_FB_TRIDENT=m +CONFIG_FB_SMSCUFX=m +CONFIG_FB_UDL=m CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_CLUT224 is not set CONFIG_HIDRAW=y CONFIG_HID_PID=y CONFIG_USB_HIDDEV=y CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_DYNAMIC_MINORS=y -CONFIG_USB_MON=m -CONFIG_USB_WUSB_CBAF=m -CONFIG_USB_XHCI_HCD=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_OHCI_HCD=y -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_TRIGGERS=y -CONFIG_LEDS_TRIGGER_TIMER=y -CONFIG_LEDS_TRIGGER_ONESHOT=y -CONFIG_LEDS_TRIGGER_DISK=y -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m +CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_UIO=y CONFIG_UIO_PDRV_GENIRQ=m CONFIG_UIO_AEC=m @@ -239,14 +250,14 @@ CONFIG_STAGING=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_BTRFS_FS=m CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y -CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=y CONFIG_CUSE=y CONFIG_ISO9660_FS=y @@ -256,7 
+267,6 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_XATTR=y CONFIG_CONFIGFS_FS=y -CONFIG_SYSV_FS=y CONFIG_NFS_FS=m CONFIG_NFS_V4=m CONFIG_NFS_V4_1=y @@ -272,23 +282,18 @@ CONFIG_NLS_ASCII=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_ISO8859_2=m CONFIG_NLS_UTF8=m -CONFIG_PRINTK_TIME=y -CONFIG_STRIP_ASM_SYMS=y -CONFIG_UNUSED_SYMBOLS=y -CONFIG_DEBUG_FS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_STACKOVERFLOW=y -# CONFIG_SCHED_DEBUG is not set -CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_DEFLATE=m # CONFIG_CRYPTO_HW is not set -CONFIG_CRC_CCITT=m -CONFIG_LIBCRC32C=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_KERNEL=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_STACKOVERFLOW=y +# CONFIG_SCHED_DEBUG is not set diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 0b1e354c8c24..4fb596d94c89 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -1,28 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 generated-y += syscall_table_32.h generated-y += syscall_table_64.h -generated-y += syscall_table_c32.h -generic-y += barrier.h -generic-y += current.h -generic-y += device.h -generic-y += div64.h -generic-y += emergency-restart.h -generic-y += exec.h -generic-y += hw_irq.h -generic-y += irq_regs.h -generic-y += irq_work.h -generic-y += kdebug.h -generic-y += kprobes.h -generic-y += local.h -generic-y += local64.h +generic-y += agp.h +generic-y += kvm_para.h generic-y += mcs_spinlock.h -generic-y += mm-arch-hooks.h -generic-y += percpu.h -generic-y += preempt.h -generic-y += seccomp.h -generic-y += segment.h -generic-y += topology.h -generic-y += trace_clock.h generic-y += user.h -generic-y += vga.h -generic-y += word-at-a-time.h -generic-y += xor.h diff --git a/arch/parisc/include/asm/agp.h b/arch/parisc/include/asm/agp.h deleted file mode 100644 index cb04470e63d0..000000000000 --- a/arch/parisc/include/asm/agp.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_PARISC_AGP_H -#define _ASM_PARISC_AGP_H - -/* - * PARISC specific AGP definitions. - * Copyright (c) 2006 Kyle McMartin <kyle@parisc-linux.org> - * - */ - -#define map_page_into_agp(page) /* nothing */ -#define unmap_page_from_agp(page) /* nothing */ -#define flush_agp_cache() mb() - -/* GATT allocation. Returns/accepts GATT kernel virtual address. 
*/ -#define alloc_gatt_pages(order) \ - ((char *)__get_free_pages(GFP_KERNEL, (order))) -#define free_gatt_pages(table, order) \ - free_pages((unsigned long)(table), (order)) - -#endif /* _ASM_PARISC_AGP_H */ diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h index 793d8baa3a10..1601ae4b888d 100644 --- a/arch/parisc/include/asm/alternative.h +++ b/arch/parisc/include/asm/alternative.h @@ -8,11 +8,12 @@ #define ALT_COND_NO_ICACHE 0x04 /* if system has no i-cache */ #define ALT_COND_NO_SPLIT_TLB 0x08 /* if split_tlb == 0 */ #define ALT_COND_NO_IOC_FDC 0x10 /* if I/O cache does not need flushes */ +#define ALT_COND_RUN_ON_QEMU 0x20 /* if running on QEMU */ #define INSN_PxTLB 0x02 /* modify pdtlb, pitlb */ #define INSN_NOP 0x08000240 /* nop */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/init.h> #include <linux/types.h> @@ -21,10 +22,10 @@ struct alt_instr { s32 orig_offset; /* offset to original instructions */ - u32 len; /* end of original instructions */ - u32 cond; /* see ALT_COND_XXX */ + s16 len; /* end of original instructions */ + u16 cond; /* see ALT_COND_XXX */ u32 replacement; /* replacement instruction or code */ -}; +} __packed; void set_kernel_text_rw(int enable_read_write); void apply_alternatives_all(void); @@ -33,19 +34,33 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end, /* Alternative SMP implementation. */ #define ALTERNATIVE(cond, replacement) "!0:" \ - ".section .altinstructions, \"aw\" !" \ - ".word (0b-4-.), 1, " __stringify(cond) "," \ - __stringify(replacement) " !" \ + ".section .altinstructions, \"a\" !" \ + ".align 4 !" \ + ".word (0b-4-.) !" \ + ".hword 1, " __stringify(cond) " !" \ + ".word " __stringify(replacement) " !" \ ".previous" #else +/* to replace one single instructions by a new instruction */ #define ALTERNATIVE(from, to, cond, replacement)\ - .section .altinstructions, "aw" ! \ - .word (from - .), (to - from)/4 ! \ - .word cond, replacement ! \ + .section .altinstructions, "a" ! \ + .align 4 ! \ + .word (from - .) ! \ + .hword (to - from)/4, cond ! \ + .word replacement ! \ .previous -#endif /* __ASSEMBLY__ */ +/* to replace multiple instructions by new code */ +#define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\ + .section .altinstructions, "a" ! \ + .align 4 ! \ + .word (from - .) ! \ + .hword -num_instructions, cond ! \ + .word (new_instr_ptr - .) ! \ + .previous + +#endif /* __ASSEMBLER__ */ #endif /* __ASM_PARISC_ALTERNATIVE_H */ diff --git a/arch/parisc/include/asm/asmregs.h b/arch/parisc/include/asm/asmregs.h index d93c646e1887..81d8029d8fc2 100644 --- a/arch/parisc/include/asm/asmregs.h +++ b/arch/parisc/include/asm/asmregs.h @@ -1,19 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef _PARISC_ASMREGS_H diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h index c17ec0ee6e7c..c20261604f09 100644 --- a/arch/parisc/include/asm/assembly.h +++ b/arch/parisc/include/asm/assembly.h @@ -1,53 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org> * Copyright (C) 1999 SuSE GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * Copyright (C) 2021 Helge Deller <deller@gmx.de> */ #ifndef _PARISC_ASSEMBLY_H #define _PARISC_ASSEMBLY_H -#define CALLEE_FLOAT_FRAME_SIZE 80 - #ifdef CONFIG_64BIT -#define LDREG ldd -#define STREG std -#define LDREGX ldd,s -#define LDREGM ldd,mb -#define STREGM std,ma -#define SHRREG shrd -#define SHLREG shld -#define ANDCM andcm,* -#define COND(x) * ## x #define RP_OFFSET 16 #define FRAME_SIZE 128 #define CALLEE_REG_FRAME_SIZE 144 #define REG_SZ 8 #define ASM_ULONG_INSN .dword #else /* CONFIG_64BIT */ -#define LDREG ldw -#define STREG stw -#define LDREGX ldwx,s -#define LDREGM ldwm -#define STREGM stwm -#define SHRREG shr -#define SHLREG shlw -#define ANDCM andcm -#define COND(x) x #define RP_OFFSET 20 #define FRAME_SIZE 64 #define CALLEE_REG_FRAME_SIZE 128 @@ -55,23 +23,59 @@ #define ASM_ULONG_INSN .word #endif +/* Frame alignment for 32- and 64-bit */ +#define FRAME_ALIGN 64 + +#define CALLEE_FLOAT_FRAME_SIZE 80 #define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE) #ifdef CONFIG_PA20 #define LDCW ldcw,co #define BL b,l # ifdef CONFIG_64BIT -# define LEVEL 2.0w +# define PA_ASM_LEVEL 2.0w # else -# define LEVEL 2.0 +# define PA_ASM_LEVEL 2.0 # endif #else #define LDCW ldcw #define BL bl -#define LEVEL 1.1 +#define PA_ASM_LEVEL 1.1 #endif -#ifdef __ASSEMBLY__ +/* Privilege level field in the rightmost two bits of the IA queues */ +#define PRIV_USER 3 +#define PRIV_KERNEL 0 + +/* Space register used inside kernel */ +#define SR_KERNEL 0 +#define SR_TEMP1 1 +#define SR_TEMP2 2 +#define SR_USER 3 + +#ifdef __ASSEMBLER__ + +#ifdef CONFIG_64BIT +#define LDREG ldd +#define STREG std +#define LDREGX ldd,s +#define LDREGM ldd,mb +#define STREGM std,ma +#define SHRREG shrd +#define SHLREG shld +#define ANDCM andcm,* +#define COND(x) * ## x +#else /* CONFIG_64BIT */ +#define LDREG ldw +#define STREG stw +#define LDREGX ldwx,s +#define LDREGM ldwm +#define STREGM stwm +#define SHRREG shr +#define SHLREG shlw +#define ANDCM andcm +#define COND(x) x +#endif #ifdef CONFIG_64BIT /* the 64-bit pa gnu assembler unfortunately defaults to .level 1.1 or 2.0 so @@ -84,10 +88,7 @@ #include <asm/types.h> #include <asm/asmregs.h> - - sp = 30 - gp = 27 - ipsw = 22 +#include <asm/psw.h> /* * We provide two versions of each macro to convert from physical @@ -96,26 +97,28 @@ * version takes two arguments: a src and destination register. 
* However, the source and destination registers can not be * the same register. + * + * We use add,l to avoid clobbering the C/B bits in the PSW. */ .macro tophys grvirt, grphys - ldil L%(__PAGE_OFFSET), \grphys - sub \grvirt, \grphys, \grphys + ldil L%(-__PAGE_OFFSET), \grphys + addl \grvirt, \grphys, \grphys .endm - + .macro tovirt grphys, grvirt ldil L%(__PAGE_OFFSET), \grvirt - add \grphys, \grvirt, \grvirt + addl \grphys, \grvirt, \grvirt .endm .macro tophys_r1 gr - ldil L%(__PAGE_OFFSET), %r1 - sub \gr, %r1, \gr + ldil L%(-__PAGE_OFFSET), %r1 + addl \gr, %r1, \gr .endm - + .macro tovirt_r1 gr ldil L%(__PAGE_OFFSET), %r1 - add \gr, %r1, \gr + addl \gr, %r1, \gr .endm .macro delay value @@ -138,7 +141,7 @@ depd,z \r, 63-(\sa), 64-(\sa), \t .endm - /* Shift Right - note the r and t can NOT be the same! */ + /* Shift Right for 32-bit. Clobbers upper 32-bit on PA2.0. */ .macro shr r, sa, t extru \r, 31-(\sa), 32-(\sa), \t .endm @@ -148,6 +151,37 @@ extrd,u \r, 63-(\sa), 64-(\sa), \t .endm + /* Extract unsigned for 32- and 64-bit + * The extru instruction leaves the most significant 32 bits of the + * target register in an undefined state on PA 2.0 systems. */ + .macro extru_safe r, p, len, t +#ifdef CONFIG_64BIT + extrd,u \r, 32+(\p), \len, \t +#else + extru \r, \p, \len, \t +#endif + .endm + + /* The depi instruction leaves the most significant 32 bits of the + * target register in an undefined state on PA 2.0 systems. */ + .macro depi_safe i, p, len, t +#ifdef CONFIG_64BIT + depdi \i, 32+(\p), \len, \t +#else + depi \i, \p, \len, \t +#endif + .endm + + /* The depw instruction leaves the most significant 32 bits of the + * target register in an undefined state on PA 2.0 systems. */ + .macro dep_safe i, p, len, t +#ifdef CONFIG_64BIT + depd \i, 32+(\p), \len, \t +#else + depw \i, \p, \len, \t +#endif + .endm + /* load 32-bit 'value' into 'reg' compensating for the ldil * sign-extension when running in wide mode. * WARNING!! neither 'value' nor 'reg' can be expressions @@ -510,6 +544,30 @@ nop /* 7 */ .endm + /* Switch to virtual mapping, trashing only %r1 */ + .macro virt_map + /* pcxt_ssm_bug */ + rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */ + mtsp %r0, %sr4 + mtsp %r0, %sr5 + mtsp %r0, %sr6 + tovirt_r1 %r29 + load32 KERNEL_PSW, %r1 + + rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */ + mtctl %r0, %cr17 /* Clear IIASQ tail */ + mtctl %r0, %cr17 /* Clear IIASQ head */ + mtctl %r1, %ipsw + load32 4f, %r1 + mtctl %r1, %cr18 /* Set IIAOQ tail */ + ldo 4(%r1), %r1 + mtctl %r1, %cr18 /* Set IIAOQ head */ + rfir + nop +4: + .endm + + /* * ASM_EXCEPTIONTABLE_ENTRY * @@ -518,9 +576,11 @@ */ #define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \ .section __ex_table,"aw" ! \ + .align 4 ! \ .word (fault_addr - .), (except_addr - .) ! \ + or %r0,%r0,%r0 ! 
\ .previous -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 118953d41763..d4f023887ff8 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -34,13 +34,13 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; /* Can't use raw_spin_lock_irq because of #include problems, so * this is the substitute */ #define _atomic_spin_lock_irqsave(l,f) do { \ - arch_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ arch_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ - arch_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ arch_spin_unlock(s); \ local_irq_restore(f); \ } while(0) @@ -56,7 +56,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; * are atomic, so a reader never sees inconsistent values. */ -static __inline__ void atomic_set(atomic_t *v, int i) +static __inline__ void arch_atomic_set(atomic_t *v, int i) { unsigned long flags; _atomic_spin_lock_irqsave(v, flags); @@ -66,29 +66,25 @@ static __inline__ void atomic_set(atomic_t *v, int i) _atomic_spin_unlock_irqrestore(v, flags); } -#define atomic_set_release(v, i) atomic_set((v), (i)) +#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i)) -static __inline__ int atomic_read(const atomic_t *v) +static __inline__ int arch_atomic_read(const atomic_t *v) { return READ_ONCE((v)->counter); } -/* exported interface */ -#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - #define ATOMIC_OP(op, c_op) \ -static __inline__ void atomic_##op(int i, atomic_t *v) \ +static __inline__ void arch_atomic_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ \ _atomic_spin_lock_irqsave(v, flags); \ v->counter c_op i; \ _atomic_spin_unlock_irqrestore(v, flags); \ -} \ +} #define ATOMIC_OP_RETURN(op, c_op) \ -static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ +static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned long flags; \ int ret; \ @@ -101,7 +97,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ } #define ATOMIC_FETCH_OP(op, c_op) \ -static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \ +static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ int ret; \ @@ -122,6 +118,11 @@ static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \ ATOMIC_OPS(add, +=) ATOMIC_OPS(sub, -=) +#define arch_atomic_add_return arch_atomic_add_return +#define arch_atomic_sub_return arch_atomic_sub_return +#define arch_atomic_fetch_add arch_atomic_fetch_add +#define arch_atomic_fetch_sub arch_atomic_fetch_sub + #undef ATOMIC_OPS #define ATOMIC_OPS(op, c_op) \ ATOMIC_OP(op, c_op) \ @@ -131,29 +132,31 @@ ATOMIC_OPS(and, &=) ATOMIC_OPS(or, |=) ATOMIC_OPS(xor, ^=) +#define arch_atomic_fetch_and arch_atomic_fetch_and +#define arch_atomic_fetch_or arch_atomic_fetch_or +#define arch_atomic_fetch_xor arch_atomic_fetch_xor + #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define ATOMIC_INIT(i) { (i) } - #ifdef CONFIG_64BIT #define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_OP(op, c_op) \ -static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \ +static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \ { \ unsigned long flags; \ \ _atomic_spin_lock_irqsave(v, flags); \ v->counter 
c_op i; \ _atomic_spin_unlock_irqrestore(v, flags); \ -} \ +} #define ATOMIC64_OP_RETURN(op, c_op) \ -static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \ +static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \ { \ unsigned long flags; \ s64 ret; \ @@ -166,7 +169,7 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \ } #define ATOMIC64_FETCH_OP(op, c_op) \ -static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \ +static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \ { \ unsigned long flags; \ s64 ret; \ @@ -187,6 +190,11 @@ static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \ ATOMIC64_OPS(add, +=) ATOMIC64_OPS(sub, -=) +#define arch_atomic64_add_return arch_atomic64_add_return +#define arch_atomic64_sub_return arch_atomic64_sub_return +#define arch_atomic64_fetch_add arch_atomic64_fetch_add +#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub + #undef ATOMIC64_OPS #define ATOMIC64_OPS(op, c_op) \ ATOMIC64_OP(op, c_op) \ @@ -196,13 +204,17 @@ ATOMIC64_OPS(and, &=) ATOMIC64_OPS(or, |=) ATOMIC64_OPS(xor, ^=) +#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor + #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP static __inline__ void -atomic64_set(atomic64_t *v, s64 i) +arch_atomic64_set(atomic64_t *v, s64 i) { unsigned long flags; _atomic_spin_lock_irqsave(v, flags); @@ -212,17 +224,14 @@ atomic64_set(atomic64_t *v, s64 i) _atomic_spin_unlock_irqrestore(v, flags); } +#define arch_atomic64_set_release(v, i) arch_atomic64_set((v), (i)) + static __inline__ s64 -atomic64_read(const atomic64_t *v) +arch_atomic64_read(const atomic64_t *v) { return READ_ONCE((v)->counter); } -/* exported interface */ -#define atomic64_cmpxchg(v, o, n) \ - ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) -#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) - #endif /* !CONFIG_64BIT */ diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h index dbaaca84f27f..519b1903c5ed 100644 --- a/arch/parisc/include/asm/barrier.h +++ b/arch/parisc/include/asm/barrier.h @@ -2,11 +2,15 @@ #ifndef __ASM_BARRIER_H #define __ASM_BARRIER_H -#ifndef __ASSEMBLY__ +#include <asm/alternative.h> + +#ifndef __ASSEMBLER__ /* The synchronize caches instruction executes as a nop on systems in which all memory references are performed in order. 
*/ -#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory") +#define synchronize_caches() asm volatile("sync" \ + ALTERNATIVE(ALT_COND_NO_SMP, INSN_NOP) \ + : : : "memory") #if defined(CONFIG_SMP) #define mb() do { synchronize_caches(); } while (0) @@ -26,7 +30,68 @@ #define __smp_rmb() mb() #define __smp_wmb() mb() +#define __smp_store_release(p, v) \ +do { \ + typeof(p) __p = (p); \ + union { typeof(*p) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(*p)) (v) }; \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("stb,ma %0,0(%1)" \ + : : "r"(*(__u8 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("sth,ma %0,0(%1)" \ + : : "r"(*(__u16 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("stw,ma %0,0(%1)" \ + : : "r"(*(__u32 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("std,ma %0,0(%1)" \ + : : "r"(*(__u64 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + } \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + union { typeof(*p) __val; char __c[1]; } __u; \ + typeof(p) __p = (p); \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("ldb,ma 0(%1),%0" \ + : "=r"(*(__u8 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("ldh,ma 0(%1),%0" \ + : "=r"(*(__u16 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("ldw,ma 0(%1),%0" \ + : "=r"(*(__u32 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("ldd,ma 0(%1),%0" \ + : "=r"(*(__u64 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + } \ + __u.__val; \ +}) #include <asm-generic/barrier.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_BARRIER_H */ diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h index a09eaebfdfd0..bd1280a8a5ec 100644 --- a/arch/parisc/include/asm/bitops.h +++ b/arch/parisc/include/asm/bitops.h @@ -12,21 +12,6 @@ #include <asm/barrier.h> #include <linux/atomic.h> -/* - * HP-PARISC specific bit operations - * for a detailed description of the functions please refer - * to include/asm-i386/bitops.h or kerneldoc - */ - -#if __BITS_PER_LONG == 64 -#define SHIFT_PER_LONG 6 -#else -#define SHIFT_PER_LONG 5 -#endif - -#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) - - /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion * on use of volatile and __*_bit() (set/clear/change): * *_bit() want use of volatile. 
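A minimal stand-alone sketch, not part of the patch: the bitops.h hunks that follow drop the parisc-private SHIFT_PER_LONG/CHOP_SHIFTCOUNT helpers removed above in favour of the generic BIT_WORD()/BIT_MASK() macros. The user-space program below restates both sets of macros locally (it includes no kernel headers, and the generic definitions are reproduced here as assumed from <linux/bits.h>) and checks that they yield the same word offset and bit mask for every bit number, which is why the substitution in set_bit()/clear_bit()/test_and_*_bit() is behaviour-preserving.

/* Illustrative sketch only; macro bodies restated for comparison. */
#include <stdio.h>

#define BITS_PER_LONG		(8 * (int)sizeof(unsigned long))
/* old parisc-private helpers (removed by this patch) */
#define SHIFT_PER_LONG		(BITS_PER_LONG == 64 ? 6 : 5)
#define CHOP_SHIFTCOUNT(x)	(((unsigned long)(x)) & (BITS_PER_LONG - 1))
/* generic replacements used by the new code */
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))

int main(void)
{
	unsigned int nr;

	for (nr = 0; nr < 4 * BITS_PER_LONG; nr++) {
		/* word offset: addr += nr >> SHIFT_PER_LONG  vs.  addr += BIT_WORD(nr) */
		if ((nr >> SHIFT_PER_LONG) != BIT_WORD(nr))
			return 1;
		/* bit mask: 1UL << CHOP_SHIFTCOUNT(nr)  vs.  BIT_MASK(nr) */
		if ((1UL << CHOP_SHIFTCOUNT(nr)) != BIT_MASK(nr))
			return 1;
	}
	printf("old and generic bit helpers agree for all tested bit numbers\n");
	return 0;
}

Compiled as an ordinary user-space program, it prints the confirmation line on both 32-bit and 64-bit hosts.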
@@ -35,10 +20,10 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr) { - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); + unsigned long mask = BIT_MASK(nr); unsigned long flags; - addr += (nr >> SHIFT_PER_LONG); + addr += BIT_WORD(nr); _atomic_spin_lock_irqsave(addr, flags); *addr |= mask; _atomic_spin_unlock_irqrestore(addr, flags); @@ -46,21 +31,21 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr) static __inline__ void clear_bit(int nr, volatile unsigned long * addr) { - unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); + unsigned long mask = BIT_MASK(nr); unsigned long flags; - addr += (nr >> SHIFT_PER_LONG); + addr += BIT_WORD(nr); _atomic_spin_lock_irqsave(addr, flags); - *addr &= mask; + *addr &= ~mask; _atomic_spin_unlock_irqrestore(addr, flags); } static __inline__ void change_bit(int nr, volatile unsigned long * addr) { - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); + unsigned long mask = BIT_MASK(nr); unsigned long flags; - addr += (nr >> SHIFT_PER_LONG); + addr += BIT_WORD(nr); _atomic_spin_lock_irqsave(addr, flags); *addr ^= mask; _atomic_spin_unlock_irqrestore(addr, flags); @@ -68,12 +53,12 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr) static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) { - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); + unsigned long mask = BIT_MASK(nr); unsigned long old; unsigned long flags; int set; - addr += (nr >> SHIFT_PER_LONG); + addr += BIT_WORD(nr); _atomic_spin_lock_irqsave(addr, flags); old = *addr; set = (old & mask) ? 1 : 0; @@ -86,12 +71,12 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) { - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); + unsigned long mask = BIT_MASK(nr); unsigned long old; unsigned long flags; int set; - addr += (nr >> SHIFT_PER_LONG); + addr += BIT_WORD(nr); _atomic_spin_lock_irqsave(addr, flags); old = *addr; set = (old & mask) ? 1 : 0; @@ -104,11 +89,11 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) { - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); + unsigned long mask = BIT_MASK(nr); unsigned long oldbit; unsigned long flags; - addr += (nr >> SHIFT_PER_LONG); + addr += BIT_WORD(nr); _atomic_spin_lock_irqsave(addr, flags); oldbit = *addr; *addr = oldbit ^ mask; @@ -119,8 +104,6 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) #include <asm-generic/bitops/non-atomic.h> -#ifdef __KERNEL__ - /** * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1". * @word: The word to search @@ -140,7 +123,7 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) * cycles for each mispredicted branch. */ -static __inline__ unsigned long __ffs(unsigned long x) +static __inline__ __attribute_const__ unsigned long __ffs(unsigned long x) { unsigned long ret; @@ -178,7 +161,7 @@ static __inline__ unsigned long __ffs(unsigned long x) * This is defined the same way as the libc and compiler builtin * ffs routines, therefore differs in spirit from the above ffz (man ffs). */ -static __inline__ int ffs(int x) +static __inline__ __attribute_const__ int ffs(int x) { return x ? (__ffs((unsigned long)x) + 1) : 0; } @@ -188,7 +171,7 @@ static __inline__ int ffs(int x) * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 
*/ -static __inline__ int fls(unsigned int x) +static __inline__ __attribute_const__ int fls(unsigned int x) { int ret; if (!x) @@ -220,16 +203,7 @@ static __inline__ int fls(unsigned int x) #include <asm-generic/bitops/hweight.h> #include <asm-generic/bitops/lock.h> #include <asm-generic/bitops/sched.h> - -#endif /* __KERNEL__ */ - -#include <asm-generic/bitops/find.h> - -#ifdef __KERNEL__ - #include <asm-generic/bitops/le.h> #include <asm-generic/bitops/ext2-atomic-setbit.h> -#endif /* __KERNEL__ */ - #endif /* _PARISC_BITOPS_H */ diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h index 4b6d60b94124..5cf35489ad80 100644 --- a/arch/parisc/include/asm/bug.h +++ b/arch/parisc/include/asm/bug.h @@ -2,8 +2,6 @@ #ifndef _PARISC_BUG_H #define _PARISC_BUG_H -#include <linux/kernel.h> /* for BUGFLAG_TAINT */ - /* * Tell the user there is some problem. * The offending file and line are encoded in the __bug_table section. @@ -17,24 +15,27 @@ #define PARISC_BUG_BREAK_ASM "break 0x1f, 0x1fff" #define PARISC_BUG_BREAK_INSN 0x03ffe01f /* PARISC_BUG_BREAK_ASM */ -#if defined(CONFIG_64BIT) -#define ASM_WORD_INSN ".dword\t" +#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS +# define __BUG_REL(val) ".word " __stringify(val) " - ." #else -#define ASM_WORD_INSN ".word\t" +# define __BUG_REL(val) ".word " __stringify(val) #endif + #ifdef CONFIG_DEBUG_BUGVERBOSE #define BUG() \ do { \ asm volatile("\n" \ "1:\t" PARISC_BUG_BREAK_ASM "\n" \ - "\t.pushsection __bug_table,\"aw\"\n" \ - "2:\t" ASM_WORD_INSN "1b, %c0\n" \ - "\t.short %c1, %c2\n" \ - "\t.org 2b+%c3\n" \ + "\t.pushsection __bug_table,\"a\"\n" \ + "\t.align 4\n" \ + "2:\t" __BUG_REL(1b) "\n" \ + "\t" __BUG_REL(%c0) "\n" \ + "\t.short %1, %2\n" \ + "\t.blockz %3-2*4-2*2\n" \ "\t.popsection" \ : : "i" (__FILE__), "i" (__LINE__), \ - "i" (0), "i" (sizeof(struct bug_entry)) ); \ + "i" (0), "i" (sizeof(struct bug_entry)) ); \ unreachable(); \ } while(0) @@ -47,28 +48,31 @@ #endif #ifdef CONFIG_DEBUG_BUGVERBOSE -#define __WARN_FLAGS(flags) \ +#define __WARN_FLAGS(cond_str, flags) \ do { \ asm volatile("\n" \ "1:\t" PARISC_BUG_BREAK_ASM "\n" \ - "\t.pushsection __bug_table,\"aw\"\n" \ - "2:\t" ASM_WORD_INSN "1b, %c0\n" \ - "\t.short %c1, %c2\n" \ - "\t.org 2b+%c3\n" \ + "\t.pushsection __bug_table,\"a\"\n" \ + "\t.align 4\n" \ + "2:\t" __BUG_REL(1b) "\n" \ + "\t" __BUG_REL(%c0) "\n" \ + "\t.short %1, %2\n" \ + "\t.blockz %3-2*4-2*2\n" \ "\t.popsection" \ - : : "i" (__FILE__), "i" (__LINE__), \ + : : "i" (WARN_CONDITION_STR(cond_str) __FILE__), "i" (__LINE__), \ "i" (BUGFLAG_WARNING|(flags)), \ "i" (sizeof(struct bug_entry)) ); \ } while(0) #else -#define __WARN_FLAGS(flags) \ +#define __WARN_FLAGS(cond_str, flags) \ do { \ asm volatile("\n" \ "1:\t" PARISC_BUG_BREAK_ASM "\n" \ - "\t.pushsection __bug_table,\"aw\"\n" \ - "2:\t" ASM_WORD_INSN "1b\n" \ - "\t.short %c0\n" \ - "\t.org 2b+%c1\n" \ + "\t.pushsection __bug_table,\"a\"\n" \ + "\t.align 4\n" \ + "2:\t" __BUG_REL(1b) "\n" \ + "\t.short %0\n" \ + "\t.blockz %1-4-2\n" \ "\t.popsection" \ : : "i" (BUGFLAG_WARNING|(flags)), \ "i" (sizeof(struct bug_entry)) ); \ diff --git a/arch/parisc/include/asm/bugs.h b/arch/parisc/include/asm/bugs.h deleted file mode 100644 index 0a7f9db6bd1c..000000000000 --- a/arch/parisc/include/asm/bugs.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * include/asm-parisc/bugs.h - * - * Copyright (C) 1999 Mike Shaver - */ - -/* - * This is included by init/main.c to check for architecture-dependent bugs. 
- * - * Needs: - * void check_bugs(void); - */ - -#include <asm/processor.h> - -static inline void check_bugs(void) -{ -// identify_cpu(&boot_cpu_data); -} diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h index 006fb939cac8..3f8d3be6ef24 100644 --- a/arch/parisc/include/asm/cache.h +++ b/arch/parisc/include/asm/cache.h @@ -16,16 +16,22 @@ #define L1_CACHE_BYTES 16 #define L1_CACHE_SHIFT 4 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define SMP_CACHE_BYTES L1_CACHE_BYTES -#define ARCH_DMA_MINALIGN L1_CACHE_BYTES +#ifdef CONFIG_PA20 +#define ARCH_DMA_MINALIGN 128 +#else +#define ARCH_DMA_MINALIGN 32 +#endif +#define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */ -#define __read_mostly __attribute__((__section__(".data..read_mostly"))) +#define arch_slab_minalign() ((unsigned)dcache_stride) +#define cache_line_size() dcache_stride +#define dma_get_cache_alignment cache_line_size -/* Read-only memory is marked before mark_rodata_ro() is called. */ -#define __ro_after_init __read_mostly +#define __read_mostly __section(".data..read_mostly") void parisc_cache_init(void); /* initializes cache-flushing */ void disable_sr_hashing_asm(int); /* low level support for above */ @@ -40,28 +46,27 @@ extern int split_tlb; extern int dcache_stride; extern int icache_stride; extern struct pdc_cache_info cache_info; +extern struct pdc_btlb_info btlb_info; void parisc_setup_cache_timing(void); -#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" \ +#define pdtlb(sr, addr) asm volatile("pdtlb 0(%%sr%0,%1)" \ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ - : : "r" (addr)) -#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" \ + : : "i"(sr), "r" (addr) : "memory") +#define pitlb(sr, addr) asm volatile("pitlb 0(%%sr%0,%1)" \ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \ - : : "r" (addr)) -#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" \ - ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ - : : "r" (addr)) + : : "i"(sr), "r" (addr) : "memory") #define asm_io_fdc(addr) asm volatile("fdc %%r0(%0)" \ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \ ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \ - : : "r" (addr)) + : : "r" (addr) : "memory") #define asm_io_sync() asm volatile("sync" \ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \ - ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: ) + ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory") +#define asm_syncdma() asm volatile("syncdma" :::"memory") -#endif /* ! __ASSEMBLY__ */ +#endif /* ! __ASSEMBLER__ */ /* Classes of processor wrt: disabling space register hashing */ diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 0c83644bfa5c..8394718870e1 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -9,16 +9,11 @@ /* The usual comment is "Caches aren't brain-dead on the <architecture>". * Unfortunately, that doesn't apply to PA-RISC. 
*/ -/* Internal implementation */ -void flush_data_cache_local(void *); /* flushes local data-cache only */ -void flush_instruction_cache_local(void *); /* flushes local code-cache only */ -#ifdef CONFIG_SMP -void flush_data_cache(void); /* flushes data-cache only (all processors) */ -void flush_instruction_cache(void); /* flushes i-cache only (all processors) */ -#else -#define flush_data_cache() flush_data_cache_local(NULL) -#define flush_instruction_cache() flush_instruction_cache_local(NULL) -#endif +#include <linux/jump_label.h> + +DECLARE_STATIC_KEY_TRUE(parisc_has_cache); +DECLARE_STATIC_KEY_TRUE(parisc_has_dcache); +DECLARE_STATIC_KEY_TRUE(parisc_has_icache); #define flush_cache_dup_mm(mm) flush_cache_mm(mm) @@ -27,7 +22,7 @@ void flush_kernel_icache_range_asm(unsigned long, unsigned long); void flush_user_dcache_range_asm(unsigned long, unsigned long); void flush_kernel_dcache_range_asm(unsigned long, unsigned long); void purge_kernel_dcache_range_asm(unsigned long, unsigned long); -void flush_kernel_dcache_page_asm(void *); +void flush_kernel_dcache_page_asm(const void *addr); void flush_kernel_icache_page(void *); /* Cache flush operations */ @@ -36,101 +31,56 @@ void flush_cache_all_local(void); void flush_cache_all(void); void flush_cache_mm(struct mm_struct *mm); -#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE -void flush_kernel_dcache_page_addr(void *addr); -static inline void flush_kernel_dcache_page(struct page *page) -{ - flush_kernel_dcache_page_addr(page_address(page)); -} - #define flush_kernel_dcache_range(start,size) \ flush_kernel_dcache_range_asm((start), (start)+(size)); +/* The only way to flush a vmap range is to flush whole cache */ +#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1 void flush_kernel_vmap_range(void *vaddr, int size); void invalidate_kernel_vmap_range(void *vaddr, int size); -#define flush_cache_vmap(start, end) flush_cache_all() -#define flush_cache_vunmap(start, end) flush_cache_all() +void flush_cache_vmap(unsigned long start, unsigned long end); +#define flush_cache_vmap_early(start, end) do { } while (0) +void flush_cache_vunmap(unsigned long start, unsigned long end); +void flush_dcache_folio(struct folio *folio); +#define flush_dcache_folio flush_dcache_folio #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 -extern void flush_dcache_page(struct page *page); +static inline void flush_dcache_page(struct page *page) +{ + flush_dcache_folio(page_folio(page)); +} #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages) +#define flush_dcache_mmap_lock_irqsave(mapping, flags) \ + xa_lock_irqsave(&mapping->i_pages, flags) +#define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \ + xa_unlock_irqrestore(&mapping->i_pages, flags) -#define flush_icache_page(vma,page) do { \ - flush_kernel_dcache_page(page); \ - flush_kernel_icache_page(page_address(page)); \ -} while (0) +void flush_icache_pages(struct vm_area_struct *vma, struct page *page, + unsigned int nr); +#define flush_icache_pages flush_icache_pages #define flush_icache_range(s,e) do { \ flush_kernel_dcache_range_asm(s,e); \ flush_kernel_icache_range_asm(s,e); \ } while (0) -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ -do { \ - flush_cache_page(vma, vaddr, page_to_pfn(page)); \ - memcpy(dst, src, len); \ - flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \ -} while (0) - -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ -do { \ - flush_cache_page(vma, 
vaddr, page_to_pfn(page)); \ - memcpy(dst, src, len); \ -} while (0) - -void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn); +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long user_vaddr, void *dst, void *src, int len); +void copy_from_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long user_vaddr, void *dst, void *src, int len); +void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, + unsigned long pfn); void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); -/* defined in pacache.S exported in cache.c used by flush_anon_page */ -void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); - #define ARCH_HAS_FLUSH_ANON_PAGE -static inline void -flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) -{ - if (PageAnon(page)) { - flush_tlb_page(vma, vmaddr); - preempt_disable(); - flush_dcache_page_asm(page_to_phys(page), vmaddr); - preempt_enable(); - } -} - -#include <asm/kmap_types.h> - -#define ARCH_HAS_KMAP - -static inline void *kmap(struct page *page) -{ - might_sleep(); - return page_address(page); -} - -static inline void kunmap(struct page *page) -{ - flush_kernel_dcache_page_addr(page_address(page)); -} - -static inline void *kmap_atomic(struct page *page) -{ - preempt_disable(); - pagefault_disable(); - return page_address(page); -} - -static inline void __kunmap_atomic(void *addr) -{ - flush_kernel_dcache_page_addr(addr); - pagefault_enable(); - preempt_enable(); -} +void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr); -#define kmap_atomic_prot(page, prot) kmap_atomic(page) -#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) +#define ARCH_HAS_FLUSH_ON_KUNMAP +void kunmap_flush_on_unmap(const void *addr); #endif /* _PARISC_CACHEFLUSH_H */ diff --git a/arch/parisc/include/asm/cachetype.h b/arch/parisc/include/asm/cachetype.h new file mode 100644 index 000000000000..e0868a1d3c47 --- /dev/null +++ b/arch/parisc/include/asm/cachetype.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_PARISC_CACHETYPE_H +#define __ASM_PARISC_CACHETYPE_H + +#include <linux/types.h> + +#define cpu_dcache_is_aliasing() true + +#endif diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h index 3cbf1f1c1188..2aceebcd695c 100644 --- a/arch/parisc/include/asm/checksum.h +++ b/arch/parisc/include/asm/checksum.h @@ -19,21 +19,6 @@ extern __wsum csum_partial(const void *, int, __wsum); /* - * The same as csum_partial, but copies from src while it checksums. - * - * Here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ -extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum); - -/* - * this is a new version of the above that records errors it finds in *errp, - * but continues and zeros the rest of the buffer. - */ -extern __wsum csum_partial_copy_from_user(const void __user *src, - void *dst, int len, __wsum sum, int *errp); - -/* * Optimized for IP headers, which always checksum on 4 octet boundaries. 
* * Written by Randolph Chung <tausq@debian.org>, and then mucked with by @@ -42,31 +27,32 @@ extern __wsum csum_partial_copy_from_user(const void __user *src, static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { unsigned int sum; + unsigned long t0, t1, t2; __asm__ __volatile__ ( " ldws,ma 4(%1), %0\n" " addib,<= -4, %2, 2f\n" "\n" -" ldws 4(%1), %%r20\n" -" ldws 8(%1), %%r21\n" -" add %0, %%r20, %0\n" -" ldws,ma 12(%1), %%r19\n" -" addc %0, %%r21, %0\n" -" addc %0, %%r19, %0\n" -"1: ldws,ma 4(%1), %%r19\n" -" addib,< 0, %2, 1b\n" -" addc %0, %%r19, %0\n" +" ldws 4(%1), %4\n" +" ldws 8(%1), %5\n" +" add %0, %4, %0\n" +" ldws,ma 12(%1), %3\n" +" addc %0, %5, %0\n" +" addc %0, %3, %0\n" +"1: ldws,ma 4(%1), %3\n" +" addib,> -1, %2, 1b\n" +" addc %0, %3, %0\n" "\n" -" extru %0, 31, 16, %%r20\n" -" extru %0, 15, 16, %%r21\n" -" addc %%r20, %%r21, %0\n" -" extru %0, 15, 16, %%r21\n" -" add %0, %%r21, %0\n" +" extru %0, 31, 16, %4\n" +" extru %0, 15, 16, %5\n" +" addc %4, %5, %0\n" +" extru %0, 15, 16, %5\n" +" add %0, %5, %0\n" " subi -1, %0, %0\n" "2:\n" - : "=r" (sum), "=r" (iph), "=r" (ihl) + : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (t0), "=r" (t1), "=r" (t2) : "1" (iph), "2" (ihl) - : "r19", "r20", "r21", "memory"); + : "memory"); return (__force __sum16)sum; } @@ -126,6 +112,10 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, __u32 len, __u8 proto, __wsum sum) { + unsigned long t0, t1, t2, t3; + + len += proto; /* add 16-bit proto + len */ + __asm__ __volatile__ ( #if BITS_PER_LONG > 32 @@ -136,20 +126,20 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, ** Try to keep 4 registers with "live" values ahead of the ALU. */ -" ldd,ma 8(%1), %%r19\n" /* get 1st saddr word */ -" ldd,ma 8(%2), %%r20\n" /* get 1st daddr word */ -" add %8, %3, %3\n"/* add 16-bit proto + len */ -" add %%r19, %0, %0\n" -" ldd,ma 8(%1), %%r21\n" /* 2cd saddr */ -" ldd,ma 8(%2), %%r22\n" /* 2cd daddr */ -" add,dc %%r20, %0, %0\n" -" add,dc %%r21, %0, %0\n" -" add,dc %%r22, %0, %0\n" +" depdi 0, 31, 32, %0\n"/* clear upper half of incoming checksum */ +" ldd,ma 8(%1), %4\n" /* get 1st saddr word */ +" ldd,ma 8(%2), %5\n" /* get 1st daddr word */ +" add %4, %0, %0\n" +" ldd,ma 8(%1), %6\n" /* 2nd saddr */ +" ldd,ma 8(%2), %7\n" /* 2nd daddr */ +" add,dc %5, %0, %0\n" +" add,dc %6, %0, %0\n" +" add,dc %7, %0, %0\n" " add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */ -" extrd,u %0, 31, 32, %%r19\n" /* copy upper half down */ -" depdi 0, 31, 32, %0\n" /* clear upper half */ -" add %%r19, %0, %0\n" /* fold into 32-bits */ -" addc 0, %0, %0\n" /* add carry */ +" extrd,u %0, 31, 32, %4\n"/* copy upper half down */ +" depdi 0, 31, 32, %0\n"/* clear upper half */ +" add,dc %4, %0, %0\n" /* fold into 32-bits, plus carry */ +" addc 0, %0, %0\n" /* add final carry */ #else @@ -158,52 +148,32 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, ** Insn stream is serialized on the carry bit here too. 
** result from the previous operation (eg r0 + x) */ - -" ldw,ma 4(%1), %%r19\n" /* get 1st saddr word */ -" ldw,ma 4(%2), %%r20\n" /* get 1st daddr word */ -" add %8, %3, %3\n" /* add 16-bit proto + len */ -" add %%r19, %0, %0\n" -" ldw,ma 4(%1), %%r21\n" /* 2cd saddr */ -" addc %%r20, %0, %0\n" -" ldw,ma 4(%2), %%r22\n" /* 2cd daddr */ -" addc %%r21, %0, %0\n" -" ldw,ma 4(%1), %%r19\n" /* 3rd saddr */ -" addc %%r22, %0, %0\n" -" ldw,ma 4(%2), %%r20\n" /* 3rd daddr */ -" addc %%r19, %0, %0\n" -" ldw,ma 4(%1), %%r21\n" /* 4th saddr */ -" addc %%r20, %0, %0\n" -" ldw,ma 4(%2), %%r22\n" /* 4th daddr */ -" addc %%r21, %0, %0\n" -" addc %%r22, %0, %0\n" -" addc %3, %0, %0\n" /* fold in proto+len, catch carry */ +" ldw,ma 4(%1), %4\n" /* get 1st saddr word */ +" ldw,ma 4(%2), %5\n" /* get 1st daddr word */ +" add %4, %0, %0\n" +" ldw,ma 4(%1), %6\n" /* 2nd saddr */ +" addc %5, %0, %0\n" +" ldw,ma 4(%2), %7\n" /* 2nd daddr */ +" addc %6, %0, %0\n" +" ldw,ma 4(%1), %4\n" /* 3rd saddr */ +" addc %7, %0, %0\n" +" ldw,ma 4(%2), %5\n" /* 3rd daddr */ +" addc %4, %0, %0\n" +" ldw,ma 4(%1), %6\n" /* 4th saddr */ +" addc %5, %0, %0\n" +" ldw,ma 4(%2), %7\n" /* 4th daddr */ +" addc %6, %0, %0\n" +" addc %7, %0, %0\n" +" addc %3, %0, %0\n" /* fold in proto+len */ +" addc 0, %0, %0\n" /* add carry */ #endif - : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len) - : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto) - : "r19", "r20", "r21", "r22", "memory"); + : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len), + "=r" (t0), "=r" (t1), "=r" (t2), "=r" (t3) + : "0" (sum), "1" (saddr), "2" (daddr), "3" (len) + : "memory"); return csum_fold(sum); } -/* - * Copy and checksum to user - */ -#define HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user(const void *src, - void __user *dst, - int len, __wsum sum, - int *err_ptr) -{ - /* code stolen from include/asm-mips64 */ - sum = csum_partial(src, len, sum); - - if (copy_to_user(dst, src, len)) { - *err_ptr = -EFAULT; - return (__force __wsum)-1; - } - - return sum; -} - #endif diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index f627c37dad9c..bf0a0f1189eb 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -14,22 +14,22 @@ extern void __xchg_called_with_bad_pointer(void); /* __xchg32/64 defined in arch/parisc/lib/bitops.c */ -extern unsigned long __xchg8(char, char *); -extern unsigned long __xchg32(int, int *); +extern unsigned long __xchg8(char, volatile char *); +extern unsigned long __xchg32(int, volatile int *); #ifdef CONFIG_64BIT -extern unsigned long __xchg64(unsigned long, unsigned long *); +extern unsigned long __xchg64(unsigned long, volatile unsigned long *); #endif /* optimizer better get rid of switch since size is a constant */ static inline unsigned long -__xchg(unsigned long x, __volatile__ void *ptr, int size) +__arch_xchg(unsigned long x, volatile void *ptr, int size) { switch (size) { #ifdef CONFIG_64BIT - case 8: return __xchg64(x, (unsigned long *) ptr); + case 8: return __xchg64(x, (volatile unsigned long *) ptr); #endif - case 4: return __xchg32((int) x, (int *) ptr); - case 1: return __xchg8((char) x, (char *) ptr); + case 4: return __xchg32((int) x, (volatile int *) ptr); + case 1: return __xchg8((char) x, (volatile char *) ptr); } __xchg_called_with_bad_pointer(); return x; @@ -44,33 +44,39 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size) ** if (((unsigned long)p & 0xf) == 0) ** return __ldcw(p); */ -#define xchg(ptr, x) \ - 
((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) +#define arch_xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __typeof__(*(ptr)) _x_ = (x); \ + __ret = (__typeof__(*(ptr))) \ + __arch_xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \ + __ret; \ +}) /* bug catcher for when unsupported size is used - won't link */ extern void __cmpxchg_called_with_bad_pointer(void); -/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */ -extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, - unsigned int new_); +/* __cmpxchg_u... defined in arch/parisc/lib/bitops.c */ +extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_); +extern u16 __cmpxchg_u16(volatile u16 *ptr, u16 old, u16 new_); +extern u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_); extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); /* don't worry...optimizer will get rid of most of this */ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) { - switch (size) { + return #ifdef CONFIG_64BIT - case 8: return __cmpxchg_u64((u64 *)ptr, old, new_); + size == 8 ? __cmpxchg_u64(ptr, old, new_) : #endif - case 4: return __cmpxchg_u32((unsigned int *)ptr, - (unsigned int)old, (unsigned int)new_); - } - __cmpxchg_called_with_bad_pointer(); - return old; + size == 4 ? __cmpxchg_u32(ptr, old, new_) : + size == 2 ? __cmpxchg_u16(ptr, old, new_) : + size == 1 ? __cmpxchg_u8(ptr, old, new_) : + (__cmpxchg_called_with_bad_pointer(), old); } -#define cmpxchg(ptr, o, n) \ +#define arch_cmpxchg(ptr, o, n) \ ({ \ __typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _n_ = (n); \ @@ -90,7 +96,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, #endif case 4: return __cmpxchg_u32(ptr, old, new_); default: - return __cmpxchg_local_generic(ptr, old, new_, size); + return __generic_cmpxchg_local(ptr, old, new_, size); } } @@ -98,19 +104,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make * them available. 
*/ -#define cmpxchg_local(ptr, o, n) \ +#define arch_cmpxchg_local(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr)))) #ifdef CONFIG_64BIT -#define cmpxchg64_local(ptr, o, n) \ +#define arch_cmpxchg64_local(ptr, o, n) \ ({ \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ cmpxchg_local((ptr), (o), (n)); \ }) #else -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n)) #endif -#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n) +#define arch_cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n) #endif /* _ASM_PARISC_CMPXCHG_H_ */ diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index e03e3c849f40..339d1b833fa7 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -8,22 +8,19 @@ #include <linux/sched.h> #include <linux/thread_info.h> +#define compat_mode_t compat_mode_t +typedef u16 compat_mode_t; + +#define compat_ipc_pid_t compat_ipc_pid_t +typedef u16 compat_ipc_pid_t; + +#define compat_ipc64_perm compat_ipc64_perm + #include <asm-generic/compat.h> -#define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "parisc\0\0" -typedef u32 __compat_uid_t; -typedef u32 __compat_gid_t; -typedef u32 __compat_uid32_t; -typedef u32 __compat_gid32_t; -typedef u16 compat_mode_t; -typedef u32 compat_dev_t; typedef u16 compat_nlink_t; -typedef u16 compat_ipc_pid_t; -typedef u32 compat_caddr_t; -typedef s64 compat_s64; -typedef u64 compat_u64; struct compat_stat { compat_dev_t st_dev; /* dev_t is 32 bits on parisc */ @@ -56,37 +53,6 @@ struct compat_stat { u32 st_spare4[3]; }; -struct compat_flock { - short l_type; - short l_whence; - compat_off_t l_start; - compat_off_t l_len; - compat_pid_t l_pid; -}; - -struct compat_flock64 { - short l_type; - short l_whence; - compat_loff_t l_start; - compat_loff_t l_len; - compat_pid_t l_pid; -}; - -struct compat_statfs { - s32 f_type; - s32 f_bsize; - s32 f_blocks; - s32 f_bfree; - s32 f_bavail; - s32 f_files; - s32 f_ffree; - __kernel_fsid_t f_fsid; - s32 f_namelen; - s32 f_frsize; - s32 f_flags; - s32 f_spare[4]; -}; - struct compat_sigcontext { compat_int_t sc_flags; compat_int_t sc_gr[32]; /* PSW in sc_gr[0] */ @@ -96,17 +62,6 @@ struct compat_sigcontext { compat_int_t sc_sar; /* cr11 */ }; -#define COMPAT_RLIM_INFINITY 0xffffffff - -typedef u32 compat_old_sigset_t; /* at least 32 bits */ - -#define _COMPAT_NSIG 64 -#define _COMPAT_NSIG_BPW 32 - -typedef u32 compat_sigset_word; - -#define COMPAT_OFF_T_MAX 0x7fffffff - struct compat_ipc64_perm { compat_key_t key; __compat_uid_t uid; @@ -173,32 +128,9 @@ struct compat_shmid64_ds { #define COMPAT_ELF_NGREG 80 typedef compat_ulong_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; -/* - * A pointer passed in from user mode. This should not - * be used for syscall parameters, just declare them - * as pointers because the syscall entry code will have - * appropriately converted them already. 
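/*
 * Illustrative sketch (not part of the patch): what the compat_ptr() and
 * ptr_to_compat() helpers removed just below did, and what the generic
 * versions still do - zero-extend a 32-bit user pointer into a full kernel
 * pointer and truncate it back.  Stand-alone model; uint32_t and void *
 * stand in for compat_uptr_t and void __user *.
 */
#include <stdint.h>

static inline void *compat_ptr_sketch(uint32_t uptr)
{
	return (void *)(unsigned long)uptr;	/* zero-extend, never sign-extend */
}

static inline uint32_t ptr_to_compat_sketch(void *uptr)
{
	return (uint32_t)(unsigned long)uptr;	/* upper 32 bits dropped */
}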
- */ - -static inline void __user *compat_ptr(compat_uptr_t uptr) -{ - return (void __user *)(unsigned long)uptr; -} - -static inline compat_uptr_t ptr_to_compat(void __user *uptr) -{ - return (u32)(unsigned long)uptr; -} - -static __inline__ void __user *arch_compat_alloc_user_space(long len) -{ - struct pt_regs *regs = &current->thread.regs; - return (void __user *)regs->gr[30]; -} - static inline int __is_compat_task(struct task_struct *t) { - return test_ti_thread_flag(task_thread_info(t), TIF_32BIT); + return test_tsk_thread_flag(t, TIF_32BIT); } static inline int is_compat_task(void) diff --git a/arch/parisc/include/asm/current.h b/arch/parisc/include/asm/current.h new file mode 100644 index 000000000000..2814529a4c28 --- /dev/null +++ b/arch/parisc/include/asm/current.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_PARISC_CURRENT_H +#define _ASM_PARISC_CURRENT_H + +#ifndef __ASSEMBLER__ +struct task_struct; + +static __always_inline struct task_struct *get_current(void) +{ + struct task_struct *ts; + + /* do not use mfctl() macro as it is marked volatile */ + asm( "mfctl %%cr30,%0" : "=r" (ts) ); + return ts; +} + +#define current get_current() + +#endif /* __ASSEMBLER__ */ + +#endif /* _ASM_PARISC_CURRENT_H */ diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 44a9f97194aa..635665004fe6 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -2,8 +2,6 @@ #ifndef _PARISC_DMA_MAPPING_H #define _PARISC_DMA_MAPPING_H -#include <asm/cacheflush.h> - /* ** We need to support 4 different coherent dma models with one binary: ** @@ -23,53 +21,9 @@ extern const struct dma_map_ops *hppa_dma_ops; -static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) +static inline const struct dma_map_ops *get_arch_dma_ops(void) { return hppa_dma_ops; } -static inline void * -parisc_walk_tree(struct device *dev) -{ - struct device *otherdev; - if(likely(dev->platform_data != NULL)) - return dev->platform_data; - /* OK, just traverse the bus to find it */ - for(otherdev = dev->parent; otherdev; - otherdev = otherdev->parent) { - if(otherdev->platform_data) { - dev->platform_data = otherdev->platform_data; - break; - } - } - return dev->platform_data; -} - -#define GET_IOC(dev) ({ \ - void *__pdata = parisc_walk_tree(dev); \ - __pdata ? 
HBA_DATA(__pdata)->iommu : NULL; \ -}) - -#ifdef CONFIG_IOMMU_CCIO -struct parisc_device; -struct ioc; -void * ccio_get_iommu(const struct parisc_device *dev); -int ccio_request_resource(const struct parisc_device *dev, - struct resource *res); -int ccio_allocate_resource(const struct parisc_device *dev, - struct resource *res, unsigned long size, - unsigned long min, unsigned long max, unsigned long align); -#else /* !CONFIG_IOMMU_CCIO */ -#define ccio_get_iommu(dev) NULL -#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res) -#define ccio_allocate_resource(dev, res, size, min, max, align) \ - allocate_resource(&iomem_resource, res, size, min, max, \ - align, NULL, NULL) -#endif /* !CONFIG_IOMMU_CCIO */ - -#ifdef CONFIG_IOMMU_SBA -struct parisc_device; -void * sba_get_iommu(struct parisc_device *dev); -#endif - #endif diff --git a/arch/parisc/include/asm/dma.h b/arch/parisc/include/asm/dma.h index eea80ed34e6d..582fb5d1a5d5 100644 --- a/arch/parisc/include/asm/dma.h +++ b/arch/parisc/include/asm/dma.h @@ -14,6 +14,8 @@ #define dma_outb outb #define dma_inb inb +extern unsigned long pcxl_dma_start; + /* ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up ** (or rather not merge) DMAs into manageable chunks. @@ -176,10 +178,4 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) #define free_dma(dmanr) -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy (0) -#endif - #endif /* _ASM_DMA_H */ diff --git a/arch/parisc/include/asm/dwarf.h b/arch/parisc/include/asm/dwarf.h index 8fe7d6b2cc42..526f4a79262c 100644 --- a/arch/parisc/include/asm/dwarf.h +++ b/arch/parisc/include/asm/dwarf.h @@ -1,15 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Helge Deller <deller@gmx.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef _ASM_PARISC_DWARF_H #define _ASM_PARISC_DWARF_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define CFI_STARTPROC .cfi_startproc #define CFI_ENDPROC .cfi_endproc @@ -18,6 +15,6 @@ #define CFI_REL_OFFSET .cfi_rel_offset #define CFI_UNDEFINED .cfi_undefined -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_PARISC_DWARF_H */ diff --git a/arch/parisc/include/asm/eisa_bus.h b/arch/parisc/include/asm/eisa_bus.h index 201085f83dd5..55dba9cd4002 100644 --- a/arch/parisc/include/asm/eisa_bus.h +++ b/arch/parisc/include/asm/eisa_bus.h @@ -1,13 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * eisa_bus.h interface between the eisa BA driver and the bus enumerator * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * * Copyright (c) 2002 Daniel Engstrom <5116@telia.com> - * */ #ifndef ASM_EISA_H diff --git a/arch/parisc/include/asm/eisa_eeprom.h b/arch/parisc/include/asm/eisa_eeprom.h index 5637ac962f8e..fdac7fc9484b 100644 --- a/arch/parisc/include/asm/eisa_eeprom.h +++ b/arch/parisc/include/asm/eisa_eeprom.h @@ -1,13 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * eisa_eeprom.h - provide support for EISA adapters in PA-RISC machines * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * * Copyright (c) 2001, 2002 Daniel Engstrom <5116@telia.com> - * */ #ifndef ASM_EISA_EEPROM_H diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h index d00973aab7f1..2d73d3c3cd37 100644 --- a/arch/parisc/include/asm/elf.h +++ b/arch/parisc/include/asm/elf.h @@ -152,7 +152,7 @@ /* The following are PA function descriptors * * addr: the absolute address of the function - * gp: either the data pointer (r27) for non-PIC code or the + * gp: either the data pointer (r27) for non-PIC code or * the PLT pointer (r19) for PIC code */ /* Format for the Elf32 Function descriptor */ @@ -163,8 +163,7 @@ typedef struct elf32_fdesc { /* Format for the Elf64 Function descriptor */ typedef struct elf64_fdesc { - __u64 dummy[2]; /* FIXME: nothing uses these, why waste - * the space */ + __u64 dummy[2]; /* used by 64-bit eBPF and tracing functions */ __u64 addr; __u64 gp; } Elf64_Fdesc; @@ -305,9 +304,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; struct task_struct; -extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); -#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) - struct pt_regs; /* forward declaration... */ @@ -353,13 +349,20 @@ struct pt_regs; /* forward declaration... */ #define ELF_HWCAP 0 -/* Masks for stack and mmap randomization */ -#define BRK_RND_MASK (is_32bit_task() ? 0x07ffUL : 0x3ffffUL) -#define MMAP_RND_MASK (is_32bit_task() ? 0x1fffUL : 0x3ffffUL) -#define STACK_RND_MASK MMAP_RND_MASK - -struct mm_struct; -extern unsigned long arch_randomize_brk(struct mm_struct *); -#define arch_randomize_brk arch_randomize_brk +#define STACK_RND_MASK 0x7ff /* 8MB of VA */ + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int executable_stack); +#define VDSO_AUX_ENT(a, b) NEW_AUX_ENT(a, b) +#define VDSO_CURRENT_BASE current->mm->context.vdso_base + +#define ARCH_DLINFO \ +do { \ + if (VDSO_CURRENT_BASE) { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);\ + } \ +} while (0) #endif diff --git a/arch/parisc/include/asm/extable.h b/arch/parisc/include/asm/extable.h new file mode 100644 index 000000000000..4ea23e3d79dc --- /dev/null +++ b/arch/parisc/include/asm/extable.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PARISC_EXTABLE_H +#define __PARISC_EXTABLE_H + +#include <asm/ptrace.h> +#include <linux/compiler.h> + +/* + * The exception table consists of three addresses: + * + * - A relative address to the instruction that is allowed to fault. + * - A relative address at which the program should continue (fixup routine) + * - An asm statement which specifies which CPU register will + * receive -EFAULT when an exception happens if the lowest bit in + * the fixup address is set. 
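/*
 * Illustrative sketch (not part of the patch): a PA-RISC ELF function
 * descriptor, as in the Elf64_Fdesc above, bundles the code address with
 * the gp value, so "the address of a function" is really the address of
 * this record.  Getting at the underlying text address is a field load -
 * roughly what dereference_function_descriptor() does for ftrace/kprobes.
 * Stand-alone model; fdesc_text_address() is an invented name.
 */
#include <stdint.h>

struct fdesc64 {
	uint64_t dummy[2];	/* used by 64-bit eBPF and tracing functions */
	uint64_t addr;		/* entry point of the code */
	uint64_t gp;		/* data pointer (r27) or PLT pointer (r19) */
};

static uint64_t fdesc_text_address(const struct fdesc64 *fd)
{
	return fd->addr;	/* the callable text address behind the descriptor */
}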
+ * + * Note: The register specified in the err_opcode instruction will be + * modified at runtime if a fault happens. Register %r0 will be ignored. + * + * Since relative addresses are used, 32bit values are sufficient even on + * 64bit kernel. + */ + +struct pt_regs; +int fixup_exception(struct pt_regs *regs); + +#define ARCH_HAS_RELATIVE_EXTABLE +struct exception_table_entry { + int insn; /* relative address of insn that is allowed to fault. */ + int fixup; /* relative address of fixup routine */ + int err_opcode; /* sample opcode with register which holds error code */ +}; + +#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr, opcode )\ + ".section __ex_table,\"aw\"\n" \ + ".align 4\n" \ + ".word (" #fault_addr " - .), (" #except_addr " - .)\n" \ + opcode "\n" \ + ".previous\n" + +/* + * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry + * (with lowest bit set) for which the fault handler in fixup_exception() will + * load -EFAULT on fault into the register specified by the err_opcode instruction, + * and zeroes the target register in case of a read fault in get_user(). + */ +#define ASM_EXCEPTIONTABLE_VAR(__err_var) \ + int __err_var = 0 +#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr, register )\ + ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1, "or %%r0,%%r0," register) + +static inline void swap_ex_entry_fixup(struct exception_table_entry *a, + struct exception_table_entry *b, + struct exception_table_entry tmp, + int delta) +{ + a->fixup = b->fixup + delta; + b->fixup = tmp.fixup - delta; + a->err_opcode = b->err_opcode; + b->err_opcode = tmp.err_opcode; +} +#define swap_ex_entry_fixup swap_ex_entry_fixup + +#endif diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h deleted file mode 100644 index c4cd6360f996..000000000000 --- a/arch/parisc/include/asm/fb.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ - -#include <linux/fb.h> -#include <linux/fs.h> -#include <asm/page.h> - -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) -{ - pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; -} - -static inline int fb_is_primary_device(struct fb_info *info) -{ - return 0; -} - -#endif /* _ASM_FB_H_ */ diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h index f7c3a0905de4..9cafa449c4a7 100644 --- a/arch/parisc/include/asm/fixmap.h +++ b/arch/parisc/include/asm/fixmap.h @@ -9,23 +9,56 @@ * * All of the values in this file must be <4GB (because of assembly * loading restrictions). If you place this region anywhere above - * __PAGE_OFFSET, you must adjust the memory map accordingly */ + * __PAGE_OFFSET, you must adjust the memory map accordingly + */ -/* The alias region is used in kernel space to do copy/clear to or - * from areas congruently mapped with user space. It is 8MB large - * and must be 16MB aligned */ -#define TMPALIAS_MAP_START ((__PAGE_OFFSET) - 16*1024*1024) +/* + * The tmpalias region is used in kernel space to copy/clear/flush data + * from pages congruently mapped with user space. It is comprised of + * a pair regions. The size of these regions is determined by the largest + * cache aliasing boundary for machines that support equivalent aliasing. + * + * The c3750 with PA8700 processor returns an alias value of 11. This + * indicates that it has an alias boundary of 4 MB. It also supports + * non-equivalent aliasing without a performance penalty. 
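/*
 * Illustrative sketch (not part of the patch): the exception table above
 * stores self-relative 32-bit offsets, so an entry is resolved back to an
 * absolute address by adding the address of the field itself; bit 0 of the
 * fixup address doubles as the "load -EFAULT" flag described above.
 * Stand-alone model with invented helper names; the kernel's
 * fixup_exception() performs the equivalent arithmetic.
 */
#include <stdint.h>

struct ex_entry {
	int32_t insn;		/* faulting insn, relative to &entry->insn */
	int32_t fixup;		/* fixup routine, relative to &entry->fixup */
	int32_t err_opcode;	/* sample opcode naming the error register */
};

static uintptr_t ex_insn_addr(const struct ex_entry *e)
{
	return (uintptr_t)&e->insn + e->insn;
}

static uintptr_t ex_fixup_addr(const struct ex_entry *e)
{
	/* mask off bit 0, which only flags the -EFAULT behaviour */
	return ((uintptr_t)&e->fixup + e->fixup) & ~(uintptr_t)1;
}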
+ * + * Machines with PA8800/PA8900 processors return an alias value of 0. + * This indicates the alias boundary is unknown and may be larger than + * 16 MB. Non-equivalent aliasing is not supported. + * + * Here we assume the maximum alias boundary is 4 MB. + */ +#define TMPALIAS_SIZE_BITS 22 /* 4 MB */ +#define TMPALIAS_MAP_START ((__PAGE_OFFSET) - (2 << TMPALIAS_SIZE_BITS)) + +#define FIXMAP_SIZE (FIX_BITMAP_COUNT << PAGE_SHIFT) +#define FIXMAP_START (TMPALIAS_MAP_START - FIXMAP_SIZE) /* This is the kernel area for all maps (vmalloc, dma etc.) most * usually, it extends up to TMPALIAS_MAP_START. Virtual addresses * 0..GATEWAY_PAGE_SIZE are reserved for the gateway page */ #define KERNEL_MAP_START (GATEWAY_PAGE_SIZE) -#define KERNEL_MAP_END (TMPALIAS_MAP_START) +#define KERNEL_MAP_END (FIXMAP_START) + +#ifndef __ASSEMBLER__ + + +enum fixed_addresses { + /* Support writing RO kernel text via kprobes, jump labels, etc. */ + FIX_TEXT_POKE0, + FIX_TEXT_KEXEC, + FIX_BITMAP_COUNT +}; -#ifndef __ASSEMBLY__ extern void *parisc_vmalloc_start; #define PCXL_DMA_MAP_SIZE (8*1024*1024) #define VMALLOC_START ((unsigned long)parisc_vmalloc_start) #define VMALLOC_END (KERNEL_MAP_END) -#endif /*__ASSEMBLY__*/ + +#define __fix_to_virt(_x) (FIXMAP_START + ((_x) << PAGE_SHIFT)) + +void set_fixmap(enum fixed_addresses idx, phys_addr_t phys); +void clear_fixmap(enum fixed_addresses idx); + +#endif /*__ASSEMBLER__*/ #endif /*_ASM_FIXMAP_H*/ diff --git a/arch/parisc/include/asm/floppy.h b/arch/parisc/include/asm/floppy.h index 6d8276cd25ca..f15b69fea901 100644 --- a/arch/parisc/include/asm/floppy.h +++ b/arch/parisc/include/asm/floppy.h @@ -1,29 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Architecture specific parts of the Floppy driver * * Linux/PA-RISC Project (http://www.parisc-linux.org/) * Copyright (C) 2000 Matthew Wilcox (willy a debian . org) * Copyright (C) 2000 Dave Kennedy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __ASM_PARISC_FLOPPY_H #define __ASM_PARISC_FLOPPY_H +#include <linux/sizes.h> #include <linux/vmalloc.h> - /* * The DMA channel used by the floppy controller cannot access data at * addresses >= 16MB @@ -33,17 +20,14 @@ * floppy accesses go through the track buffer. 
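/*
 * Illustrative sketch (not part of the patch): how the constants above lay
 * out the top of the kernel map - two 4 MB tmpalias regions directly below
 * __PAGE_OFFSET, with the fixmap slots in the pages below those.  The
 * numeric values (4 kB pages, __PAGE_OFFSET of 0x10000000, two fixmap
 * slots) are assumptions made only for this example.
 */
#include <stdio.h>

#define PAGE_SHIFT_	12UL
#define PAGE_OFFSET_	0x10000000UL
#define TMPALIAS_BITS	22UL				/* 4 MB alias boundary */
#define TMPALIAS_START	(PAGE_OFFSET_ - (2UL << TMPALIAS_BITS))
#define FIX_SLOTS	2UL				/* FIX_TEXT_POKE0, FIX_TEXT_KEXEC */
#define FIXMAP_START_	(TMPALIAS_START - (FIX_SLOTS << PAGE_SHIFT_))

static unsigned long fix_to_virt_sketch(unsigned long idx)
{
	return FIXMAP_START_ + (idx << PAGE_SHIFT_);	/* mirrors __fix_to_virt() */
}

int main(void)
{
	printf("tmpalias at %#lx, fixmap slot 0 at %#lx\n",
	       TMPALIAS_START, fix_to_virt_sketch(0));
	return 0;
}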
*/ #define _CROSS_64KB(a,s,vdma) \ -(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) - -#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) - + (!(vdma) && \ + ((unsigned long)(a) / SZ_64K != ((unsigned long)(a) + (s) - 1) / SZ_64K)) #define SW fd_routine[use_virtual_dma&1] #define CSW fd_routine[can_use_virtual_dma & 1] - -#define fd_inb(port) readb(port) -#define fd_outb(value, port) writeb(value, port) +#define fd_inb(base, reg) readb((base) + (reg)) +#define fd_outb(value, base, reg) writeb(value, (base) + (reg)) #define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) @@ -88,27 +72,28 @@ static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs) register char *lptr = virtual_dma_addr; for (lcount = virtual_dma_count; lcount; lcount--) { - st = fd_inb(virtual_dma_port+4) & 0xa0 ; - if (st != 0xa0) + st = fd_inb(virtual_dma_port, FD_STATUS); + st &= STATUS_DMA | STATUS_READY; + if (st != (STATUS_DMA | STATUS_READY)) break; if (virtual_dma_mode) { - fd_outb(*lptr, virtual_dma_port+5); + fd_outb(*lptr, virtual_dma_port, FD_DATA); } else { - *lptr = fd_inb(virtual_dma_port+5); + *lptr = fd_inb(virtual_dma_port, FD_DATA); } lptr++; } virtual_dma_count = lcount; virtual_dma_addr = lptr; - st = fd_inb(virtual_dma_port+4); + st = fd_inb(virtual_dma_port, FD_STATUS); } #ifdef TRACE_FLPY_INT calls++; #endif - if (st == 0x20) + if (st == STATUS_DMA) return; - if (!(st & 0x20)) { + if (!(st & STATUS_DMA)) { virtual_dma_residue += virtual_dma_count; virtual_dma_count = 0; #ifdef TRACE_FLPY_INT @@ -191,7 +176,7 @@ static void _fd_chose_dma_mode(char *addr, unsigned long size) { if(can_use_virtual_dma == 2) { if((unsigned int) addr >= (unsigned int) high_memory || - virt_to_bus(addr) >= 0x1000000 || + virt_to_phys(addr) >= 0x1000000 || _CROSS_64KB(addr, size, 0)) use_virtual_dma = 1; else @@ -218,7 +203,7 @@ static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) { #ifdef FLOPPY_SANITY_CHECK - if (CROSS_64KB(addr, size)) { + if (_CROSS_64KB(addr, size, use_virtual_dma & 1)) { printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size); return -1; } @@ -227,7 +212,7 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) doing_pdma = 0; clear_dma_ff(FLOPPY_DMA); set_dma_mode(FLOPPY_DMA,mode); - set_dma_addr(FLOPPY_DMA,virt_to_bus(addr)); + set_dma_addr(FLOPPY_DMA,virt_to_phys(addr)); set_dma_count(FLOPPY_DMA,size); enable_dma(FLOPPY_DMA); return 0; diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h index 42b2c75a1645..8b89d2b642eb 100644 --- a/arch/parisc/include/asm/ftrace.h +++ b/arch/parisc/include/asm/ftrace.h @@ -2,17 +2,33 @@ #ifndef _ASM_PARISC_FTRACE_H #define _ASM_PARISC_FTRACE_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern void mcount(void); -#define MCOUNT_INSN_SIZE 4 - +#define MCOUNT_ADDR ((unsigned long)mcount) +#define MCOUNT_INSN_SIZE 4 +#define CC_USING_NOP_MCOUNT +#define ARCH_SUPPORTS_FTRACE_OPS 1 extern unsigned long sys_call_table[]; extern unsigned long return_address(unsigned int); +struct ftrace_regs; +extern void ftrace_function_trampoline(unsigned long parent, + unsigned long self_addr, unsigned long org_sp_gr3, + struct ftrace_regs *fregs); + +#ifdef CONFIG_DYNAMIC_FTRACE +extern void ftrace_caller(void); + +struct dyn_arch_ftrace { +}; + +unsigned long ftrace_call_adjust(unsigned long addr); + +#endif #define 
ftrace_return_address(n) return_address(n) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_PARISC_FTRACE_H */ diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index d2c3e4106851..3222206cb3ea 100644 --- a/arch/parisc/include/asm/futex.h +++ b/arch/parisc/include/asm/futex.h @@ -2,32 +2,31 @@ #ifndef _ASM_PARISC_FUTEX_H #define _ASM_PARISC_FUTEX_H -#ifdef __KERNEL__ - #include <linux/futex.h> #include <linux/uaccess.h> #include <asm/atomic.h> #include <asm/errno.h> /* The following has to match the LWS code in syscall.S. We have - sixteen four-word locks. */ + * 256 four-word locks. We use bits 20-27 of the futex virtual + * address for the hash index. + */ + +static inline unsigned long _futex_hash_index(unsigned long ua) +{ + return (ua >> 2) & 0x3fc; +} static inline void -_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags) +_futex_spin_lock_irqsave(arch_spinlock_t *s, unsigned long *flags) { - extern u32 lws_lock_start[]; - long index = ((long)uaddr & 0xf0) >> 2; - arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index]; local_irq_save(*flags); arch_spin_lock(s); } static inline void -_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags) +_futex_spin_unlock_irqrestore(arch_spinlock_t *s, unsigned long *flags) { - extern u32 lws_lock_start[]; - long index = ((long)uaddr & 0xf0) >> 2; - arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index]; arch_spin_unlock(s); local_irq_restore(*flags); } @@ -35,16 +34,21 @@ _futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags) static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) { - unsigned long int flags; + extern u32 lws_lock_start[]; + unsigned long ua = (unsigned long)uaddr; + arch_spinlock_t *s; + unsigned long flags; int oldval, ret; u32 tmp; - _futex_spin_lock_irqsave(uaddr, &flags); - pagefault_disable(); + s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)]; + _futex_spin_lock_irqsave(s, &flags); - ret = -EFAULT; - if (unlikely(get_user(oldval, uaddr) != 0)) + /* Return -EFAULT if we encounter a page fault or COW break */ + if (unlikely(get_user(oldval, uaddr) != 0)) { + ret = -EFAULT; goto out_pagefault_enable; + } ret = 0; tmp = oldval; @@ -67,14 +71,14 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) break; default: ret = -ENOSYS; + goto out_pagefault_enable; } - if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0)) + if (unlikely(put_user(tmp, uaddr) != 0)) ret = -EFAULT; out_pagefault_enable: - pagefault_enable(); - _futex_spin_unlock_irqrestore(uaddr, &flags); + _futex_spin_unlock_irqrestore(s, &flags); if (!ret) *oval = oldval; @@ -86,40 +90,38 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { + extern u32 lws_lock_start[]; + unsigned long ua = (unsigned long)uaddr; + arch_spinlock_t *s; u32 val; unsigned long flags; - /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is - * our gateway page, and causes no end of trouble... - */ - if (uaccess_kernel() && !uaddr) - return -EFAULT; - if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; /* HPPA has no cmpxchg in hardware and therefore the * best we can do here is use an array of locks. The - * lock selected is based on a hash of the userspace - * address. This should scale to a couple of CPUs. + * lock selected is based on a hash of the virtual + * address of the futex. This should scale to a couple + * of CPUs. 
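/*
 * Illustrative sketch (not part of the patch): _futex_hash_index() above
 * keeps bits 4..11 of the futex address (bits 20-27 in PA-RISC's MSB-first
 * numbering), already multiplied by four so it can index the u32
 * lws_lock_start[] array in four-word (16-byte) steps - one of 256 locks.
 * Stand-alone demonstration of the arithmetic only.
 */
#include <stdio.h>

static unsigned long futex_hash_index(unsigned long ua)
{
	return (ua >> 2) & 0x3fc;	/* multiple of 4, range 0..0x3fc */
}

int main(void)
{
	unsigned long a = 0x40001230UL, b = 0x40001240UL;

	/* futexes 16 bytes apart hash to neighbouring locks */
	printf("%#lx -> lock %lu\n", a, futex_hash_index(a) / 4);
	printf("%#lx -> lock %lu\n", b, futex_hash_index(b) / 4);
	return 0;
}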
*/ - _futex_spin_lock_irqsave(uaddr, &flags); + s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)]; + _futex_spin_lock_irqsave(s, &flags); if (unlikely(get_user(val, uaddr) != 0)) { - _futex_spin_unlock_irqrestore(uaddr, &flags); + _futex_spin_unlock_irqrestore(s, &flags); return -EFAULT; } if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) { - _futex_spin_unlock_irqrestore(uaddr, &flags); + _futex_spin_unlock_irqrestore(s, &flags); return -EFAULT; } *uval = val; - _futex_spin_unlock_irqrestore(uaddr, &flags); + _futex_spin_unlock_irqrestore(s, &flags); return 0; } -#endif /*__KERNEL__*/ #endif /*_ASM_PARISC_FUTEX_H*/ diff --git a/arch/parisc/include/asm/grfioctl.h b/arch/parisc/include/asm/grfioctl.h index 671e06042b40..597201530d20 100644 --- a/arch/parisc/include/asm/grfioctl.h +++ b/arch/parisc/include/asm/grfioctl.h @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Architecture specific parts of HP's STI (framebuffer) driver. * Structures are HP-UX compatible for XFree86 usage. * * Linux/PA-RISC Project (http://www.parisc-linux.org/) * Copyright (C) 2001 Helge Deller (deller a parisc-linux org) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __ASM_PARISC_GRFIOCTL_H @@ -72,42 +59,4 @@ #define CRT_ID_LEGO 0x35ACDA30 /* Lego FX5, FX10 ... */ #define CRT_ID_PINNACLE 0x35ACDA16 /* Pinnacle FXe */ -/* structure for ioctl(GCDESCRIBE) */ - -#define gaddr_t unsigned long /* FIXME: PA2.0 (64bit) portable ? 
*/ - -struct grf_fbinfo { - unsigned int id; /* upper 32 bits of graphics id */ - unsigned int mapsize; /* mapped size of framebuffer */ - unsigned int dwidth, dlength;/* x and y sizes */ - unsigned int width, length; /* total x and total y size */ - unsigned int xlen; /* x pitch size */ - unsigned int bpp, bppu; /* bits per pixel and used bpp */ - unsigned int npl, nplbytes; /* # of planes and bytes per plane */ - char name[32]; /* name of the device (from ROM) */ - unsigned int attr; /* attributes */ - gaddr_t fbbase, regbase;/* framebuffer and register base addr */ - gaddr_t regions[6]; /* region bases */ -}; - -#define GCID _IOR('G', 0, int) -#define GCON _IO('G', 1) -#define GCOFF _IO('G', 2) -#define GCAON _IO('G', 3) -#define GCAOFF _IO('G', 4) -#define GCMAP _IOWR('G', 5, int) -#define GCUNMAP _IOWR('G', 6, int) -#define GCMAP_HPUX _IO('G', 5) -#define GCUNMAP_HPUX _IO('G', 6) -#define GCLOCK _IO('G', 7) -#define GCUNLOCK _IO('G', 8) -#define GCLOCK_MINIMUM _IO('G', 9) -#define GCUNLOCK_MINIMUM _IO('G', 10) -#define GCSTATIC_CMAP _IO('G', 11) -#define GCVARIABLE_CMAP _IO('G', 12) -#define GCTERM _IOWR('G',20,int) /* multi-headed Tomcat */ -#define GCDESCRIBE _IOR('G', 21, struct grf_fbinfo) -#define GCFASTLOCK _IO('G', 26) - #endif /* __ASM_PARISC_GRFIOCTL_H */ - diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index 1a1235a9d533..1e4fbd0fd944 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h @@ -12,16 +12,13 @@ #include <linux/threads.h> #include <linux/irq.h> -#ifdef CONFIG_IRQSTACKS -#define __ARCH_HAS_DO_SOFTIRQ -#endif - typedef struct { unsigned int __softirq_pending; unsigned int kernel_stack_usage; unsigned int irq_stack_usage; #ifdef CONFIG_SMP unsigned int irq_resched_count; + unsigned int irq_call_count; #endif unsigned int irq_unaligned_count; unsigned int irq_fpassist_count; @@ -31,7 +28,6 @@ typedef struct { DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); #define __ARCH_IRQ_STAT -#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) #define inc_irq_stat(member) this_cpu_inc(irq_stat.member) #define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member) #define ack_bad_irq(irq) WARN(1, "unexpected IRQ trap at vector %02x\n", irq) diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h index d6e1ed145031..a005ebc54779 100644 --- a/arch/parisc/include/asm/hardware.h +++ b/arch/parisc/include/asm/hardware.h @@ -10,12 +10,12 @@ #define SVERSION_ANY_ID PA_SVERSION_ANY_ID struct hp_hardware { - unsigned short hw_type:5; /* HPHW_xxx */ - unsigned short hversion; - unsigned long sversion:28; - unsigned short opt; - const char name[80]; /* The hardware description */ -}; + unsigned int hw_type:8; /* HPHW_xxx */ + unsigned int hversion:12; + unsigned int sversion:12; + unsigned char opt; + unsigned char name[59]; /* The hardware description */ +} __packed; struct parisc_device; @@ -120,7 +120,7 @@ extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path); extern void init_parisc_bus(void); extern struct device *hwpath_to_device(struct hardware_path *modpath); extern void device_to_hwpath(struct device *dev, struct hardware_path *path); - +extern int machine_has_merced_bus(void); /* inventory.c: */ extern void do_memory_inventory(void); diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h index 7cb595dcb7d7..21e9ace17739 100644 --- a/arch/parisc/include/asm/hugetlb.h +++ b/arch/parisc/include/asm/hugetlb.h @@ -6,37 +6,17 
@@ #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte); + pte_t *ptep, pte_t pte, unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); - -static inline int is_hugepage_only_range(struct mm_struct *mm, - unsigned long addr, - unsigned long len) { - return 0; -} - -/* - * If the arch doesn't supply something else, assume that hugepage - * size aligned regions are ok without further preparation. - */ -#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - if (len & ~HPAGE_MASK) - return -EINVAL; - if (addr & ~HPAGE_MASK) - return -EINVAL; - return 0; -} + pte_t *ptep, unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH -static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep) +static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) { + return *ptep; } #define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT @@ -48,10 +28,6 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty); -static inline void arch_clear_hugepage_flags(struct page *page) -{ -} - #include <asm-generic/hugetlb.h> #endif /* _ASM_PARISC64_HUGETLB_H */ diff --git a/arch/parisc/include/asm/ide.h b/arch/parisc/include/asm/ide.h deleted file mode 100644 index 34cdac01ed35..000000000000 --- a/arch/parisc/include/asm/ide.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * linux/include/asm-parisc/ide.h - * - * Copyright (C) 1994-1996 Linus Torvalds & authors - */ - -/* - * This file contains the PARISC architecture specific IDE code. - */ - -#ifndef __ASM_PARISC_IDE_H -#define __ASM_PARISC_IDE_H - -#ifdef __KERNEL__ - -/* Generic I/O and MEMIO string operations. 
*/ - -#define __ide_insw insw -#define __ide_insl insl -#define __ide_outsw outsw -#define __ide_outsl outsl - -static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count) -{ - while (count--) { - *(u16 *)addr = __raw_readw(port); - addr += 2; - } -} - -static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count) -{ - while (count--) { - *(u32 *)addr = __raw_readl(port); - addr += 4; - } -} - -static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) -{ - while (count--) { - __raw_writew(*(u16 *)addr, port); - addr += 2; - } -} - -static __inline__ void __ide_mm_outsl(void __iomem *port, void *addr, u32 count) -{ - while (count--) { - __raw_writel(*(u32 *)addr, port); - addr += 4; - } -} - -#endif /* __KERNEL__ */ - -#endif /* __ASM_PARISC_IDE_H */ diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index afe493b23d04..f01ad3ad60b5 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -3,12 +3,10 @@ #define _ASM_IO_H #include <linux/types.h> -#include <asm/pgtable.h> +#include <linux/pgtable.h> #define virt_to_phys(a) ((unsigned long)__pa(a)) #define phys_to_virt(a) __va(a) -#define virt_to_bus virt_to_phys -#define bus_to_virt phys_to_virt static inline unsigned long isa_bus_to_virt(unsigned long addr) { BUG(); @@ -127,113 +125,15 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr) /* * The standard PCI ioremap interfaces */ +#define ioremap_prot ioremap_prot -extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); +#define _PAGE_IOREMAP (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ + _PAGE_ACCESSED | _PAGE_NO_CACHE) -/* Most machines react poorly to I/O-space being cacheable... Instead let's - * define ioremap() in terms of ioremap_nocache(). 
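/*
 * Illustrative sketch (not part of the patch): with the private __ioremap()
 * gone, parisc only supplies _PAGE_IOREMAP above and the generic
 * ioremap_prot()/ioremap() from <asm-generic/io.h> (selected via
 * GENERIC_IOREMAP) builds the uncached mapping.  Typical driver-side use;
 * the MMIO address, size and register offset are invented for the example.
 */
#include <linux/io.h>
#include <linux/errno.h>

static int map_example_regs(void)
{
	void __iomem *regs = ioremap(0xfffa0000UL, 0x1000);	/* made-up range */

	if (!regs)
		return -ENOMEM;
	writel(0x1, regs + 0x10);	/* LE store, byte-swapped on big-endian parisc */
	iounmap(regs);
	return 0;
}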
- */ -static inline void __iomem * ioremap(unsigned long offset, unsigned long size) -{ - return __ioremap(offset, size, _PAGE_NO_CACHE); -} -#define ioremap_nocache(off, sz) ioremap((off), (sz)) -#define ioremap_wc ioremap_nocache -#define ioremap_uc ioremap_nocache - -extern void iounmap(const volatile void __iomem *addr); - -static inline unsigned char __raw_readb(const volatile void __iomem *addr) -{ - return (*(volatile unsigned char __force *) (addr)); -} -static inline unsigned short __raw_readw(const volatile void __iomem *addr) -{ - return *(volatile unsigned short __force *) addr; -} -static inline unsigned int __raw_readl(const volatile void __iomem *addr) -{ - return *(volatile unsigned int __force *) addr; -} -static inline unsigned long long __raw_readq(const volatile void __iomem *addr) -{ - return *(volatile unsigned long long __force *) addr; -} - -static inline void __raw_writeb(unsigned char b, volatile void __iomem *addr) -{ - *(volatile unsigned char __force *) addr = b; -} -static inline void __raw_writew(unsigned short b, volatile void __iomem *addr) -{ - *(volatile unsigned short __force *) addr = b; -} -static inline void __raw_writel(unsigned int b, volatile void __iomem *addr) -{ - *(volatile unsigned int __force *) addr = b; -} -static inline void __raw_writeq(unsigned long long b, volatile void __iomem *addr) -{ - *(volatile unsigned long long __force *) addr = b; -} - -static inline unsigned char readb(const volatile void __iomem *addr) -{ - return __raw_readb(addr); -} -static inline unsigned short readw(const volatile void __iomem *addr) -{ - return le16_to_cpu((__le16 __force) __raw_readw(addr)); -} -static inline unsigned int readl(const volatile void __iomem *addr) -{ - return le32_to_cpu((__le32 __force) __raw_readl(addr)); -} -static inline unsigned long long readq(const volatile void __iomem *addr) -{ - return le64_to_cpu((__le64 __force) __raw_readq(addr)); -} +#define ioremap_wc(addr, size) \ + ioremap_prot((addr), (size), __pgprot(_PAGE_IOREMAP)) -static inline void writeb(unsigned char b, volatile void __iomem *addr) -{ - __raw_writeb(b, addr); -} -static inline void writew(unsigned short w, volatile void __iomem *addr) -{ - __raw_writew((__u16 __force) cpu_to_le16(w), addr); -} -static inline void writel(unsigned int l, volatile void __iomem *addr) -{ - __raw_writel((__u32 __force) cpu_to_le32(l), addr); -} -static inline void writeq(unsigned long long q, volatile void __iomem *addr) -{ - __raw_writeq((__u64 __force) cpu_to_le64(q), addr); -} - -#define readb readb -#define readw readw -#define readl readl -#define readq readq -#define writeb writeb -#define writew writew -#define writel writel -#define writeq writeq - -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) -#define readq_relaxed(addr) readq(addr) -#define writeb_relaxed(b, addr) writeb(b, addr) -#define writew_relaxed(w, addr) writew(w, addr) -#define writel_relaxed(l, addr) writel(l, addr) -#define writeq_relaxed(q, addr) writeq(q, addr) - -#define mmiowb() do { } while (0) - -void memset_io(volatile void __iomem *addr, unsigned char val, int count); -void memcpy_fromio(void *dst, const volatile void __iomem *src, int count); -void memcpy_toio(volatile void __iomem *dst, const void *src, int count); +#define pci_iounmap pci_iounmap /* Port-space IO */ @@ -255,10 +155,15 @@ extern void eisa_out32(unsigned int data, unsigned short port); extern unsigned char inb(int addr); extern unsigned short inw(int addr); 
extern unsigned int inl(int addr); - extern void outb(unsigned char b, int addr); extern void outw(unsigned short b, int addr); extern void outl(unsigned int b, int addr); +#define inb inb +#define inw inw +#define inl inl +#define outb outb +#define outw outw +#define outl outl #elif defined(CONFIG_EISA) #define inb eisa_in8 #define inw eisa_in16 @@ -284,10 +189,12 @@ static inline int inl(unsigned long addr) BUG(); return -1; } - -#define outb(x, y) BUG() -#define outw(x, y) BUG() -#define outl(x, y) BUG() +#define inb inb +#define inw inw +#define inl inl +#define outb(x, y) ({(void)(x); (void)(y); BUG(); 0;}) +#define outw(x, y) ({(void)(x); (void)(y); BUG(); 0;}) +#define outl(x, y) ({(void)(x); (void)(y); BUG(); 0;}) #endif /* @@ -299,7 +206,12 @@ extern void insl (unsigned long port, void *dst, unsigned long count); extern void outsb (unsigned long port, const void *src, unsigned long count); extern void outsw (unsigned long port, const void *src, unsigned long count); extern void outsl (unsigned long port, const void *src, unsigned long count); - +#define insb insb +#define insw insw +#define insl insl +#define outsb outsb +#define outsw outsw +#define outsl outsl /* IO Port space is : BBiiii where BB is HBA number. */ #define IO_SPACE_LIMIT 0x00ffffff @@ -311,17 +223,61 @@ extern void outsl (unsigned long port, const void *src, unsigned long count); * value for either 32 or 64 bit mode */ #define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL))) -#include <asm-generic/iomap.h> - -/* - * Convert a physical pointer to a virtual kernel pointer for /dev/mem - * access - */ -#define xlate_dev_mem_ptr(p) __va(p) +#ifdef CONFIG_64BIT +extern u64 ioread64(const void __iomem *addr); +extern u64 ioread64be(const void __iomem *addr); +#define ioread64 ioread64 +#define ioread64be ioread64be + +extern void iowrite64(u64 val, void __iomem *addr); +extern void iowrite64be(u64 val, void __iomem *addr); +#define iowrite64 iowrite64 +#define iowrite64be iowrite64be +#endif -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *); +#define ioport_map ioport_map +#define ioport_unmap ioport_unmap + +extern unsigned int ioread8(const void __iomem *); +extern unsigned int ioread16(const void __iomem *); +extern unsigned int ioread16be(const void __iomem *); +extern unsigned int ioread32(const void __iomem *); +extern unsigned int ioread32be(const void __iomem *); +#define ioread8 ioread8 +#define ioread16 ioread16 +#define ioread32 ioread32 +#define ioread16be ioread16be +#define ioread32be ioread32be + +extern void iowrite8(u8, void __iomem *); +extern void iowrite16(u16, void __iomem *); +extern void iowrite16be(u16, void __iomem *); +extern void iowrite32(u32, void __iomem *); +extern void iowrite32be(u32, void __iomem *); +#define iowrite8 iowrite8 +#define iowrite16 iowrite16 +#define iowrite32 iowrite32 +#define iowrite16be iowrite16be +#define iowrite32be iowrite32be + +extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count); +extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count); +extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count); +#define ioread8_rep ioread8_rep +#define ioread16_rep ioread16_rep +#define ioread32_rep ioread32_rep + +extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count); +extern void 
iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); +extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count); +#define iowrite8_rep iowrite8_rep +#define iowrite16_rep iowrite16_rep +#define iowrite32_rep iowrite32_rep + +extern int devmem_is_allowed(unsigned long pfn); + +#include <asm-generic/io.h> #endif diff --git a/arch/parisc/include/asm/irq.h b/arch/parisc/include/asm/irq.h index 959e79cd2c14..378f63c4015b 100644 --- a/arch/parisc/include/asm/irq.h +++ b/arch/parisc/include/asm/irq.h @@ -47,7 +47,4 @@ extern unsigned long txn_affinity_addr(unsigned int irq, int cpu); extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *); extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest); -/* soft power switch support (power.c) */ -extern struct tasklet_struct power_tasklet; - #endif /* _ASM_PARISC_IRQ_H */ diff --git a/arch/parisc/include/asm/irqflags.h b/arch/parisc/include/asm/irqflags.h index 38a19c0bac3a..00fd87724588 100644 --- a/arch/parisc/include/asm/irqflags.h +++ b/arch/parisc/include/asm/irqflags.h @@ -31,6 +31,11 @@ static inline unsigned long arch_local_irq_save(void) static inline void arch_local_irq_restore(unsigned long flags) { + /* warn if IRQs are on although they should be off */ + if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)) + if (arch_local_save_flags() & PSW_I) + asm volatile("break 6,6\n"); /* SPINLOCK_BREAK_INSN */ + asm volatile("mtsm %0" : : "r" (flags) : "memory"); } diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h new file mode 100644 index 000000000000..f325ae3c622f --- /dev/null +++ b/arch/parisc/include/asm/jump_label.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_PARISC_JUMP_LABEL_H +#define _ASM_PARISC_JUMP_LABEL_H + +#ifndef __ASSEMBLER__ + +#include <linux/types.h> +#include <linux/stringify.h> +#include <asm/assembly.h> + +#define JUMP_LABEL_NOP_SIZE 4 + +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) +{ + asm goto("1:\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align %1\n\t" + ".word 1b - ., %l[l_yes] - .\n\t" + __stringify(ASM_ULONG_INSN) " %c0 - .\n\t" + ".popsection\n\t" + : : "i" (&((char *)key)[branch]), "i" (sizeof(long)) + : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm goto("1:\n\t" + "b,n %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align %1\n\t" + ".word 1b - ., %l[l_yes] - .\n\t" + __stringify(ASM_ULONG_INSN) " %c0 - .\n\t" + ".popsection\n\t" + : : "i" (&((char *)key)[branch]), "i" (sizeof(long)) + : : l_yes); + + return false; +l_yes: + return true; +} + +#endif /* __ASSEMBLER__ */ +#endif diff --git a/arch/parisc/include/asm/kexec.h b/arch/parisc/include/asm/kexec.h new file mode 100644 index 000000000000..bf31e2d50df9 --- /dev/null +++ b/arch/parisc/include/asm/kexec.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_PARISC_KEXEC_H +#define _ASM_PARISC_KEXEC_H + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +#define KEXEC_CONTROL_PAGE_SIZE 4096 + +#define KEXEC_ARCH KEXEC_ARCH_PARISC +#define ARCH_HAS_KIMAGE_ARCH + +#ifndef 
__ASSEMBLER__ + +struct kimage_arch { + unsigned long initrd_start; + unsigned long initrd_end; + unsigned long cmdline; +}; + +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + /* Dummy implementation for now */ +} + +#endif /* __ASSEMBLER__ */ + +#endif /* _ASM_PARISC_KEXEC_H */ diff --git a/arch/parisc/include/asm/kfence.h b/arch/parisc/include/asm/kfence.h new file mode 100644 index 000000000000..6259e5ac1fea --- /dev/null +++ b/arch/parisc/include/asm/kfence.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PA-RISC KFENCE support. + * + * Copyright (C) 2021, Helge Deller <deller@gmx.de> + */ + +#ifndef _ASM_PARISC_KFENCE_H +#define _ASM_PARISC_KFENCE_H + +#include <linux/kfence.h> + +#include <asm/pgtable.h> +#include <asm/tlbflush.h> + +static inline bool arch_kfence_init_pool(void) +{ + return true; +} + +/* Protect the given page and flush TLB. */ +static inline bool kfence_protect_page(unsigned long addr, bool protect) +{ + pte_t *pte = virt_to_kpte(addr); + + if (WARN_ON(!pte)) + return false; + + /* + * We need to avoid IPIs, as we may get KFENCE allocations or faults + * with interrupts disabled. + */ + + if (protect) + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); + else + set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); + + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + + return true; +} + +#endif /* _ASM_PARISC_KFENCE_H */ diff --git a/arch/parisc/include/asm/kgdb.h b/arch/parisc/include/asm/kgdb.h new file mode 100644 index 000000000000..9ece98bc6d9d --- /dev/null +++ b/arch/parisc/include/asm/kgdb.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PA-RISC KGDB support + * + * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org> + * + */ + +#ifndef __PARISC_KGDB_H__ +#define __PARISC_KGDB_H__ + +#define BREAK_INSTR_SIZE 4 +#define PARISC_KGDB_COMPILED_BREAK_INSN 0x3ffc01f +#define PARISC_KGDB_BREAK_INSN 0x3ffa01f + + +#define NUMREGBYTES sizeof(struct parisc_gdb_regs) +#define BUFMAX 4096 + +#define KGDB_MAX_BREAKPOINTS 40 + +#define CACHE_FLUSH_IS_SAFE 1 + +#ifndef __ASSEMBLER__ + +static inline void arch_kgdb_breakpoint(void) +{ + asm(".word %0" : : "i"(PARISC_KGDB_COMPILED_BREAK_INSN) : "memory"); +} + +struct parisc_gdb_regs { + unsigned long gpr[32]; + unsigned long sar; + unsigned long iaoq_f; + unsigned long iasq_f; + unsigned long iaoq_b; + unsigned long iasq_b; + unsigned long eiem; + unsigned long iir; + unsigned long isr; + unsigned long ior; + unsigned long ipsw; + unsigned long __unused0; + unsigned long sr4; + unsigned long sr0; + unsigned long sr1; + unsigned long sr2; + unsigned long sr3; + unsigned long sr5; + unsigned long sr6; + unsigned long sr7; + unsigned long cr0; + unsigned long pid1; + unsigned long pid2; + unsigned long scrccr; + unsigned long pid3; + unsigned long pid4; + unsigned long cr24; + unsigned long cr25; + unsigned long cr26; + unsigned long cr27; + unsigned long cr28; + unsigned long cr29; + unsigned long cr30; + + u64 fr[32]; +}; + +#endif +#endif diff --git a/arch/parisc/include/asm/kmap_types.h b/arch/parisc/include/asm/kmap_types.h deleted file mode 100644 index 3e70b5cd1123..000000000000 --- a/arch/parisc/include/asm/kmap_types.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_KMAP_TYPES_H -#define _ASM_KMAP_TYPES_H - -#ifdef CONFIG_DEBUG_HIGHMEM -#define __WITH_KM_FENCE -#endif - -#include <asm-generic/kmap_types.h> - -#undef __WITH_KM_FENCE - -#endif diff --git a/arch/parisc/include/asm/kprobes.h 
b/arch/parisc/include/asm/kprobes.h new file mode 100644 index 000000000000..0f42f5c8e3b6 --- /dev/null +++ b/arch/parisc/include/asm/kprobes.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/parisc/include/asm/kprobes.h + * + * PA-RISC kprobes implementation + * + * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org> + */ + +#ifndef _PARISC_KPROBES_H +#define _PARISC_KPROBES_H + +#include <asm-generic/kprobes.h> + +#ifdef CONFIG_KPROBES + +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/notifier.h> + +#define PARISC_KPROBES_BREAK_INSN 0x3ff801f +#define PARISC_KPROBES_BREAK_INSN2 0x3ff801e +#define __ARCH_WANT_KPROBES_INSN_SLOT +#define MAX_INSN_SIZE 2 + +typedef u32 kprobe_opcode_t; +struct kprobe; + +void arch_remove_kprobe(struct kprobe *p); + +#define flush_insn_slot(p) \ + flush_icache_range((unsigned long)&(p)->ainsn.insn[0], \ + (unsigned long)&(p)->ainsn.insn[0] + \ + MAX_INSN_SIZE*sizeof(kprobe_opcode_t)) + +#define kretprobe_blacklist_size 0 + +struct arch_specific_insn { + kprobe_opcode_t *insn; +}; + +struct prev_kprobe { + struct kprobe *kp; + unsigned long status; +}; + +struct kprobe_ctlblk { + unsigned int kprobe_status; + struct prev_kprobe prev_kprobe; + unsigned long iaoq[2]; +}; + +int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs); +int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs); +static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + return 0; +} + +#endif /* CONFIG_KPROBES */ +#endif /* _PARISC_KPROBES_H */ diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h index 3eb4bfc1fb36..47ebc4c91eaf 100644 --- a/arch/parisc/include/asm/ldcw.h +++ b/arch/parisc/include/asm/ldcw.h @@ -2,39 +2,42 @@ #ifndef __PARISC_LDCW_H #define __PARISC_LDCW_H -#ifndef CONFIG_PA20 /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data, and GCC only guarantees 8-byte alignment for stack locals, we can't be assured of 16-byte alignment for atomic lock data even if we specify "__attribute ((aligned(16)))" in the type declaration. So, we use a struct containing an array of four ints for the atomic lock type and dynamically select the 16-byte aligned int from the array - for the semaphore. */ + for the semaphore. */ + +/* From: "Jim Hull" <jim.hull of hp.com> + I've attached a summary of the change, but basically, for PA 2.0, as + long as the ",CO" (coherent operation) completer is implemented, then the + 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead + they only require "natural" alignment (4-byte for ldcw, 8-byte for + ldcd). + + Although the cache control hint is accepted by all PA 2.0 processors, + it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still + require 16-byte alignment. If the address is unaligned, the operation + of the instruction is undefined. The ldcw instruction does not generate + unaligned data reference traps so misaligned accesses are not detected. + This hid the problem for years. So, restore the 16-byte alignment dropped + by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". 
*/ #define __PA_LDCW_ALIGNMENT 16 -#define __PA_LDCW_ALIGN_ORDER 4 #define __ldcw_align(a) ({ \ unsigned long __ret = (unsigned long) &(a)->lock[0]; \ __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ & ~(__PA_LDCW_ALIGNMENT - 1); \ (volatile unsigned int *) __ret; \ }) -#define __LDCW "ldcw" -#else /*CONFIG_PA20*/ -/* From: "Jim Hull" <jim.hull of hp.com> - I've attached a summary of the change, but basically, for PA 2.0, as - long as the ",CO" (coherent operation) completer is specified, then the - 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead - they only require "natural" alignment (4-byte for ldcw, 8-byte for - ldcd). */ - -#define __PA_LDCW_ALIGNMENT 4 -#define __PA_LDCW_ALIGN_ORDER 2 -#define __ldcw_align(a) (&(a)->slock) +#ifdef CONFIG_PA20 #define __LDCW "ldcw,co" - -#endif /*!CONFIG_PA20*/ +#else +#define __LDCW "ldcw" +#endif /* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. We don't explicitly expose that "*a" may be written as reload @@ -52,7 +55,7 @@ }) #ifdef CONFIG_SMP -# define __lock_aligned __attribute__((__section__(".data..lock_aligned"))) +# define __lock_aligned __section(".data..lock_aligned") __aligned(16) #endif #endif /* __PARISC_LDCW_H */ diff --git a/arch/parisc/include/asm/led.h b/arch/parisc/include/asm/led.h index 6de13d08a388..0aea47eff48d 100644 --- a/arch/parisc/include/asm/led.h +++ b/arch/parisc/include/asm/led.h @@ -11,8 +11,8 @@ #define LED1 0x02 #define LED0 0x01 /* bottom (or furthest left) LED */ -#define LED_LAN_TX LED0 /* for LAN transmit activity */ -#define LED_LAN_RCV LED1 /* for LAN receive activity */ +#define LED_LAN_RCV LED0 /* for LAN receive activity */ +#define LED_LAN_TX LED1 /* for LAN transmit activity */ #define LED_DISK_IO LED2 /* for disk activity */ #define LED_HEARTBEAT LED3 /* heartbeat */ @@ -25,19 +25,13 @@ #define LED_CMD_REG_NONE 0 /* NULL == no addr for the cmd register */ /* register_led_driver() */ -int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg); - -/* registers the LED regions for procfs */ -void __init register_led_regions(void); +int register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg); #ifdef CONFIG_CHASSIS_LCD_LED /* writes a string to the LCD display (if possible on this h/w) */ -int lcd_print(const char *str); +void lcd_print(const char *str); #else -#define lcd_print(str) +#define lcd_print(str) do { } while (0) #endif -/* main LED initialization function (uses PDC) */ -int __init led_init(void); - #endif /* LED_H */ diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h index cd6fe4febead..d4cad492b971 100644 --- a/arch/parisc/include/asm/linkage.h +++ b/arch/parisc/include/asm/linkage.h @@ -15,7 +15,7 @@ */ #define ASM_NL ! 
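/*
 * Illustrative sketch (not part of the patch): __ldcw_align() above picks
 * the 16-byte-aligned word inside the four-word lock so that ldcw never
 * touches a misaligned address on pre-PA8800 CPUs.  Stand-alone model of
 * the rounding; toy_spinlock and ldcw_align_sketch() are invented names.
 */
#include <stdio.h>
#include <stdint.h>

#define LDCW_ALIGNMENT	16UL

struct toy_spinlock {
	volatile unsigned int lock[4];	/* 16 bytes, possibly only 8-byte aligned */
};

static volatile unsigned int *ldcw_align_sketch(struct toy_spinlock *x)
{
	uintptr_t a = (uintptr_t)&x->lock[0];

	a = (a + LDCW_ALIGNMENT - 1) & ~(LDCW_ALIGNMENT - 1);	/* round up */
	return (volatile unsigned int *)a;
}

int main(void)
{
	struct toy_spinlock s;

	printf("lock word at %p (array starts at %p)\n",
	       (void *)ldcw_align_sketch(&s), (void *)&s.lock[0]);
	return 0;
}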
-#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define ENTRY(name) \ ALIGN !\ @@ -35,6 +35,6 @@ name: ASM_NL\ .procend ASM_NL\ ENDPROC(name) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_PARISC_LINKAGE_H */ diff --git a/arch/parisc/include/asm/machdep.h b/arch/parisc/include/asm/machdep.h deleted file mode 100644 index 215d2c43989d..000000000000 --- a/arch/parisc/include/asm/machdep.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _PARISC_MACHDEP_H -#define _PARISC_MACHDEP_H - -#include <linux/notifier.h> - -#define MACH_RESTART 1 -#define MACH_HALT 2 -#define MACH_POWER_ON 3 -#define MACH_POWER_OFF 4 - -extern struct notifier_block *mach_notifier; -extern void pa7300lc_init(void); - -extern void (*cpu_lpmc)(int, struct pt_regs *); - -#endif diff --git a/arch/parisc/include/asm/mckinley.h b/arch/parisc/include/asm/mckinley.h deleted file mode 100644 index eb84dbeb7fd9..000000000000 --- a/arch/parisc/include/asm/mckinley.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASM_PARISC_MCKINLEY_H -#define ASM_PARISC_MCKINLEY_H -#ifdef __KERNEL__ - -/* declared in arch/parisc/kernel/setup.c */ -extern struct proc_dir_entry * proc_mckinley_root; - -#endif /*__KERNEL__*/ -#endif /*ASM_PARISC_MCKINLEY_H*/ diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h new file mode 100644 index 000000000000..663f587dc789 --- /dev/null +++ b/arch/parisc/include/asm/mman.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MMAN_H__ +#define __ASM_MMAN_H__ + +#include <linux/fs.h> +#include <uapi/asm/mman.h> + +/* PARISC cannot allow mdwe as it needs writable stacks */ +static inline bool arch_memory_deny_write_exec_supported(void) +{ + return false; +} +#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported + +static inline unsigned long arch_calc_vm_flag_bits(struct file *file, unsigned long flags) +{ + /* + * The stack on parisc grows upwards, so if userspace requests memory + * for a stack, mark it with VM_GROWSUP so that the stack expansion in + * the fault handler will work. + */ + if (flags & MAP_STACK) + return VM_GROWSUP; + + return 0; +} +#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags) + +#endif /* __ASM_MMAN_H__ */ diff --git a/arch/parisc/include/asm/mmu.h b/arch/parisc/include/asm/mmu.h index 3fb70a601d5c..44fd062b62ed 100644 --- a/arch/parisc/include/asm/mmu.h +++ b/arch/parisc/include/asm/mmu.h @@ -2,7 +2,9 @@ #ifndef _PARISC_MMU_H_ #define _PARISC_MMU_H_ -/* On parisc, we store the space id here */ -typedef unsigned long mm_context_t; +typedef struct { + unsigned long space_id; + unsigned long vdso_base; +} mm_context_t; #endif /* _PARISC_MMU_H_ */ diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h index 697a906ab1b0..c9187fe836a3 100644 --- a/arch/parisc/include/asm/mmu_context.h +++ b/arch/parisc/include/asm/mmu_context.h @@ -5,48 +5,45 @@ #include <linux/mm.h> #include <linux/sched.h> #include <linux/atomic.h> -#include <asm/pgalloc.h> -#include <asm/pgtable.h> +#include <linux/spinlock.h> #include <asm-generic/mm_hooks.h> -static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -} - /* on PA-RISC, we actually have enough contexts to justify an allocator * for them. 
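/*
 * Illustrative sketch (not part of the patch): arch_calc_vm_flag_bits()
 * above tags MAP_STACK mappings with VM_GROWSUP because the parisc user
 * stack grows towards higher addresses, so the fault handler has to expand
 * such VMAs upwards.  Toy stand-alone model; the flag values are invented
 * stand-ins for MAP_STACK and VM_GROWSUP.
 */
#include <stdio.h>

#define TOY_MAP_STACK	0x20000UL
#define TOY_VM_GROWSUP	0x200UL

static unsigned long calc_vm_flag_bits_sketch(unsigned long map_flags)
{
	return (map_flags & TOY_MAP_STACK) ? TOY_VM_GROWSUP : 0;
}

int main(void)
{
	printf("mmap(MAP_STACK) adds vm_flags %#lx\n",
	       calc_vm_flag_bits_sketch(TOY_MAP_STACK));
	return 0;
}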
prumpf */ extern unsigned long alloc_sid(void); extern void free_sid(unsigned long); +#define init_new_context init_new_context static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { BUG_ON(atomic_read(&mm->mm_users) != 1); - mm->context = alloc_sid(); + mm->context.space_id = alloc_sid(); return 0; } +#define destroy_context destroy_context static inline void destroy_context(struct mm_struct *mm) { - free_sid(mm->context); - mm->context = 0; + free_sid(mm->context.space_id); + mm->context.space_id = 0; } static inline unsigned long __space_to_prot(mm_context_t context) { #if SPACEID_SHIFT == 0 - return context << 1; + return context.space_id << 1; #else - return context >> (SPACEID_SHIFT - 1); + return context.space_id >> (SPACEID_SHIFT - 1); #endif } static inline void load_context(mm_context_t context) { - mtsp(context, 3); + mtsp(context.space_id, SR_USER); mtctl(__space_to_prot(context), 8); } @@ -54,6 +51,12 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { if (prev != next) { +#ifdef CONFIG_TLB_PTLOCK + /* put physical address of page_table_lock in cr28 (tr4) + for TLB faults */ + spinlock_t *pgd_lock = &next->page_table_lock; + mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28); +#endif mtctl(__pa(next->pgd), 25); load_context(next->context); } @@ -73,8 +76,7 @@ static inline void switch_mm(struct mm_struct *prev, } #define switch_mm_irqs_off switch_mm_irqs_off -#define deactivate_mm(tsk,mm) do { } while (0) - +#define activate_mm activate_mm static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { /* @@ -87,9 +89,12 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) BUG_ON(next == &init_mm); /* Should never happen */ - if (next->context == 0) - next->context = alloc_sid(); + if (next->context.space_id == 0) + next->context.space_id = alloc_sid(); switch_mm(prev,next,current); } + +#include <asm-generic/mmu_context.h> + #endif diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h index fafa3893fd70..8d390406d862 100644 --- a/arch/parisc/include/asm/mmzone.h +++ b/arch/parisc/include/asm/mmzone.h @@ -2,62 +2,6 @@ #ifndef _PARISC_MMZONE_H #define _PARISC_MMZONE_H -#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */ +#define MAX_PHYSMEM_RANGES 4 /* Fix the size for now (current known max is 3) */ -#ifdef CONFIG_DISCONTIGMEM - -extern int npmem_ranges; - -struct node_map_data { - pg_data_t pg_data; -}; - -extern struct node_map_data node_data[]; - -#define NODE_DATA(nid) (&node_data[nid].pg_data) - -/* We have these possible memory map layouts: - * Astro: 0-3.75, 67.75-68, 4-64 - * zx1: 0-1, 257-260, 4-256 - * Stretch (N-class): 0-2, 4-32, 34-xxx - */ - -/* Since each 1GB can only belong to one region (node), we can create - * an index table for pfn to nid lookup; each entry in pfnnid_map - * represents 1GB, and contains the node that the memory belongs to. 
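The new mm_context_t above just carries a space ID (plus the vdso base); init_new_context() and destroy_context() obtain and release that ID through alloc_sid()/free_sid(), whose implementation lives elsewhere in the tree. Purely to illustrate the contract they provide (unique non-zero IDs, with 0 meaning "no ID assigned yet", which is exactly what activate_mm() checks), here is a toy bitmap allocator; NR_SIDS and the toy_* names are invented for this sketch and do not reflect the real allocator.

#include <stdint.h>
#include <stdio.h>

#define NR_SIDS 4096
static uint8_t sid_used[NR_SIDS / 8] = { 1 };	/* bit 0 set: ID 0 reserved */

static unsigned long toy_alloc_sid(void)
{
	for (unsigned long sid = 1; sid < NR_SIDS; sid++)
		if (!(sid_used[sid / 8] & (1u << (sid % 8)))) {
			sid_used[sid / 8] |= 1u << (sid % 8);
			return sid;
		}
	return 0;	/* exhausted */
}

static void toy_free_sid(unsigned long sid)
{
	sid_used[sid / 8] &= ~(1u << (sid % 8));
}

int main(void)
{
	unsigned long a = toy_alloc_sid(), b = toy_alloc_sid();

	printf("allocated space IDs %lu and %lu\n", a, b);
	toy_free_sid(a);
	toy_free_sid(b);
	return 0;
}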
*/ - -#define PFNNID_SHIFT (30 - PAGE_SHIFT) -#define PFNNID_MAP_MAX 512 /* support 512GB */ -extern signed char pfnnid_map[PFNNID_MAP_MAX]; - -#ifndef CONFIG_64BIT -#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT)) -#else -/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */ -#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT)) -#endif - -static inline int pfn_to_nid(unsigned long pfn) -{ - unsigned int i; - - if (unlikely(pfn_is_io(pfn))) - return 0; - - i = pfn >> PFNNID_SHIFT; - BUG_ON(i >= ARRAY_SIZE(pfnnid_map)); - - return pfnnid_map[i]; -} - -static inline int pfn_valid(int pfn) -{ - int nid = pfn_to_nid(pfn); - - if (nid >= 0) - return (pfn < node_end_pfn(nid)); - return 0; -} - -#endif #endif /* _PARISC_MMZONE_H */ diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index b77f49ce6220..8f4e51071ea1 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -4,20 +4,11 @@ #include <linux/const.h> -#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) -# define PAGE_SHIFT 12 -#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB) -# define PAGE_SHIFT 14 -#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB) -# define PAGE_SHIFT 16 -#else -# error "unknown default kernel page size" -#endif -#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) +#include <vdso/page.h> +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/types.h> #include <asm/cache.h> @@ -26,12 +17,14 @@ #define copy_page(to, from) copy_page_asm((void *)(to), (void *)(from)) struct page; +struct vm_area_struct; void clear_page_asm(void *page); void copy_page_asm(void *to, void *from); #define clear_user_page(vto, vaddr, page) clear_page_asm(vto) -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, - struct page *pg); +void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, + struct vm_area_struct *vma); +#define __HAVE_ARCH_COPY_USER_HIGHPAGE /* * These are used to make use of C type-checking.. @@ -42,48 +35,54 @@ typedef struct { unsigned long pte; } pte_t; /* either 32 or 64bit */ /* NOTE: even on 64 bits, these entries are __u32 because we allocate * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */ -typedef struct { __u32 pmd; } pmd_t; typedef struct { __u32 pgd; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; -#define pte_val(x) ((x).pte) -/* These do not work lvalues, so make sure we don't use them as such. */ +#if CONFIG_PGTABLE_LEVELS == 3 +typedef struct { __u32 pmd; } pmd_t; +#define __pmd(x) ((pmd_t) { (x) } ) +/* pXd_val() do not work as lvalues, so make sure we don't use them as such. */ #define pmd_val(x) ((x).pmd + 0) +#endif + +#define pte_val(x) ((x).pte) #define pgd_val(x) ((x).pgd + 0) #define pgprot_val(x) ((x).pgprot) #define __pte(x) ((pte_t) { (x) } ) -#define __pmd(x) ((pmd_t) { (x) } ) #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) -#define __pmd_val_set(x,n) (x).pmd = (n) -#define __pgd_val_set(x,n) (x).pgd = (n) - #else /* * .. 
while these make it easier on the compiler */ typedef unsigned long pte_t; + +#if CONFIG_PGTABLE_LEVELS == 3 typedef __u32 pmd_t; +#define pmd_val(x) (x) +#define __pmd(x) (x) +#endif + typedef __u32 pgd_t; typedef unsigned long pgprot_t; #define pte_val(x) (x) -#define pmd_val(x) (x) #define pgd_val(x) (x) #define pgprot_val(x) (x) #define __pte(x) (x) -#define __pmd(x) (x) #define __pgd(x) (x) #define __pgprot(x) (x) -#define __pmd_val_set(x,n) (x) = (n) -#define __pgd_val_set(x,n) (x) = (n) - #endif /* STRICT_MM_TYPECHECKS */ +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) +#if CONFIG_PGTABLE_LEVELS == 3 +#define set_pud(pudptr, pudval) (*(pudptr) = (pudval)) +#endif + typedef struct page *pgtable_t; typedef struct __physmem_range { @@ -94,7 +93,7 @@ typedef struct __physmem_range { extern physmem_range_t pmem_ranges[]; extern int npmem_ranges; -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* WARNING: The definitions below must match exactly to sizeof(pte_t) * etc @@ -106,7 +105,7 @@ extern int npmem_ranges; #else #define BITS_PER_PTE_ENTRY 2 #define BITS_PER_PMD_ENTRY 2 -#define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY +#define BITS_PER_PGD_ENTRY 2 #endif #define PGD_ENTRY_SIZE (1UL << BITS_PER_PGD_ENTRY) #define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY) @@ -140,17 +139,13 @@ extern int npmem_ranges; #define KERNEL_BINARY_TEXT_START (__PAGE_OFFSET + 0x100000) /* These macros don't work for 64-bit C code -- don't allow in C at all */ -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ # define PA(x) ((x)-__PAGE_OFFSET) # define VA(x) ((x)+__PAGE_OFFSET) #endif #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) -#ifndef CONFIG_DISCONTIGMEM -#define pfn_valid(pfn) ((pfn) < max_mapnr) -#endif /* CONFIG_DISCONTIGMEM */ - #ifdef CONFIG_HUGETLB_PAGE #define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */ #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) @@ -171,17 +166,13 @@ extern int npmem_ranges; #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> #include <asm/pdc.h> -#define PAGE0 ((struct zeropage *)__PAGE_OFFSET) +#define PAGE0 ((struct zeropage *)absolute_pointer(__PAGE_OFFSET)) /* DEFINITION OF THE ZERO-PAGE (PAG0) */ /* based on work by Jason Eckhardt (jason@equator.com) */ diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h index d02d144c6012..9e74cef4d774 100644 --- a/arch/parisc/include/asm/parisc-device.h +++ b/arch/parisc/include/asm/parisc-device.h @@ -34,14 +34,14 @@ struct parisc_driver { struct parisc_driver *next; char *name; const struct parisc_device_id *id_table; - int (*probe) (struct parisc_device *dev); /* New device discovered */ - int (*remove) (struct parisc_device *dev); + int (*probe)(struct parisc_device *dev); /* New device discovered */ + void (*remove)(struct parisc_device *dev); struct device_driver drv; }; #define to_parisc_device(d) container_of(d, struct parisc_device, dev) -#define to_parisc_driver(d) container_of(d, struct parisc_driver, drv) +#define to_parisc_driver(d) container_of_const(d, struct parisc_driver, drv) #define parisc_parent(d) to_parisc_device(d->dev.parent) static inline const char *parisc_pathname(struct 
parisc_device *d) @@ -61,7 +61,7 @@ parisc_get_drvdata(struct parisc_device *d) return dev_get_drvdata(&d->dev); } -extern struct bus_type parisc_bus_type; +extern const struct bus_type parisc_bus_type; int iosapic_serial_irq(struct parisc_device *dev); diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 3328fd17c19d..127ed5021ae3 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -56,7 +56,7 @@ struct pci_hba_data { #define DINO_MAX_LMMIO_RESOURCES 3 unsigned long lmmio_space_offset; /* CPU view - PCI view */ - void * iommu; /* IOMMU this device is under */ + struct ioc *iommu; /* IOMMU this device is under */ /* REVISIT - spinlock to protect resources? */ #define HBA_NAME_SIZE 16 @@ -66,8 +66,6 @@ struct pci_hba_data { char gmmio_name[HBA_NAME_SIZE]; }; -#define HBA_DATA(d) ((struct pci_hba_data *) (d)) - /* ** We support 2^16 I/O ports per HBA. These are set up in the form ** 0xbbxxxx, where bb is the bus number and xxxx is the I/O port @@ -164,11 +162,6 @@ extern void pcibios_init_bridge(struct pci_dev *); #define PCIBIOS_MIN_IO 0x10 #define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ - return channel ? 15 : 14; -} - #define HAVE_PCI_MMAP #define ARCH_GENERIC_PCI_MMAP_RESOURCE diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 5b187d40d604..6080a1516b34 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -4,7 +4,7 @@ #include <uapi/asm/pdc.h> -#if !defined(__ASSEMBLY__) +#if !defined(__ASSEMBLER__) extern int parisc_narrow_firmware; @@ -19,9 +19,6 @@ extern unsigned long parisc_pat_pdc_cap; /* PDC capabilities (PAT) */ #define PDC_TYPE_SYSTEM_MAP 1 /* 32-bit, but supports PDC_SYSTEM_MAP */ #define PDC_TYPE_SNAKE 2 /* Doesn't support SYSTEM_MAP */ -void pdc_console_init(void); /* in pdc_console.c */ -void pdc_console_restart(void); - void setup_pdc(void); /* in inventory.c */ /* wrapper-functions from pdc.c */ @@ -40,16 +37,20 @@ int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info, int pdc_system_map_find_addrs(struct pdc_system_map_addr_info *pdc_addr_info, long mod_index, long addr_index); int pdc_model_info(struct pdc_model *model); -int pdc_model_sysmodel(char *name); +int pdc_model_sysmodel(unsigned int os_id, char *name); int pdc_model_cpuid(unsigned long *cpu_id); int pdc_model_versions(unsigned long *versions, int id); int pdc_model_capabilities(unsigned long *capabilities); +int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no); int pdc_cache_info(struct pdc_cache_info *cache); int pdc_spaceid_bits(unsigned long *space_bits); -#ifndef CONFIG_PA20 int pdc_btlb_info(struct pdc_btlb_info *btlb); +int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len, + unsigned long entry_info, unsigned long slot); +int pdc_btlb_purge_all(void); int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path); -#endif /* !CONFIG_PA20 */ +int pdc_pim_toc11(struct pdc_toc_pim_11 *ret); +int pdc_pim_toc20(struct pdc_toc_pim_20 *ret); int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa); int pdc_stable_read(unsigned long staddr, void *memaddr, unsigned long count); @@ -80,6 +81,7 @@ int pdc_do_firm_test_reset(unsigned long ftc_bitmap); int pdc_do_reset(void); int pdc_soft_power_info(unsigned long *power_reg); int pdc_soft_power_button(int sw_control); +int 
pdc_soft_power_button_panic(int sw_control); void pdc_io_reset(void); void pdc_io_reset_devices(void); int pdc_iodc_getc(void); @@ -87,8 +89,12 @@ int pdc_iodc_print(const unsigned char *str, unsigned count); void pdc_emergency_unlock(void); int pdc_sti_call(unsigned long func, unsigned long flags, - unsigned long inptr, unsigned long outputr, - unsigned long glob_cfg); + unsigned long inptr, unsigned long outputr, + unsigned long glob_cfg, int do_call64); + +int __pdc_cpu_rendezvous(void); +void pdc_cpu_rendezvous_lock(void); +void pdc_cpu_rendezvous_unlock(void); static inline char * os_id_to_string(u16 os_id) { switch(os_id) { @@ -103,5 +109,5 @@ static inline char * os_id_to_string(u16 os_id) { } } -#endif /* !defined(__ASSEMBLY__) */ +#endif /* !defined(__ASSEMBLER__) */ #endif /* _PARISC_PDC_H */ diff --git a/arch/parisc/include/asm/pdc_chassis.h b/arch/parisc/include/asm/pdc_chassis.h index a609273dc6bf..d6d82f53d3d0 100644 --- a/arch/parisc/include/asm/pdc_chassis.h +++ b/arch/parisc/include/asm/pdc_chassis.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/asm-parisc/pdc_chassis.h * * Copyright (C) 2002 Laurent Canet <canetl@esiee.fr> * Copyright (C) 2002 Thibaut Varene <varenet@parisc-linux.org> * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * * TODO: - handle processor number on SMP systems (Reporting Entity ID) * - handle message ID * - handle timestamps @@ -378,4 +365,3 @@ void parisc_pdc_chassis_init(void); PDC_CHASSIS_EOM_SET ) #endif /* _PARISC_PDC_CHASSIS_H */ -/* vim: set ts=8 */ diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h index bce9ee1c1c99..84ac81b1adde 100644 --- a/arch/parisc/include/asm/pdcpat.h +++ b/arch/parisc/include/asm/pdcpat.h @@ -67,6 +67,10 @@ #define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */ +/* PDC PAT COMPLEX */ + +#define PDC_PAT_COMPLEX 66L + /* PDC PAT CPU -- CPU configuration within the protection domain */ #define PDC_PAT_CPU 67L @@ -79,6 +83,7 @@ #define PDC_PAT_CPU_RENDEZVOUS 6L /* Rendezvous CPU */ #define PDC_PAT_CPU_GET_CLOCK_INFO 7L /* Return CPU Clock info */ #define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */ +#define PDC_PAT_CPU_GET_PDC_ENTRYPOINT 11L /* Return PDC Entry point */ #define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */ #define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache * Cleansing Mode */ @@ -205,7 +210,7 @@ #define PDC_PAT_SYSTEM_INFO 76L /* PDC_PAT_SYSTEM_INFO uses the same options as PDC_SYSTEM_INFO function. 
*/ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #ifdef CONFIG_64BIT @@ -352,7 +357,7 @@ struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */ typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t; - +extern int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry); extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data); extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info); extern int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info, @@ -384,6 +389,6 @@ extern int pdc_pat_mem_get_dimm_phys_location( struct pdc_pat_mem_phys_mem_location *pret, unsigned long phys_addr); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* ! __PARISC_PATPDC_H */ diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h index 1e0fd8ba6c03..8a2925029d15 100644 --- a/arch/parisc/include/asm/perf_event.h +++ b/arch/parisc/include/asm/perf_event.h @@ -1,6 +1,12 @@ #ifndef __ASM_PARISC_PERF_EVENT_H #define __ASM_PARISC_PERF_EVENT_H -/* Empty, just to avoid compiling error */ +#include <asm/psw.h> + +#define perf_arch_fetch_caller_regs(regs, __ip) { \ + (regs)->gr[0] = KERNEL_PSW; \ + (regs)->iaoq[0] = (__ip); \ + asm volatile("copy %%sp, %0\n":"=r"((regs)->gr[30])); \ +} #endif /* __ASM_PARISC_PERF_EVENT_H */ diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index d05c678c77c4..3b84ee93edaa 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -10,148 +10,51 @@ #include <asm/cache.h> -/* Allocate the top level pgd (page directory) - * - * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we - * allocate the first pmd adjacent to the pgd. This means that we can - * subtract a constant offset to get to it. The pmd and pgd sizes are - * arranged so that a single pmd covers 4GB (giving a full 64-bit - * process access to 8TB) so our lookups are effectively L2 for the - * first 4GB of the kernel (i.e. for all ILP32 processes and all the - * kernel for machines with under 4GB of memory) */ -static inline pgd_t *pgd_alloc(struct mm_struct *mm) -{ - pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, - PGD_ALLOC_ORDER); - pgd_t *actual_pgd = pgd; - - if (likely(pgd != NULL)) { - memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); -#if CONFIG_PGTABLE_LEVELS == 3 - actual_pgd += PTRS_PER_PGD; - /* Populate first pmd with allocated memory. We mark it - * with PxD_FLAG_ATTACHED as a signal to the system that this - * pmd entry may not be cleared. 
*/ - __pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT | - PxD_FLAG_VALID | - PxD_FLAG_ATTACHED) - + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); - /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as - * a signal that this pmd may not be freed */ - __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); -#endif - } - return actual_pgd; -} +#define __HAVE_ARCH_PMD_ALLOC_ONE +#include <asm-generic/pgalloc.h> -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +/* Allocate the top level pgd (page directory) */ +static inline pgd_t *pgd_alloc(struct mm_struct *mm) { -#if CONFIG_PGTABLE_LEVELS == 3 - pgd -= PTRS_PER_PGD; -#endif - free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); + return __pgd_alloc(mm, PGD_TABLE_ORDER); } #if CONFIG_PGTABLE_LEVELS == 3 /* Three Level Page Table Support for pmd's */ -static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { - __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) + - (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)); + set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) + + (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT))); } static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) { - pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER); - if (pmd) - memset(pmd, 0, PAGE_SIZE<<PMD_ORDER); - return pmd; -} + struct ptdesc *ptdesc; + gfp_t gfp = GFP_PGTABLE_USER; -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) { - /* - * This is the permanent pmd attached to the pgd; - * cannot free it. - * Increment the counter to compensate for the decrement - * done by generic mm code. - */ - mm_inc_nr_pmds(mm); - return; + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + ptdesc = pagetable_alloc(gfp, PMD_TABLE_ORDER); + if (!ptdesc) + return NULL; + if (!pagetable_pmd_ctor(mm, ptdesc)) { + pagetable_free(ptdesc); + return NULL; } - free_pages((unsigned long)pmd, PMD_ORDER); + return ptdesc_address(ptdesc); } - -#else - -/* Two Level Page Table Support for pmd's */ - -/* - * allocating and freeing a pmd is trivial: the 1-entry pmd is - * inside the pgd, so has no extra memory associated with it. 
- */ - -#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) -#define pmd_free(mm, x) do { } while (0) -#define pgd_populate(mm, pmd, pte) BUG() - #endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { -#if CONFIG_PGTABLE_LEVELS == 3 - /* preserve the gateway marker if this is the beginning of - * the permanent pmd */ - if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) - __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | - PxD_FLAG_VALID | - PxD_FLAG_ATTACHED) - + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)); - else -#endif - __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) - + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)); + set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID) + + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); } #define pmd_populate(mm, pmd, pte_page) \ pmd_populate_kernel(mm, pmd, page_address(pte_page)) -#define pmd_pgtable(pmd) pmd_page(pmd) - -static inline pgtable_t -pte_alloc_one(struct mm_struct *mm) -{ - struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO); - if (!page) - return NULL; - if (!pgtable_page_ctor(page)) { - __free_page(page); - return NULL; - } - return page; -} - -static inline pte_t * -pte_alloc_one_kernel(struct mm_struct *mm) -{ - pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); - return pte; -} - -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) -{ - free_page((unsigned long)pte); -} - -static inline void pte_free(struct mm_struct *mm, struct page *pte) -{ - pgtable_page_dtor(pte); - pte_free_kernel(mm, page_address(pte)); -} - -#define check_pgt_cache() do { } while (0) #endif diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index c7bb74e22436..2c139a4dbf4b 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -2,11 +2,17 @@ #ifndef _PARISC_PGTABLE_H #define _PARISC_PGTABLE_H -#include <asm-generic/4level-fixup.h> +#include <asm/page.h> + +#if CONFIG_PGTABLE_LEVELS == 3 +#include <asm-generic/pgtable-nopud.h> +#elif CONFIG_PGTABLE_LEVELS == 2 +#include <asm-generic/pgtable-nopmd.h> +#endif #include <asm/fixmap.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * we simulate an x86-style page table for the linux mm code */ @@ -17,63 +23,64 @@ #include <asm/processor.h> #include <asm/cache.h> -extern spinlock_t pa_tlb_lock; +/* This is for the serialization of PxTLB broadcasts. At least on the N class + * systems, only one PxTLB inter processor broadcast can be active at any one + * time on the Merced bus. */ +extern spinlock_t pa_tlb_flush_lock; +#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) +extern int pa_serialize_tlb_flushes; +#else +#define pa_serialize_tlb_flushes (0) +#endif -/* - * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel - * memory. For the return value to be meaningful, ADDR must be >= - * PAGE_OFFSET. This operation can be relatively expensive (e.g., - * require a hash-, or multi-level tree-lookup or something of that - * sort) but it guarantees to return TRUE only if accessing the page - * at that address does not cause an error. Note that there may be - * addresses for which kern_addr_valid() returns FALSE even though an - * access would not cause an error (e.g., this is typically true for - * memory mapped I/O regions. - * - * XXX Need to implement this for parisc. 
- */ -#define kern_addr_valid(addr) (1) +#define purge_tlb_start(flags) do { \ + if (pa_serialize_tlb_flushes) \ + spin_lock_irqsave(&pa_tlb_flush_lock, flags); \ + else \ + local_irq_save(flags); \ + } while (0) +#define purge_tlb_end(flags) do { \ + if (pa_serialize_tlb_flushes) \ + spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \ + else \ + local_irq_restore(flags); \ + } while (0) -/* Purge data and instruction TLB entries. Must be called holding - * the pa_tlb_lock. The TLB purge instructions are slow on SMP - * machines since the purge must be broadcast to all CPUs. +/* Purge data and instruction TLB entries. The TLB purge instructions + * are slow on SMP machines since the purge must be broadcast to all CPUs. */ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) { - mtsp(mm->context, 1); - pdtlb(addr); - pitlb(addr); + unsigned long flags; + + purge_tlb_start(flags); + mtsp(mm->context.space_id, SR_TEMP1); + pdtlb(SR_TEMP1, addr); + pitlb(SR_TEMP1, addr); + purge_tlb_end(flags); } +extern void __update_cache(pte_t pte); + /* Certain architectures need to do special things when PTEs * within a page table are directly modified. Thus, the following * hook is made available. */ -#define set_pte(pteptr, pteval) \ - do{ \ - *(pteptr) = (pteval); \ - } while(0) - -#define set_pte_at(mm, addr, ptep, pteval) \ - do { \ - pte_t old_pte; \ - unsigned long flags; \ - spin_lock_irqsave(&pa_tlb_lock, flags); \ - old_pte = *ptep; \ - set_pte(ptep, pteval); \ - purge_tlb_entries(mm, addr); \ - spin_unlock_irqrestore(&pa_tlb_lock, flags); \ - } while (0) +#define set_pte(pteptr, pteval) \ + do { \ + *(pteptr) = (pteval); \ + mb(); \ + } while(0) -#endif /* !__ASSEMBLY__ */ - -#include <asm/page.h> +#endif /* !__ASSEMBLER__ */ #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) +#if CONFIG_PGTABLE_LEVELS == 3 #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e)) +#endif #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) @@ -86,12 +93,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) #if CONFIG_PGTABLE_LEVELS == 3 -#define PGD_ORDER 1 /* Number of pages per pgd */ -#define PMD_ORDER 1 /* Number of pages per pmd */ -#define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */ +#define PMD_TABLE_ORDER 1 +#define PGD_TABLE_ORDER 0 #else -#define PGD_ORDER 1 /* Number of pages per pgd */ -#define PGD_ALLOC_ORDER PGD_ORDER +#define PGD_TABLE_ORDER 1 #endif /* Definitions for 3rd level (we use PLD here for Page Lower directory @@ -103,25 +108,22 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #define PTRS_PER_PTE (1UL << BITS_PER_PTE) /* Definitions for 2nd level */ -#define pgtable_cache_init() do { } while (0) - +#if CONFIG_PGTABLE_LEVELS == 3 #define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#if CONFIG_PGTABLE_LEVELS == 3 -#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) +#define BITS_PER_PMD (PAGE_SHIFT + PMD_TABLE_ORDER - BITS_PER_PMD_ENTRY) +#define PTRS_PER_PMD (1UL << BITS_PER_PMD) #else -#define __PAGETABLE_PMD_FOLDED 1 #define BITS_PER_PMD 0 #endif -#define PTRS_PER_PMD (1UL << BITS_PER_PMD) /* Definitions for 1st level */ -#define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD) -#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - 
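purge_tlb_start()/purge_tlb_end() above encode a "serialize only where the hardware requires it" pattern: pa_tlb_flush_lock is taken only when pa_serialize_tlb_flushes is set (the 64-bit SMP, N-class Merced-bus case), otherwise masking local interrupts is enough. A rough userspace analogue of that conditional-serialization pattern is sketched below, using a pthread mutex and omitting the interrupt masking; the demo_* names and the flag are invented for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t demo_flush_lock = PTHREAD_MUTEX_INITIALIZER;
static bool demo_serialize_flushes;	/* plays the role of pa_serialize_tlb_flushes */

static void demo_purge_start(void)
{
	/* the kernel masks interrupts in both branches (irqsave variants) */
	if (demo_serialize_flushes)
		pthread_mutex_lock(&demo_flush_lock);
}

static void demo_purge_end(void)
{
	if (demo_serialize_flushes)
		pthread_mutex_unlock(&demo_flush_lock);
}

int main(void)
{
	demo_serialize_flushes = true;
	demo_purge_start();
	puts("inside the serialized section");
	demo_purge_end();
	return 0;
}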
BITS_PER_PGD_ENTRY) > BITS_PER_LONG +#define PGDIR_SHIFT (PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD) +#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG #define BITS_PER_PGD (BITS_PER_LONG - PGDIR_SHIFT) #else -#define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) +#define BITS_PER_PGD (PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY) #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) @@ -134,14 +136,14 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #define SPACEID_SHIFT (MAX_ADDRBITS - 32) #else #define MAX_ADDRBITS (BITS_PER_LONG) -#define MAX_ADDRESS (1UL << MAX_ADDRBITS) +#define MAX_ADDRESS (1ULL << MAX_ADDRBITS) #define SPACEID_SHIFT 0 #endif /* This calculates the number of initial pages we need for the initial * page tables */ -#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT) -# define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT)) +#if (KERNEL_INITIAL_ORDER) >= (PLD_SHIFT + BITS_PER_PTE) +# define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PLD_SHIFT - BITS_PER_PTE)) #else # define PT_INITIAL (1) /* all initial PTEs fit into one page */ #endif @@ -150,8 +152,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) * pgd entries used up by user/kernel: */ -#define FIRST_USER_ADDRESS 0UL - /* NB: The tlb miss handlers make certain assumptions about the order */ /* of the following bits, so be careful (One example, bits 25-31 */ /* are moved together in one instruction). */ @@ -168,6 +168,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ #define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */ #define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ +#ifdef CONFIG_HUGETLB_PAGE +#define _PAGE_SPECIAL_BIT _PAGE_DMB_BIT /* DMB feature is currently unused */ +#else +#define _PAGE_SPECIAL_BIT _PAGE_HPAGE_BIT /* use unused HUGE PAGE bit */ +#endif /* N.B. The bits are defined in terms of a 32 bit word above, so the */ /* following macro is ok for both 32 and 64 bit. */ @@ -195,31 +200,33 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) #define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT)) #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) +#define _PAGE_SPECIAL (1 << xlate_pabit(_PAGE_SPECIAL_BIT)) #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL) #define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED) #define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXEC) #define _PAGE_KERNEL_RWX (_PAGE_KERNEL_EXEC | _PAGE_WRITE) #define _PAGE_KERNEL (_PAGE_KERNEL_RO | _PAGE_WRITE) +/* We borrow bit 23 to store the exclusive marker in swap PTEs. 
*/ +#define _PAGE_SWP_EXCLUSIVE _PAGE_ACCESSED + /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds * are page-aligned, we don't care about the PAGE_OFFSET bits, except * for a few meta-information bits, so we shift the address to be * able to effectively address 40/42/44-bits of physical address space * depending on 4k/16k/64k PAGE_SIZE */ #define _PxD_PRESENT_BIT 31 -#define _PxD_ATTACHED_BIT 30 -#define _PxD_VALID_BIT 29 +#define _PxD_VALID_BIT 30 #define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT)) -#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT)) #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) #define PxD_FLAG_MASK (0xf) #define PxD_FLAG_SHIFT (4) #define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE) @@ -248,24 +255,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) */ /*xwr*/ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 __P000 /* copy on write */ -#define __P011 __P001 /* copy on write */ -#define __P100 PAGE_EXECREAD -#define __P101 PAGE_EXECREAD -#define __P110 __P100 /* copy on write */ -#define __P111 __P101 /* copy on write */ - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_WRITEONLY -#define __S011 PAGE_SHARED -#define __S100 PAGE_EXECREAD -#define __S101 PAGE_EXECREAD -#define __S110 PAGE_RWX -#define __S111 PAGE_RWX - extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */ @@ -286,63 +275,37 @@ extern unsigned long *empty_zero_page; #define pte_none(x) (pte_val(x) == 0) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) -#define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0)) +#define pte_user(x) (pte_val(x) & _PAGE_USER) +#define pte_clear(mm, addr, xp) set_pte_at((mm), (addr), (xp), __pte(0)) #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) +#define pud_flag(x) (pud_val(x) & PxD_FLAG_MASK) +#define pud_address(x) ((unsigned long)(pud_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) #define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK) #define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) -#if CONFIG_PGTABLE_LEVELS == 3 -/* The first entry of the permanent pmd is not there if it contains - * the gateway marker */ -#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED) -#else #define pmd_none(x) (!pmd_val(x)) -#endif #define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID)) #define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT) static inline void pmd_clear(pmd_t *pmd) { -#if CONFIG_PGTABLE_LEVELS == 3 - if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) - /* This is the entry pointing to the permanent pmd - * attached to the pgd; cannot clear it */ - __pmd_val_set(*pmd, PxD_FLAG_ATTACHED); - else -#endif - __pmd_val_set(*pmd, 0); + set_pmd(pmd, __pmd(0)); } #if CONFIG_PGTABLE_LEVELS == 3 -#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd))) -#define pgd_page(pgd) virt_to_page((void *)pgd_page_vaddr(pgd)) +#define pud_pgtable(pud) ((pmd_t *) __va(pud_address(pud))) +#define pud_page(pud) virt_to_page((void *)pud_pgtable(pud)) /* For 64 bit we have three level tables */ -#define pgd_none(x) (!pgd_val(x)) -#define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID)) -#define pgd_present(x) (pgd_flag(x) & 
PxD_FLAG_PRESENT) -static inline void pgd_clear(pgd_t *pgd) { -#if CONFIG_PGTABLE_LEVELS == 3 - if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED) - /* This is the permanent pmd attached to the pgd; cannot - * free it */ - return; -#endif - __pgd_val_set(*pgd, 0); +#define pud_none(x) (!pud_val(x)) +#define pud_bad(x) (!(pud_flag(x) & PxD_FLAG_VALID)) +#define pud_present(x) (pud_flag(x) & PxD_FLAG_PRESENT) +static inline void pud_clear(pud_t *pud) { + set_pud(pud, __pud(0)); } -#else -/* - * The "pgd_xxx()" functions here are trivial for a folded two-level - * setup: the pgd is never bad, and a pmd always exists (as it's folded - * into the pgd entry) - */ -static inline int pgd_none(pgd_t pgd) { return 0; } -static inline int pgd_bad(pgd_t pgd) { return 0; } -static inline int pgd_present(pgd_t pgd) { return 1; } -static inline void pgd_clear(pgd_t * pgdp) { } #endif /* @@ -352,15 +315,15 @@ static inline void pgd_clear(pgd_t * pgdp) { } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } -static inline int pte_special(pte_t pte) { return 0; } +static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; return pte; } static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } -static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; } -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } +static inline pte_t pte_mkwrite_novma(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; } /* * Huge pte definitions. @@ -375,10 +338,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #endif -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - */ #define __mk_pte(addr,pgprot) \ ({ \ pte_t __pte; \ @@ -388,8 +347,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } __pte; \ }) -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) - static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { pte_t pte; @@ -406,109 +363,116 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) -#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_address(pmd))) +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return ((unsigned long) __va(pmd_address(pmd))); +} +#define pmd_pfn(pmd) (pmd_address(pmd) >> PAGE_SHIFT) #define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd))) #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) -#define pgd_index(address) ((address) >> PGDIR_SHIFT) - -/* to find an entry in a page-table-directory */ -#define pgd_offset(mm, address) \ -((mm)->pgd + ((address) >> PGDIR_SHIFT)) - -/* to find an entry in a kernel page-table-directory */ -#define pgd_offset_k(address) pgd_offset(&init_mm, address) - /* Find an entry in the second-level page table.. 
*/ -#if CONFIG_PGTABLE_LEVELS == 3 -#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) -#define pmd_offset(dir,address) \ -((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address)) -#else -#define pmd_offset(dir,addr) ((pmd_t *) dir) -#endif - -/* Find an entry in the third-level page table.. */ -#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) -#define pte_offset_kernel(pmd, address) \ - ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address)) -#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) -#define pte_unmap(pte) do { } while (0) - -#define pte_unmap(pte) do { } while (0) -#define pte_unmap_nested(pte) do { } while (0) - extern void paging_init (void); +static inline void set_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned int nr) +{ + if (pte_present(pte) && pte_user(pte)) + __update_cache(pte); + for (;;) { + *ptep = pte; + purge_tlb_entries(mm, addr); + if (--nr == 0) + break; + ptep++; + pte_val(pte) += 1 << PFN_PTE_SHIFT; + addr += PAGE_SIZE; + } +} +#define set_ptes set_ptes +#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1) + /* Used for deferring calls to flush_dcache_page() */ #define PG_dcache_dirty PG_arch_1 -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); - -/* Encode and de-code a swap entry */ +#define update_mmu_cache_range(vmf, vma, addr, ptep, nr) __update_cache(*ptep) +#define update_mmu_cache(vma, addr, ptep) __update_cache(*ptep) +/* + * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that + * are !pte_none() && !pte_present(). + * + * Format of swap PTEs (32bit): + * + * 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * <---------------- offset -----------------> P E <ofs> < type -> + * + * E is the exclusive marker that is not stored in swap entries. + * _PAGE_PRESENT (P) must be 0. + * + * For the 64bit version, the offset is extended by 32bit. 
+ */ #define __swp_type(x) ((x).val & 0x1f) -#define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \ - (((x).val >> 8) & ~0x7) ) -#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \ - ((offset & 0x7) << 6) | \ - ((offset & ~0x7) << 8) }) +#define __swp_offset(x) ( (((x).val >> 5) & 0x7) | \ + (((x).val >> 10) << 3) ) +#define __swp_entry(type, offset) ((swp_entry_t) { \ + ((type) & 0x1f) | \ + ((offset & 0x7) << 5) | \ + ((offset >> 3) << 10) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) +static inline bool pte_swp_exclusive(pte_t pte) +{ + return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + pte_val(pte) |= _PAGE_SWP_EXCLUSIVE; + return pte; +} + +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE; + return pte; +} + +static inline pte_t ptep_get(pte_t *ptep) +{ + return READ_ONCE(*ptep); +} +#define ptep_get ptep_get + static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; - unsigned long flags; - if (!pte_young(*ptep)) - return 0; - - spin_lock_irqsave(&pa_tlb_lock, flags); - pte = *ptep; + pte = ptep_get(ptep); if (!pte_young(pte)) { - spin_unlock_irqrestore(&pa_tlb_lock, flags); return 0; } - set_pte(ptep, pte_mkold(pte)); - purge_tlb_entries(vma->vm_mm, addr); - spin_unlock_irqrestore(&pa_tlb_lock, flags); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); return 1; } -struct mm_struct; -static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - pte_t old_pte; - unsigned long flags; - - spin_lock_irqsave(&pa_tlb_lock, flags); - old_pte = *ptep; - set_pte(ptep, __pte(0)); - purge_tlb_entries(mm, addr); - spin_unlock_irqrestore(&pa_tlb_lock, flags); - - return old_pte; -} +int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); +pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); +struct mm_struct; static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long flags; - spin_lock_irqsave(&pa_tlb_lock, flags); - set_pte(ptep, pte_wrprotect(*ptep)); - purge_tlb_entries(mm, addr); - spin_unlock_irqrestore(&pa_tlb_lock, flags); + set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep)); } #define pte_same(A,B) (pte_val(A) == pte_val(B)) -struct seq_file; -extern void arch_report_meminfo(struct seq_file *m); - -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* TLB page size encoding - see table 3-1 in parisc20.pdf */ @@ -538,9 +502,9 @@ extern void arch_report_meminfo(struct seq_file *m); #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +#define __HAVE_ARCH_PTEP_CLEAR_FLUSH #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTE_SAME -#include <asm-generic/pgtable.h> #endif /* _PARISC_PGTABLE_H */ diff --git a/arch/parisc/include/asm/prefetch.h b/arch/parisc/include/asm/prefetch.h index 6e63f720024d..748eefb27c68 100644 --- a/arch/parisc/include/asm/prefetch.h +++ b/arch/parisc/include/asm/prefetch.h @@ -16,7 +16,7 @@ #ifndef __ASM_PARISC_PREFETCH_H #define __ASM_PARISC_PREFETCH_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #ifdef CONFIG_PREFETCH #define ARCH_HAS_PREFETCH @@ -40,6 +40,6 @@ static inline void prefetchw(const void *addr) #endif /* CONFIG_PA20 */ #endif /* 
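The swap-PTE layout described above is fully determined by __swp_type()/__swp_offset()/__swp_entry(): five type bits at the bottom, the low three offset bits at bits 5-7, the rest of the offset from bit 10 upward, leaving bits 8-9 free for the exclusive marker and _PAGE_PRESENT (which must stay 0 in a swap PTE). Below is a standalone round-trip check of that packing; the demo_* functions are local re-implementations written for illustration, not the kernel macros themselves.

#include <assert.h>
#include <stdio.h>

/* Same shifts and masks as __swp_entry() / __swp_type() / __swp_offset(). */
static unsigned long demo_swp_entry(unsigned long type, unsigned long offset)
{
	return (type & 0x1f) |
	       ((offset & 0x7) << 5) |	/* low 3 offset bits  */
	       ((offset >> 3) << 10);	/* remaining offset   */
}

static unsigned long demo_swp_type(unsigned long val)
{
	return val & 0x1f;
}

static unsigned long demo_swp_offset(unsigned long val)
{
	return ((val >> 5) & 0x7) | ((val >> 10) << 3);
}

int main(void)
{
	unsigned long val = demo_swp_entry(3, 0x12345);

	assert(demo_swp_type(val) == 3);
	assert(demo_swp_offset(val) == 0x12345);
	printf("type=%lu offset=%#lx packs to %#lx\n",
	       demo_swp_type(val), demo_swp_offset(val), val);
	return 0;
}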
CONFIG_PREFETCH */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_PARISC_PROCESSOR_H */ diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 6e2a8176b0dd..dd0b5e199559 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -9,16 +9,18 @@ #ifndef __ASM_PARISC_PROCESSOR_H #define __ASM_PARISC_PROCESSOR_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/threads.h> +#include <linux/irqreturn.h> +#include <asm/assembly.h> #include <asm/prefetch.h> #include <asm/hardware.h> #include <asm/pdc.h> #include <asm/ptrace.h> #include <asm/types.h> #include <asm/percpu.h> -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #define HAVE_ARCH_PICK_MMAP_LAYOUT @@ -37,22 +39,17 @@ #define DEFAULT_MAP_BASE DEFAULT_MAP_BASE32 #endif -#ifdef __KERNEL__ - /* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc. * prumpf */ #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX DEFAULT_TASK_SIZE -/* Allow bigger stacks for 64-bit processes */ -#define STACK_SIZE_MAX (USER_WIDE_MODE \ - ? (1 << 30) /* 1 GB */ \ - : (CONFIG_MAX_STACK_SIZE_MB*1024*1024)) - -#endif +#ifndef __ASSEMBLER__ -#ifndef __ASSEMBLY__ +struct rlimit; +unsigned long mmap_upper_limit(const struct rlimit *rlim_stack); +unsigned long calc_max_stack_size(unsigned long stack_max); /* * Data detected about CPUs at boot time which is the same for all CPU's. @@ -97,20 +94,14 @@ struct cpuinfo_parisc { unsigned long cpu_loc; /* CPU location from PAT firmware */ unsigned int state; struct parisc_device *dev; - unsigned long loops_per_jiffy; }; extern struct system_cpuinfo_parisc boot_cpu_data; DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data); +extern int time_keeper_id; /* CPU used for timekeeping */ #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF) -typedef struct { - int seg; -} mm_segment_t; - -#define ARCH_MIN_TASKALIGN 8 - struct thread_struct { struct pt_regs regs; unsigned long task_size; @@ -249,7 +240,7 @@ on downward growing arches, it looks like this: #define start_thread(regs, new_pc, new_sp) do { \ elf_addr_t *sp = (elf_addr_t *)new_sp; \ - __u32 spaceid = (__u32)current->mm->context; \ + __u32 spaceid = (__u32)current->mm->context.space_id; \ elf_addr_t pc = (elf_addr_t)new_pc | 3; \ elf_caddr_t *argv = (elf_caddr_t *)bprm->exec + 1; \ \ @@ -276,13 +267,9 @@ on downward growing arches, it looks like this: regs->gr[23] = 0; \ } while(0) -struct task_struct; struct mm_struct; -/* Free all resources held by a thread. 
*/ -extern void release_thread(struct task_struct *); - -extern unsigned long get_wchan(struct task_struct *p); +extern unsigned long __get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0]) #define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30]) @@ -302,7 +289,42 @@ extern int _parisc_requires_coherency; #endif extern int running_on_qemu; - -#endif /* __ASSEMBLY__ */ +extern int parisc_narrow_firmware; + +extern void __noreturn toc_intr(struct pt_regs *regs); +extern void toc_handler(void); +extern unsigned int toc_handler_size; +extern unsigned int toc_handler_csum; +extern void do_cpu_irq_mask(struct pt_regs *); +extern irqreturn_t timer_interrupt(int, void *); +extern irqreturn_t ipi_interrupt(int, void *); +extern void parisc_clockevent_init(void); +extern void handle_interruption(int, struct pt_regs *); + +/* called from assembly code: */ +extern void start_parisc(void); +extern void smp_callin(unsigned long); +extern void sys_rt_sigreturn(struct pt_regs *, int); +extern void do_notify_resume(struct pt_regs *, long); +extern long do_syscall_trace_enter(struct pt_regs *); +extern void do_syscall_trace_exit(struct pt_regs *); + +/* CPU startup and info */ +struct seq_file; +extern void early_trap_init(void); +extern void collect_boot_cpu_data(void); +extern void btlb_init_per_cpu(void); +extern int show_cpuinfo (struct seq_file *m, void *v); + +/* driver code in driver/parisc */ +extern void processor_init(void); +struct parisc_device; +struct resource; +extern void sba_distributed_lmmio(struct parisc_device *, struct resource *); +extern void sba_directed_lmmio(struct parisc_device *, struct resource *); +extern void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask); +extern void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp); + +#endif /* __ASSEMBLER__ */ #endif /* __ASM_PARISC_PROCESSOR_H */ diff --git a/arch/parisc/include/asm/psw.h b/arch/parisc/include/asm/psw.h index 76c301146c31..9140e1ab7e63 100644 --- a/arch/parisc/include/asm/psw.h +++ b/arch/parisc/include/asm/psw.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PARISC_PSW_H - +#define _PARISC_PSW_H #define PSW_I 0x00000001 #define PSW_D 0x00000002 @@ -60,7 +60,7 @@ #define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB) #define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* The program status word as bitfields. */ struct pa_psw { @@ -99,6 +99,6 @@ struct pa_psw { #define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW)) #endif -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 2a27b275ab09..eea3f3df0823 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h @@ -5,30 +5,31 @@ #ifndef _PARISC_PTRACE_H #define _PARISC_PTRACE_H +#include <asm/assembly.h> #include <uapi/asm/ptrace.h> - #define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS)) #define arch_has_single_step() 1 #define arch_has_block_step() 1 /* XXX should we use iaoq[1] or iaoq[0] ? */ -#define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) -#define user_space(regs) (((regs)->iasq[1] != 0) ? 
1 : 0) +#define user_mode(regs) (((regs)->iaoq[0] & 3) != PRIV_KERNEL) +#define user_space(regs) ((regs)->iasq[1] != PRIV_KERNEL) #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) #define user_stack_pointer(regs) ((regs)->gr[30]) unsigned long profile_pc(struct pt_regs *); static inline unsigned long regs_return_value(struct pt_regs *regs) { - return regs->gr[20]; + return regs->gr[28]; } static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long val) { - regs->iaoq[0] = val; + regs->iaoq[0] = val; + regs->iaoq[1] = val + 4; } /* Query offset/name of register from its name/offset */ @@ -36,4 +37,17 @@ extern int regs_query_register_offset(const char *name); extern const char *regs_query_register_name(unsigned int offset); #define MAX_REG_OFFSET (offsetof(struct pt_regs, ipsw)) +#define kernel_stack_pointer(regs) ((regs)->gr[30]) + +static inline unsigned long regs_get_register(struct pt_regs *regs, + unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + return *(unsigned long *)((unsigned long)regs + offset); +} + +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); +int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr); + #endif diff --git a/arch/parisc/include/asm/ropes.h b/arch/parisc/include/asm/ropes.h index 8e51c775c80a..e2d2d7e9bfde 100644 --- a/arch/parisc/include/asm/ropes.h +++ b/arch/parisc/include/asm/ropes.h @@ -29,7 +29,7 @@ struct ioc { void __iomem *ioc_hpa; /* I/O MMU base address */ char *res_map; /* resource map, bit == pdir entry */ - u64 *pdir_base; /* physical base address */ + __le64 *pdir_base; /* physical base address */ unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */ unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */ #ifdef ZX1_SUPPORT @@ -86,6 +86,9 @@ struct sba_device { struct ioc ioc[MAX_IOC]; }; +/* list of SBA's in system, see drivers/parisc/sba_iommu.c */ +extern struct sba_device *sba_list; + #define ASTRO_RUNWAY_PORT 0x582 #define IKE_MERCED_PORT 0x803 #define REO_MERCED_PORT 0x804 @@ -110,7 +113,7 @@ static inline int IS_PLUTO(struct parisc_device *d) { #define SBA_PDIR_VALID_BIT 0x8000000000000000ULL -#define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL +#define SBA_AGPGART_COOKIE (__force __le64) 0x0000badbadc0ffeeULL #define SBA_FUNC_ID 0x0000 /* function id */ #define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */ @@ -252,7 +255,7 @@ static inline int agp_mode_mercury(void __iomem *hpa) { ** fixup_irq is to initialize PCI IRQ line support and ** virtualize pcidev->irq value. To be called by pci_fixup_bus(). */ -extern void *iosapic_register(unsigned long hpa); +extern void *iosapic_register(unsigned long hpa, void __iomem *vaddr); extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev); #define LBA_FUNC_ID 0x0000 /* function id */ diff --git a/arch/parisc/include/asm/rt_sigframe.h b/arch/parisc/include/asm/rt_sigframe.h index 2b3010ade00e..bb7fb4153327 100644 --- a/arch/parisc/include/asm/rt_sigframe.h +++ b/arch/parisc/include/asm/rt_sigframe.h @@ -2,16 +2,8 @@ #ifndef _ASM_PARISC_RT_SIGFRAME_H #define _ASM_PARISC_RT_SIGFRAME_H -#define SIGRETURN_TRAMP 4 -#define SIGRESTARTBLOCK_TRAMP 5 -#define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP) - struct rt_sigframe { - /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c - Secondary to that it must protect the ERESTART_RESTARTBLOCK - trampoline we left on the stack (we were bad and didn't - change sp so we could run really fast.) 
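regs_get_register() above reads a saved register by its byte offset into struct pt_regs, returning 0 for offsets beyond MAX_REG_OFFSET. The same offset-plus-bounds-check pattern is shown self-contained below; struct demo_regs and the demo_* names are invented stand-ins and do not reflect the real pt_regs layout.

#include <stddef.h>
#include <stdio.h>

struct demo_regs {
	unsigned long gr[32];
	unsigned long sp;
	unsigned long ipsw;
};

#define DEMO_MAX_REG_OFFSET offsetof(struct demo_regs, ipsw)

static unsigned long demo_get_register(const struct demo_regs *regs, size_t offset)
{
	if (offset > DEMO_MAX_REG_OFFSET)
		return 0;	/* out of range: same policy as regs_get_register() */
	return *(const unsigned long *)((const char *)regs + offset);
}

int main(void)
{
	struct demo_regs regs = { .gr = { [28] = 42 } };
	size_t off = offsetof(struct demo_regs, gr) + 28 * sizeof(unsigned long);

	printf("gr[28] via offset: %lu\n", demo_get_register(&regs, off));
	return 0;
}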
*/ - unsigned int tramp[TRAMP_SIZE]; + unsigned int tramp[2]; /* holds original return address */ struct siginfo info; struct ucontext uc; }; diff --git a/arch/parisc/include/asm/runway.h b/arch/parisc/include/asm/runway.h index f3cfe69439f6..2837f0223d6d 100644 --- a/arch/parisc/include/asm/runway.h +++ b/arch/parisc/include/asm/runway.h @@ -1,13 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ASM_PARISC_RUNWAY_H #define ASM_PARISC_RUNWAY_H -#ifdef __KERNEL__ - -/* declared in arch/parisc/kernel/setup.c */ -extern struct proc_dir_entry * proc_runway_root; #define RUNWAY_STATUS 0x10 #define RUNWAY_DEBUG 0x40 -#endif /* __KERNEL__ */ #endif /* ASM_PARISC_RUNWAY_H */ diff --git a/arch/parisc/include/asm/seccomp.h b/arch/parisc/include/asm/seccomp.h new file mode 100644 index 000000000000..b058b2220322 --- /dev/null +++ b/arch/parisc/include/asm/seccomp.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_SECCOMP_H +#define _ASM_SECCOMP_H + +#include <asm-generic/seccomp.h> + +#ifdef CONFIG_64BIT +# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_PARISC64 +# define SECCOMP_ARCH_NATIVE_NR NR_syscalls +# define SECCOMP_ARCH_NATIVE_NAME "parisc64" +# ifdef CONFIG_COMPAT +# define SECCOMP_ARCH_COMPAT AUDIT_ARCH_PARISC +# define SECCOMP_ARCH_COMPAT_NR NR_syscalls +# define SECCOMP_ARCH_COMPAT_NAME "parisc" +# endif +#else /* !CONFIG_64BIT */ +# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_PARISC +# define SECCOMP_ARCH_NATIVE_NR NR_syscalls +# define SECCOMP_ARCH_NATIVE_NAME "parisc" +#endif + +#endif /* _ASM_SECCOMP_H */ diff --git a/arch/parisc/include/asm/sections.h b/arch/parisc/include/asm/sections.h index bb52aea0cb21..33df42b5cc6d 100644 --- a/arch/parisc/include/asm/sections.h +++ b/arch/parisc/include/asm/sections.h @@ -2,20 +2,14 @@ #ifndef _PARISC_SECTIONS_H #define _PARISC_SECTIONS_H +#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS +#include <asm/elf.h> +typedef Elf64_Fdesc func_desc_t; +#endif + /* nothing to see, move along */ #include <asm-generic/sections.h> extern char __alt_instructions[], __alt_instructions_end[]; -#ifdef CONFIG_64BIT - -#define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1 - -#undef dereference_function_descriptor -void *dereference_function_descriptor(void *); - -#undef dereference_kernel_function_descriptor -void *dereference_kernel_function_descriptor(void *); -#endif - #endif diff --git a/arch/parisc/include/asm/shmparam.h b/arch/parisc/include/asm/shmparam.h index 74f74e4d35b7..5a95b0f62b87 100644 --- a/arch/parisc/include/asm/shmparam.h +++ b/arch/parisc/include/asm/shmparam.h @@ -2,6 +2,21 @@ #ifndef _ASMPARISC_SHMPARAM_H #define _ASMPARISC_SHMPARAM_H +/* + * PA-RISC uses virtually indexed & physically tagged (VIPT) caches + * which has strict requirements when two pages to the same physical + * address are accessed through different mappings. Read the section + * "Address Aliasing" in the arch docs for more detail: + * PA-RISC 1.1 (page 3-6): + * https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf + * PA-RISC 2.0 (page F-5): + * https://parisc.wiki.kernel.org/images-parisc/7/73/Parisc2.0.pdf + * + * For Linux we allow kernel and userspace to map pages on page size + * granularity (SHMLBA) but have to ensure that, if two pages are + * mapped to the same physical address, the virtual and physical + * addresses modulo SHM_COLOUR are identical. 
+ */ #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ #define SHM_COLOUR 0x00400000 /* shared mappings colouring */ diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h index 715c96ba2ec8..85c3d7409bbc 100644 --- a/arch/parisc/include/asm/signal.h +++ b/arch/parisc/include/asm/signal.h @@ -4,24 +4,12 @@ #include <uapi/asm/signal.h> -#define _NSIG 64 -/* bits-per-word, where word apparently means 'long' not 'int' */ -#define _NSIG_BPW BITS_PER_LONG -#define _NSIG_WORDS (_NSIG / _NSIG_BPW) - -# ifndef __ASSEMBLY__ +# ifndef __ASSEMBLER__ /* Most things should be clean enough to redefine this at will, if care is taken to make libc match. */ -typedef unsigned long old_sigset_t; /* at least 32 bits */ - -typedef struct { - /* next_signal() assumes this is a long - no choice */ - unsigned long sig[_NSIG_WORDS]; -} sigset_t; - #include <asm/sigcontext.h> -#endif /* !__ASSEMBLY */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_PARISC_SIGNAL_H */ diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h index b9a18db4b05a..0cf1c3a2696a 100644 --- a/arch/parisc/include/asm/smp.h +++ b/arch/parisc/include/asm/smp.h @@ -12,7 +12,7 @@ extern int init_per_cpu(int cpuid); #define PDC_OS_BOOT_RENDEZVOUS 0x10 #define PDC_OS_BOOT_RENDEZVOUS_HI 0x28 -#ifndef ASSEMBLY +#ifndef __ASSEMBLER__ #include <linux/bitops.h> #include <linux/threads.h> /* for NR_CPUS */ #include <linux/cpumask.h> @@ -32,9 +32,9 @@ extern void smp_send_all_nop(void); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -#endif /* !ASSEMBLY */ +#define raw_smp_processor_id() (current_thread_info()->cpu) -#define raw_smp_processor_id() (current_thread_info()->cpu) +#endif /* !__ASSEMBLER__ */ #else /* CONFIG_SMP */ @@ -44,12 +44,7 @@ static inline void smp_send_all_nop(void) { return; } #define NO_PROC_ID 0xFF /* No processor magic marker */ #define ANY_PROC_ID 0xFF /* Any processor magic marker */ -static inline int __cpu_disable (void) { - return 0; -} -static inline void __cpu_die (unsigned int cpu) { - while(1) - ; -} +int __cpu_disable(void); +void __cpu_die(unsigned int cpu); #endif /* __ASM_SMP_H */ diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h index 79feff1b0721..33500c9f6e5e 100644 --- a/arch/parisc/include/asm/socket.h +++ b/arch/parisc/include/asm/socket.h @@ -4,8 +4,8 @@ #include <uapi/asm/socket.h> -/* O_NONBLOCK clashes with the bits used for socket types. Therefore we - * have to define SOCK_NONBLOCK to a different value here. +/* O_NONBLOCK clashed with the bits used for socket types. Therefore we + * had to define SOCK_NONBLOCK to a different value here. 
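The aliasing rule spelled out in the new shmparam.h comment above, that any two mappings of the same physical address must have virtual and physical addresses which agree modulo SHM_COLOUR, reduces to a one-line congruence check. mapping_is_congruent() below is an invented helper for illustration only, not the kernel's mmap colouring code.

#include <stdbool.h>
#include <stdio.h>

#define SHM_COLOUR 0x00400000UL	/* 4 MB colouring granule, as defined above */

static bool mapping_is_congruent(unsigned long vaddr, unsigned long paddr)
{
	return (vaddr & (SHM_COLOUR - 1)) == (paddr & (SHM_COLOUR - 1));
}

int main(void)
{
	printf("%d\n", mapping_is_congruent(0x40400000UL, 0x00400000UL)); /* 1 */
	printf("%d\n", mapping_is_congruent(0x40500000UL, 0x00400000UL)); /* 0 */
	return 0;
}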
*/ #define SOCK_NONBLOCK 0x40000000 diff --git a/arch/parisc/include/asm/sparsemem.h b/arch/parisc/include/asm/sparsemem.h new file mode 100644 index 000000000000..b5c3a79045b4 --- /dev/null +++ b/arch/parisc/include/asm/sparsemem.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ASM_PARISC_SPARSEMEM_H +#define ASM_PARISC_SPARSEMEM_H + +/* We have these possible memory map layouts: + * Astro: 0-3.75, 67.75-68, 4-64 + * zx1: 0-1, 257-260, 4-256 + * Stretch (N-class): 0-2, 4-32, 34-xxx + */ + +#define MAX_PHYSMEM_BITS 39 /* 512 GB */ +#define SECTION_SIZE_BITS 27 /* 128 MB */ + +#endif diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h index 3d4dd68e181b..1013eeba31e5 100644 --- a/arch/parisc/include/asm/special_insns.h +++ b/arch/parisc/include/asm/special_insns.h @@ -2,11 +2,73 @@ #ifndef __PARISC_SPECIAL_INSNS_H #define __PARISC_SPECIAL_INSNS_H +#define lpa(va) ({ \ + unsigned long pa; \ + __asm__ __volatile__( \ + "copy %%r0,%0\n" \ + "8:\tlpa %%r0(%1),%0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \ + "or %%r0,%%r0,%%r0") \ + : "=&r" (pa) \ + : "r" (va) \ + : "memory" \ + ); \ + pa; \ +}) + +#define lpa_user(va) ({ \ + unsigned long pa; \ + __asm__ __volatile__( \ + "copy %%r0,%0\n" \ + "8:\tlpa %%r0(%%sr3,%1),%0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \ + "or %%r0,%%r0,%%r0") \ + : "=&r" (pa) \ + : "r" (va) \ + : "memory" \ + ); \ + pa; \ +}) + +/** + * prober_user() - Probe user read access + * @sr: Space regster. + * @va: Virtual address. + * + * Return: Non-zero if address is accessible. + * + * Due to the way _PAGE_READ is handled in TLB entries, we need + * a special check to determine whether a user address is accessible. + * The ldb instruction does the initial access check. If it is + * successful, the probe instruction checks user access rights. 
+ */ +#define prober_user(sr, va) ({ \ + unsigned long read_allowed; \ + __asm__ __volatile__( \ + "copy %%r0,%0\n" \ + "8:\tldb 0(%%sr%1,%2),%%r0\n" \ + "\tproberi (%%sr%1,%2),%3,%0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \ + "or %%r0,%%r0,%%r0") \ + : "=&r" (read_allowed) \ + : "i" (sr), "r" (va), "i" (PRIV_USER) \ + : "memory" \ + ); \ + read_allowed; \ +}) + +#define CR_EIEM 15 /* External Interrupt Enable Mask */ +#define CR_CR16 16 /* CR16 Interval Timer */ +#define CR_EIRR 23 /* External Interrupt Request Register */ + #define mfctl(reg) ({ \ unsigned long cr; \ __asm__ __volatile__( \ - "mfctl " #reg ",%0" : \ - "=r" (cr) \ + "mfctl %1,%0" : \ + "=r" (cr) : "i" (reg) \ ); \ cr; \ }) @@ -16,19 +78,14 @@ : /* no outputs */ \ : "r" (gr), "i" (cr) : "memory") -/* these are here to de-mystefy the calling code, and to provide hooks */ -/* which I needed for debugging EIEM problems -PB */ -#define get_eiem() mfctl(15) -static inline void set_eiem(unsigned long val) -{ - mtctl(val, 15); -} +#define get_eiem() mfctl(CR_EIEM) +#define set_eiem(val) mtctl(val, CR_EIEM) #define mfsp(reg) ({ \ unsigned long cr; \ __asm__ __volatile__( \ - "mfsp " #reg ",%0" : \ - "=r" (cr) \ + "mfsp %%sr%1,%0" \ + : "=r" (cr) : "i"(reg) \ ); \ cr; \ }) diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 8a63515f03bf..0b326e52255e 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -7,163 +7,155 @@ #include <asm/processor.h> #include <asm/spinlock_types.h> -static inline int arch_spin_is_locked(arch_spinlock_t *x) +static inline void arch_spin_val_check(int lock_val) { - volatile unsigned int *a = __ldcw_align(x); - return *a == 0; + if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)) + asm volatile( "andcm,= %0,%1,%%r0\n" + ".word %2\n" + : : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL), + "i" (SPINLOCK_BREAK_INSN)); } -#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0) +static inline int arch_spin_is_locked(arch_spinlock_t *x) +{ + volatile unsigned int *a; + int lock_val; -static inline void arch_spin_lock_flags(arch_spinlock_t *x, - unsigned long flags) + a = __ldcw_align(x); + lock_val = READ_ONCE(*a); + arch_spin_val_check(lock_val); + return (lock_val == 0); +} + +static inline void arch_spin_lock(arch_spinlock_t *x) { volatile unsigned int *a; a = __ldcw_align(x); - while (__ldcw(a) == 0) + do { + int lock_val_old; + + lock_val_old = __ldcw(a); + arch_spin_val_check(lock_val_old); + if (lock_val_old) + return; /* got lock */ + + /* wait until we should try to get lock again */ while (*a == 0) - if (flags & PSW_SM_I) { - local_irq_enable(); - cpu_relax(); - local_irq_disable(); - } else - cpu_relax(); + continue; + } while (1); } -#define arch_spin_lock_flags arch_spin_lock_flags static inline void arch_spin_unlock(arch_spinlock_t *x) { volatile unsigned int *a; a = __ldcw_align(x); - mb(); - *a = 1; + /* Release with ordered store. */ + __asm__ __volatile__("stw,ma %0,0(%1)" + : : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory"); } static inline int arch_spin_trylock(arch_spinlock_t *x) { volatile unsigned int *a; - int ret; + int lock_val; a = __ldcw_align(x); - ret = __ldcw(a) != 0; - - return ret; + lock_val = __ldcw(a); + arch_spin_val_check(lock_val); + return lock_val != 0; } /* * Read-write spinlocks, allowing multiple readers but only one writer. - * Linux rwlocks are unfair to writers; they can be starved for an indefinite - * time by readers. 
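The new arch_spin_lock()/arch_spin_unlock() in the spinlock.h hunk above are built around ldcw ("load and clear word"): a free lock word is non-zero, and acquiring the lock means atomically fetching the old value while leaving zero behind. Below is a user-space model of that protocol using C11 atomics in place of the real instruction; the function names are invented, and only the 0x1a46 idle value is taken from this series.

/* User-space model of an ldcw-style lock: not the kernel code, just the
 * protocol.  atomic_exchange(..., 0) stands in for the ldcw instruction.
 */
#include <stdatomic.h>
#include <stdio.h>

#define UNLOCKED_VAL 0x1a46     /* mirrors __ARCH_SPIN_LOCK_UNLOCKED_VAL */

static atomic_uint lock_word = UNLOCKED_VAL;

static void model_spin_lock(void)
{
        for (;;) {
                /* "ldcw": fetch the old word and leave 0 behind */
                unsigned int old = atomic_exchange_explicit(&lock_word, 0,
                                                memory_order_acquire);
                if (old != 0)
                        return;         /* it was free; we now hold it */

                /* busy-wait until the word looks free, then retry */
                while (atomic_load_explicit(&lock_word,
                                            memory_order_relaxed) == 0)
                        ;
        }
}

static void model_spin_unlock(void)
{
        /* ordered store of the idle value, like the stw,ma release */
        atomic_store_explicit(&lock_word, UNLOCKED_VAL, memory_order_release);
}

int main(void)
{
        model_spin_lock();
        puts("locked");
        model_spin_unlock();
        puts("unlocked");
        return 0;
}

The inner busy-wait mirrors the kernel loop that watches the word until it looks free before retrying the ldcw, which keeps the cache line shared while the lock is contended.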
With care, they can also be taken in interrupt context. + * Unfair locking as Writers could be starved indefinitely by Reader(s) * - * In the PA-RISC implementation, we have a spinlock and a counter. - * Readers use the lock to serialise their access to the counter (which - * records how many readers currently hold the lock). - * Writers hold the spinlock, preventing any readers or other writers from - * grabbing the rwlock. + * The spinlock itself is contained in @counter and access to it is + * serialized with @lock_mutex. */ -/* Note that we have to ensure interrupts are disabled in case we're - * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void arch_read_lock(arch_rwlock_t *rw) +/* 1 - lock taken successfully */ +static inline int arch_read_trylock(arch_rwlock_t *rw) { + int ret = 0; unsigned long flags; - local_irq_save(flags); - arch_spin_lock_flags(&rw->lock, flags); - rw->counter++; - arch_spin_unlock(&rw->lock); - local_irq_restore(flags); -} -/* Note that we have to ensure interrupts are disabled in case we're - * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void arch_read_unlock(arch_rwlock_t *rw) -{ - unsigned long flags; local_irq_save(flags); - arch_spin_lock_flags(&rw->lock, flags); - rw->counter--; - arch_spin_unlock(&rw->lock); + arch_spin_lock(&(rw->lock_mutex)); + + /* + * zero means writer holds the lock exclusively, deny Reader. + * Otherwise grant lock to first/subseq reader + */ + if (rw->counter > 0) { + rw->counter--; + ret = 1; + } + + arch_spin_unlock(&(rw->lock_mutex)); local_irq_restore(flags); + + return ret; } -/* Note that we have to ensure interrupts are disabled in case we're - * interrupted by some other code that wants to grab the same read lock */ -static __inline__ int arch_read_trylock(arch_rwlock_t *rw) +/* 1 - lock taken successfully */ +static inline int arch_write_trylock(arch_rwlock_t *rw) { + int ret = 0; unsigned long flags; - retry: + local_irq_save(flags); - if (arch_spin_trylock(&rw->lock)) { - rw->counter++; - arch_spin_unlock(&rw->lock); - local_irq_restore(flags); - return 1; + arch_spin_lock(&(rw->lock_mutex)); + + /* + * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), + * deny writer. Otherwise if unlocked grant to writer + * Hence the claim that Linux rwlocks are unfair to writers. + * (can be starved for an indefinite time by readers). 
+ */ + if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { + rw->counter = 0; + ret = 1; } - + arch_spin_unlock(&(rw->lock_mutex)); local_irq_restore(flags); - /* If write-locked, we fail to acquire the lock */ - if (rw->counter < 0) - return 0; - /* Wait until we have a realistic chance at the lock */ - while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) + return ret; +} + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + while (!arch_read_trylock(rw)) cpu_relax(); +} - goto retry; +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + while (!arch_write_trylock(rw)) + cpu_relax(); } -/* Note that we have to ensure interrupts are disabled in case we're - * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ void arch_write_lock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long flags; -retry: - local_irq_save(flags); - arch_spin_lock_flags(&rw->lock, flags); - - if (rw->counter != 0) { - arch_spin_unlock(&rw->lock); - local_irq_restore(flags); - while (rw->counter != 0) - cpu_relax(); - - goto retry; - } - - rw->counter = -1; /* mark as write-locked */ - mb(); + local_irq_save(flags); + arch_spin_lock(&(rw->lock_mutex)); + rw->counter++; + arch_spin_unlock(&(rw->lock_mutex)); local_irq_restore(flags); } -static __inline__ void arch_write_unlock(arch_rwlock_t *rw) -{ - rw->counter = 0; - arch_spin_unlock(&rw->lock); -} - -/* Note that we have to ensure interrupts are disabled in case we're - * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ int arch_write_trylock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { unsigned long flags; - int result = 0; local_irq_save(flags); - if (arch_spin_trylock(&rw->lock)) { - if (rw->counter == 0) { - rw->counter = -1; - result = 1; - } else { - /* Read-locked. Oh well. 
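The reworked rwlock above boils down to a counter guarded by the inner spinlock: the counter starts at 0x01000000 (__ARCH_RW_LOCK_UNLOCKED__ in the spinlock_types.h hunk below), each reader decrements it, and a writer may take it to zero only while it still holds the full unlocked value. A user-space model of that scheme, with a pthread mutex standing in for lock_mutex; illustrative only, not the kernel code.

/* User-space model of the counter-based rwlock described above. */
#include <pthread.h>
#include <stdio.h>

#define RW_UNLOCKED 0x01000000U

static pthread_mutex_t lock_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int counter = RW_UNLOCKED;

static int model_read_trylock(void)
{
        int got = 0;

        pthread_mutex_lock(&lock_mutex);
        if (counter > 0) {              /* 0 means a writer is in */
                counter--;              /* one more reader */
                got = 1;
        }
        pthread_mutex_unlock(&lock_mutex);
        return got;
}

static void model_read_unlock(void)
{
        pthread_mutex_lock(&lock_mutex);
        counter++;
        pthread_mutex_unlock(&lock_mutex);
}

static int model_write_trylock(void)
{
        int got = 0;

        pthread_mutex_lock(&lock_mutex);
        if (counter == RW_UNLOCKED) {   /* no readers, no writer */
                counter = 0;
                got = 1;
        }
        pthread_mutex_unlock(&lock_mutex);
        return got;
}

static void model_write_unlock(void)
{
        pthread_mutex_lock(&lock_mutex);
        counter = RW_UNLOCKED;
        pthread_mutex_unlock(&lock_mutex);
}

int main(void)
{
        printf("reader in: %d\n", model_read_trylock());   /* 1 */
        printf("writer in: %d\n", model_write_trylock());  /* 0: a reader holds it */
        model_read_unlock();
        printf("writer in: %d\n", model_write_trylock());  /* 1 */
        model_write_unlock();
        return 0;
}

As in the kernel version, the blocking lock operations would simply spin on these trylock helpers, which is why readers can starve a waiting writer indefinitely.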
*/ - arch_spin_unlock(&rw->lock); - } - } + arch_spin_lock(&(rw->lock_mutex)); + rw->counter = __ARCH_RW_LOCK_UNLOCKED__; + arch_spin_unlock(&(rw->lock_mutex)); local_irq_restore(flags); - - return result; } #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 42979c5704dc..8e6889bc23cc 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -2,21 +2,34 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H +#define __ARCH_SPIN_LOCK_UNLOCKED_VAL 0x1a46 + +#define SPINLOCK_BREAK_INSN 0x0000c006 /* break 6,6 */ + +#ifndef __ASSEMBLER__ + typedef struct { -#ifdef CONFIG_PA20 - volatile unsigned int slock; -# define __ARCH_SPIN_LOCK_UNLOCKED { 1 } -#else volatile unsigned int lock[4]; -# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } -#endif +# define __ARCH_SPIN_LOCK_UNLOCKED \ + { { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \ + __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } } } arch_spinlock_t; + +/* counter: + * Unlocked : 0x0100_0000 + * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it) + * Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000 + */ typedef struct { - arch_spinlock_t lock; - volatile int counter; + arch_spinlock_t lock_mutex; + volatile unsigned int counter; } arch_rwlock_t; -#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } +#endif /* __ASSEMBLER__ */ + +#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 +#define __ARCH_RW_LOCK_UNLOCKED { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \ + .counter = __ARCH_RW_LOCK_UNLOCKED__ } #endif diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h index 8bff1a58c97f..c11222798ab2 100644 --- a/arch/parisc/include/asm/syscall.h +++ b/arch/parisc/include/asm/syscall.h @@ -17,30 +17,42 @@ static inline long syscall_get_nr(struct task_struct *tsk, return regs->gr[20]; } +static inline void syscall_set_nr(struct task_struct *tsk, + struct pt_regs *regs, + int nr) +{ + regs->gr[20] = nr; +} + static inline void syscall_get_arguments(struct task_struct *tsk, - struct pt_regs *regs, unsigned int i, - unsigned int n, unsigned long *args) + struct pt_regs *regs, + unsigned long *args) +{ + args[5] = regs->gr[21]; + args[4] = regs->gr[22]; + args[3] = regs->gr[23]; + args[2] = regs->gr[24]; + args[1] = regs->gr[25]; + args[0] = regs->gr[26]; +} + +static inline void syscall_set_arguments(struct task_struct *tsk, + struct pt_regs *regs, + unsigned long *args) { - BUG_ON(i); + regs->gr[21] = args[5]; + regs->gr[22] = args[4]; + regs->gr[23] = args[3]; + regs->gr[24] = args[2]; + regs->gr[25] = args[1]; + regs->gr[26] = args[0]; +} - switch (n) { - case 6: - args[5] = regs->gr[21]; - case 5: - args[4] = regs->gr[22]; - case 4: - args[3] = regs->gr[23]; - case 3: - args[2] = regs->gr[24]; - case 2: - args[1] = regs->gr[25]; - case 1: - args[0] = regs->gr[26]; - case 0: - break; - default: - BUG(); - } +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + unsigned long error = regs->gr[28]; + return IS_ERR_VALUE(error) ? 
error : 0; } static inline long syscall_get_return_value(struct task_struct *task, @@ -62,11 +74,11 @@ static inline void syscall_rollback(struct task_struct *task, /* do nothing */ } -static inline int syscall_get_arch(void) +static inline int syscall_get_arch(struct task_struct *task) { int arch = AUDIT_ARCH_PARISC; #ifdef CONFIG_64BIT - if (!is_compat_task()) + if (!__is_compat_task(task)) arch = AUDIT_ARCH_PARISC64; #endif return arch; diff --git a/arch/parisc/include/asm/termios.h b/arch/parisc/include/asm/termios.h deleted file mode 100644 index cded9dc90c1b..000000000000 --- a/arch/parisc/include/asm/termios.h +++ /dev/null @@ -1,52 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _PARISC_TERMIOS_H -#define _PARISC_TERMIOS_H - -#include <uapi/asm/termios.h> - - -/* intr=^C quit=^\ erase=del kill=^U - eof=^D vtime=\0 vmin=\1 sxtc=\0 - start=^Q stop=^S susp=^Z eol=\0 - reprint=^R discard=^U werase=^W lnext=^V - eol2=\0 -*/ -#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" - -/* - * Translate a "termio" structure into a "termios". Ugh. - */ -#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ - unsigned short __tmp; \ - get_user(__tmp,&(termio)->x); \ - *(unsigned short *) &(termios)->x = __tmp; \ -} - -#define user_termio_to_kernel_termios(termios, termio) \ -({ \ - SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ - copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ -}) - -/* - * Translate a "termios" structure into a "termio". Ugh. - */ -#define kernel_termios_to_user_termio(termio, termios) \ -({ \ - put_user((termios)->c_iflag, &(termio)->c_iflag); \ - put_user((termios)->c_oflag, &(termio)->c_oflag); \ - put_user((termios)->c_cflag, &(termio)->c_cflag); \ - put_user((termios)->c_lflag, &(termio)->c_lflag); \ - put_user((termios)->c_line, &(termio)->c_line); \ - copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ -}) - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) -#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) - -#endif /* _PARISC_TERMIOS_H */ diff --git a/arch/parisc/include/asm/text-patching.h b/arch/parisc/include/asm/text-patching.h new file mode 100644 index 000000000000..400d84c6e504 --- /dev/null +++ b/arch/parisc/include/asm/text-patching.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PARISC_KERNEL_PATCH_H +#define _PARISC_KERNEL_PATCH_H + +/* stop machine and patch kernel text */ +void patch_text(void *addr, unsigned int insn); +void patch_text_multiple(void *addr, u32 *insn, unsigned int len); + +/* patch kernel text with machine already stopped (e.g. 
in kgdb) */ +void __patch_text(void *addr, u32 insn); +void __patch_text_multiple(void *addr, u32 *insn, unsigned int len); + +#endif diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 285757544cca..b283738bb6da 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -2,33 +2,25 @@ #ifndef _ASM_PARISC_THREAD_INFO_H #define _ASM_PARISC_THREAD_INFO_H -#ifdef __KERNEL__ - -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/processor.h> #include <asm/special_insns.h> struct thread_info { - struct task_struct *task; /* main task structure */ unsigned long flags; /* thread_info flags (see TIF_*) */ - mm_segment_t addr_limit; /* user-level address space limit */ - __u32 cpu; /* current CPU */ int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ +#ifdef CONFIG_SMP + unsigned int cpu; +#endif }; #define INIT_THREAD_INFO(tsk) \ { \ - .task = &tsk, \ .flags = 0, \ - .cpu = 0, \ - .addr_limit = KERNEL_DS, \ .preempt_count = INIT_PREEMPT_COUNT, \ } -/* how to get the thread information struct from C */ -#define current_thread_info() ((struct thread_info *)mfctl(30)) - -#endif /* !__ASSEMBLY */ +#endif /* !__ASSEMBLER__ */ /* thread information allocation */ @@ -52,15 +44,18 @@ struct thread_info { #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_32BIT 4 /* 32 bit binary */ #define TIF_MEMDIE 5 /* is terminating due to OOM killer */ +#define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ #define TIF_SINGLESTEP 9 /* single stepping? */ #define TIF_BLOCKSTEP 10 /* branch stepping? 
*/ #define TIF_SECCOMP 11 /* secure computing */ #define TIF_SYSCALL_TRACEPOINT 12 /* syscall tracepoint instrumentation */ +#define TIF_NONBLOCK_WARNING 13 /* warned about wrong O_NONBLOCK usage */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_32BIT (1 << TIF_32BIT) @@ -72,7 +67,7 @@ struct thread_info { #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ - _TIF_NEED_RESCHED) + _TIF_NEED_RESCHED | _TIF_NOTIFY_SIGNAL) #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) @@ -87,6 +82,4 @@ struct thread_info { # define is_32bit_task() (1) #endif -#endif /* __KERNEL__ */ - #endif /* _ASM_PARISC_THREAD_INFO_H */ diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h index 45537cd4d1d3..b4622cb06a75 100644 --- a/arch/parisc/include/asm/timex.h +++ b/arch/parisc/include/asm/timex.h @@ -7,14 +7,16 @@ #ifndef _ASMPARISC_TIMEX_H #define _ASMPARISC_TIMEX_H +#include <asm/special_insns.h> #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ typedef unsigned long cycles_t; -static inline cycles_t get_cycles (void) +static inline cycles_t get_cycles(void) { return mfctl(16); } +#define get_cycles get_cycles #endif diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h index 0c881e74d8a6..44235f367674 100644 --- a/arch/parisc/include/asm/tlb.h +++ b/arch/parisc/include/asm/tlb.h @@ -2,27 +2,11 @@ #ifndef _PARISC_TLB_H #define _PARISC_TLB_H -#define tlb_flush(tlb) \ -do { if ((tlb)->fullmm) \ - flush_tlb_mm((tlb)->mm);\ -} while (0) - -#define tlb_start_vma(tlb, vma) \ -do { if (!(tlb)->fullmm) \ - flush_cache_range(vma, vma->vm_start, vma->vm_end); \ -} while (0) - -#define tlb_end_vma(tlb, vma) \ -do { if (!(tlb)->fullmm) \ - flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ -} while (0) - -#define __tlb_remove_tlb_entry(tlb, pte, address) \ - do { } while (0) - #include <asm-generic/tlb.h> +#if CONFIG_PGTABLE_LEVELS == 3 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) +#endif #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) #endif diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h index 6804374efa66..5ffd7c17f593 100644 --- a/arch/parisc/include/asm/tlbflush.h +++ b/arch/parisc/include/asm/tlbflush.h @@ -8,21 +8,6 @@ #include <linux/sched.h> #include <asm/mmu_context.h> - -/* This is for the serialisation of PxTLB broadcasts. At least on the - * N class systems, only one PxTLB inter processor broadcast can be - * active at any one time on the Merced bus. This tlb purge - * synchronisation is fairly lightweight and harmless so we activate - * it on all systems not just the N class. - - * It is also used to ensure PTE updates are atomic and consistent - * with the TLB. 
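A small side effect of the timex.h hunk above is that get_cycles() now pulls in asm/special_insns.h itself and reads the CR16 interval timer directly, so kernel code can take quick cycle-count deltas without extra includes. A rough kernel-context sketch; the function and message are invented and it is illustrative only, not taken from the patch.

/* Kernel-context sketch: time a short section in CR16 cycles. */
#include <asm/timex.h>
#include <linux/printk.h>

static void time_something(void (*fn)(void))
{
        cycles_t start, elapsed;

        start = get_cycles();           /* mfctl(16): CR16 interval timer */
        fn();
        elapsed = get_cycles() - start;

        pr_info("took %lu cycles\n", (unsigned long)elapsed);
}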
- */ -extern spinlock_t pa_tlb_lock; - -#define purge_tlb_start(flags) spin_lock_irqsave(&pa_tlb_lock, flags) -#define purge_tlb_end(flags) spin_unlock_irqrestore(&pa_tlb_lock, flags) - extern void flush_tlb_all(void); extern void flush_tlb_all_local(void *); @@ -32,7 +17,7 @@ int __flush_tlb_range(unsigned long sid, unsigned long start, unsigned long end); #define flush_tlb_range(vma, start, end) \ - __flush_tlb_range((vma)->vm_mm->context, start, end) + __flush_tlb_range((vma)->vm_mm->context.space_id, start, end) #define flush_tlb_kernel_range(start, end) \ __flush_tlb_range(0, start, end) @@ -79,13 +64,6 @@ static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { - unsigned long flags, sid; - - sid = vma->vm_mm->context; - purge_tlb_start(flags); - mtsp(sid, 1); - pdtlb(addr); - pitlb(addr); - purge_tlb_end(flags); + purge_tlb_entries(vma->vm_mm, addr); } #endif diff --git a/arch/parisc/include/asm/topology.h b/arch/parisc/include/asm/topology.h index 6f0750c74e47..406afb356f1a 100644 --- a/arch/parisc/include/asm/topology.h +++ b/arch/parisc/include/asm/topology.h @@ -1,33 +1,16 @@ #ifndef _ASM_PARISC_TOPOLOGY_H #define _ASM_PARISC_TOPOLOGY_H -#ifdef CONFIG_PARISC_CPU_TOPOLOGY +#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY #include <linux/cpumask.h> - -struct cputopo_parisc { - int thread_id; - int core_id; - int socket_id; - cpumask_t thread_sibling; - cpumask_t core_sibling; -}; - -extern struct cputopo_parisc cpu_topology[NR_CPUS]; - -#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) -#define topology_core_id(cpu) (cpu_topology[cpu].core_id) -#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) -#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) - -void init_cpu_topology(void); -void store_cpu_topology(unsigned int cpuid); -const struct cpumask *cpu_coregroup_mask(int cpu); +#include <linux/arch_topology.h> #else static inline void init_cpu_topology(void) { } static inline void store_cpu_topology(unsigned int cpuid) { } +static inline void reset_cpu_topology(void) { } #endif diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h index 8ecc1f0c0483..10c8fb68e404 100644 --- a/arch/parisc/include/asm/traps.h +++ b/arch/parisc/include/asm/traps.h @@ -4,7 +4,7 @@ #define PARISC_ITLB_TRAP 6 /* defined by architecture. Do not change. 
*/ -#if !defined(__ASSEMBLY__) +#if !defined(__ASSEMBLER__) struct pt_regs; /* traps.c */ @@ -14,9 +14,11 @@ void parisc_terminate(char *msg, struct pt_regs *regs, void die_if_kernel(char *str, struct pt_regs *regs, long err); /* mm/fault.c */ +unsigned long parisc_acctyp(unsigned long code, unsigned int inst); const char *trap_name(unsigned long code); void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long address); +int handle_nadtlb_fault(struct pt_regs *regs); #endif #endif diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 30ac2865ea73..6c531d2c847e 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -7,117 +7,89 @@ */ #include <asm/page.h> #include <asm/cache.h> +#include <asm/extable.h> #include <linux/bug.h> #include <linux/string.h> -#define KERNEL_DS ((mm_segment_t){0}) -#define USER_DS ((mm_segment_t){1}) - -#define segment_eq(a, b) ((a).seg == (b).seg) - -#define get_ds() (KERNEL_DS) -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - -/* - * Note that since kernel addresses are in a separate address space on - * parisc, we don't need to do anything for access_ok(). - * We just let the page fault handler do the right thing. This also means - * that put_user is the same as __put_user, etc. - */ - -#define access_ok(uaddr, size) \ - ( (uaddr) == (uaddr) ) +#define TASK_SIZE_MAX DEFAULT_TASK_SIZE +#include <asm/pgtable.h> +#include <asm-generic/access_ok.h> #define put_user __put_user #define get_user __get_user #if !defined(CONFIG_64BIT) -#define LDD_USER(val, ptr) __get_user_asm64(val, ptr) -#define STD_USER(x, ptr) __put_user_asm64(x, ptr) +#define LDD_USER(sr, val, ptr) __get_user_asm64(sr, val, ptr) +#define STD_USER(sr, x, ptr) __put_user_asm64(sr, x, ptr) #else -#define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr) -#define STD_USER(x, ptr) __put_user_asm("std", x, ptr) +#define LDD_USER(sr, val, ptr) __get_user_asm(sr, val, "ldd", ptr) +#define STD_USER(sr, x, ptr) __put_user_asm(sr, "std", x, ptr) #endif -/* - * The exception table contains two values: the first is the relative offset to - * the address of the instruction that is allowed to fault, and the second is - * the relative offset to the address of the fixup routine. Since relative - * addresses are used, 32bit values are sufficient even on 64bit kernel. - */ - -#define ARCH_HAS_RELATIVE_EXTABLE -struct exception_table_entry { - int insn; /* relative address of insn that is allowed to fault. */ - int fixup; /* relative address of fixup routine */ -}; - -#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\ - ".section __ex_table,\"aw\"\n" \ - ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \ - ".previous\n" - -/* - * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry - * (with lowest bit set) for which the fault handler in fixup_exception() will - * load -EFAULT into %r8 for a read or write fault, and zeroes the target - * register in case of a read fault in get_user(). - */ -#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\ - ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1) - -/* - * load_sr2() preloads the space register %%sr2 - based on the value of - * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which - * is 0), or with the current value of %%sr3 to access user space (USER_DS) - * memory. 
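The uaccess rework in this hunk drops the old get_fs()/set_fs() machinery: rather than preloading %sr2, the low-level helpers take the space register (SR_USER or SR_KERNEL) as an explicit parameter, while get_user()/put_user() keep their usual interface and error convention. A hedged kernel-context sketch of the unchanged calling pattern; the surrounding function and names are hypothetical.

/* Kernel-context sketch: typical get_user()/put_user() usage.
 * Only the uaccess calls and the -EFAULT convention are the point.
 */
#include <linux/uaccess.h>
#include <linux/errno.h>

static long example_read_flag(int __user *uptr, int *kernel_flag)
{
        int val;

        if (get_user(val, uptr))        /* non-zero means -EFAULT */
                return -EFAULT;

        *kernel_flag = val;

        /* write back an acknowledgement; same error convention */
        if (put_user(1, uptr))
                return -EFAULT;

        return 0;
}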
The following __get_user_asm() and __put_user_asm() functions have - * %%sr2 hard-coded to access the requested memory. - */ -#define load_sr2() \ - __asm__(" or,= %0,%%r0,%%r0\n\t" \ - " mfsp %%sr3,%0\n\t" \ - " mtsp %0,%%sr2\n\t" \ - : : "r"(get_fs()) : ) - -#define __get_user_internal(val, ptr) \ +#define __get_user_internal(sr, val, ptr) \ ({ \ - register long __gu_err __asm__ ("r8") = 0; \ + ASM_EXCEPTIONTABLE_VAR(__gu_err); \ \ switch (sizeof(*(ptr))) { \ - case 1: __get_user_asm(val, "ldb", ptr); break; \ - case 2: __get_user_asm(val, "ldh", ptr); break; \ - case 4: __get_user_asm(val, "ldw", ptr); break; \ - case 8: LDD_USER(val, ptr); break; \ + case 1: __get_user_asm(sr, val, "ldb", ptr); break; \ + case 2: __get_user_asm(sr, val, "ldh", ptr); break; \ + case 4: __get_user_asm(sr, val, "ldw", ptr); break; \ + case 8: LDD_USER(sr, val, ptr); break; \ default: BUILD_BUG(); \ } \ \ __gu_err; \ }) -#define __get_user(val, ptr) \ -({ \ - load_sr2(); \ - __get_user_internal(val, ptr); \ +#define __probe_user_internal(sr, error, ptr) \ +({ \ + __asm__("\tproberi (%%sr%1,%2),%3,%0\n" \ + "\tcmpiclr,= 1,%0,%0\n" \ + "\tldi %4,%0\n" \ + : "=r"(error) \ + : "i"(sr), "r"(ptr), "i"(PRIV_USER), \ + "i"(-EFAULT)); \ }) -#define __get_user_asm(val, ldx, ptr) \ +#define __get_user(val, ptr) \ +({ \ + register long __gu_err; \ + \ + __gu_err = __get_user_internal(SR_USER, val, ptr); \ + if (likely(!__gu_err)) \ + __probe_user_internal(SR_USER, __gu_err, ptr); \ + __gu_err; \ +}) + +#define __get_user_asm(sr, val, ldx, ptr) \ { \ register long __gu_val; \ \ - __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \ + __asm__("1: " ldx " 0(%%sr%2,%3),%0\n" \ "9:\n" \ - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ - : "=r"(__gu_val), "=r"(__gu_err) \ - : "r"(ptr), "1"(__gu_err)); \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \ + : "=r"(__gu_val), "+r"(__gu_err) \ + : "i"(sr), "r"(ptr)); \ \ (val) = (__force __typeof__(*(ptr))) __gu_val; \ } +#define __get_kernel_nofault(dst, src, type, err_label) \ +{ \ + type __z; \ + long __err; \ + __err = __get_user_internal(SR_KERNEL, __z, (type *)(src)); \ + if (unlikely(__err)) \ + goto err_label; \ + else \ + *(type *)(dst) = __z; \ +} + + #if !defined(CONFIG_64BIT) -#define __get_user_asm64(val, ptr) \ +#define __get_user_asm64(sr, val, ptr) \ { \ union { \ unsigned long long l; \ @@ -125,13 +97,13 @@ struct exception_table_entry { } __gu_tmp; \ \ __asm__(" copy %%r0,%R0\n" \ - "1: ldw 0(%%sr2,%2),%0\n" \ - "2: ldw 4(%%sr2,%2),%R0\n" \ + "1: ldw 0(%%sr%2,%3),%0\n" \ + "2: ldw 4(%%sr%2,%3),%R0\n" \ "9:\n" \ - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ - : "=&r"(__gu_tmp.l), "=r"(__gu_err) \ - : "r"(ptr), "1"(__gu_err)); \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%1") \ + : "=&r"(__gu_tmp.l), "+r"(__gu_err) \ + : "i"(sr), "r"(ptr)); \ \ (val) = __gu_tmp.t; \ } @@ -139,16 +111,15 @@ struct exception_table_entry { #endif /* !defined(CONFIG_64BIT) */ -#define __put_user_internal(x, ptr) \ +#define __put_user_internal(sr, x, ptr) \ ({ \ - register long __pu_err __asm__ ("r8") = 0; \ - __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ + ASM_EXCEPTIONTABLE_VAR(__pu_err); \ \ switch (sizeof(*(ptr))) { \ - case 1: __put_user_asm("stb", __x, ptr); break; \ - case 2: __put_user_asm("sth", __x, ptr); break; \ - case 4: __put_user_asm("stw", __x, ptr); break; \ - case 8: STD_USER(__x, ptr); break; \ + case 1: __put_user_asm(sr, "stb", x, ptr); break; \ + case 2: __put_user_asm(sr, "sth", x, 
ptr); break; \ + case 4: __put_user_asm(sr, "stw", x, ptr); break; \ + case 8: STD_USER(sr, x, ptr); break; \ default: BUILD_BUG(); \ } \ \ @@ -157,10 +128,22 @@ struct exception_table_entry { #define __put_user(x, ptr) \ ({ \ - load_sr2(); \ - __put_user_internal(x, ptr); \ + __typeof__(&*(ptr)) __ptr = ptr; \ + __typeof__(*(__ptr)) __x = (__typeof__(*(__ptr)))(x); \ + __put_user_internal(SR_USER, __x, __ptr); \ }) +#define __put_kernel_nofault(dst, src, type, err_label) \ +{ \ + type __z = *(type *)(src); \ + long __err; \ + __err = __put_user_internal(SR_KERNEL, __z, (type *)(dst)); \ + if (unlikely(__err)) \ + goto err_label; \ +} + + + /* * The "__put_user/kernel_asm()" macros tell gcc they read from memory @@ -168,47 +151,45 @@ struct exception_table_entry { * gcc knows about, so there are no aliasing issues. These macros must * also be aware that fixups are executed in the context of the fault, * and any registers used there must be listed as clobbers. - * r8 is already listed as err. + * The register holding the possible EFAULT error (ASM_EXCEPTIONTABLE_REG) + * is already listed as input and output register. */ -#define __put_user_asm(stx, x, ptr) \ - __asm__ __volatile__ ( \ - "1: " stx " %2,0(%%sr2,%1)\n" \ - "9:\n" \ - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ - : "=r"(__pu_err) \ - : "r"(ptr), "r"(x), "0"(__pu_err)) +#define __put_user_asm(sr, stx, x, ptr) \ + __asm__ __volatile__ ( \ + "1: " stx " %1,0(%%sr%2,%3)\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \ + : "+r"(__pu_err) \ + : "r"(x), "i"(sr), "r"(ptr)) #if !defined(CONFIG_64BIT) -#define __put_user_asm64(__val, ptr) do { \ - __asm__ __volatile__ ( \ - "1: stw %2,0(%%sr2,%1)\n" \ - "2: stw %R2,4(%%sr2,%1)\n" \ - "9:\n" \ - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ - : "=r"(__pu_err) \ - : "r"(ptr), "r"(__val), "0"(__pu_err)); \ +#define __put_user_asm64(sr, __val, ptr) do { \ + __asm__ __volatile__ ( \ + "1: stw %1,0(%%sr%2,%3)\n" \ + "2: stw %R1,4(%%sr%2,%3)\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%0") \ + : "+r"(__pu_err) \ + : "r"(__val), "i"(sr), "r"(ptr)); \ } while (0) #endif /* !defined(CONFIG_64BIT) */ - /* * Complex access routines -- external declarations */ extern long strncpy_from_user(char *, const char __user *, long); -extern unsigned lclear_user(void __user *, unsigned long); -extern long lstrnlen_user(const char __user *, long); +extern __must_check unsigned lclear_user(void __user *, unsigned long); +extern __must_check long strnlen_user(const char __user *src, long n); /* * Complex access routines -- macros */ -#define user_addr_max() (~0UL) -#define strnlen_user lstrnlen_user #define clear_user lclear_user #define __clear_user lclear_user @@ -216,12 +197,7 @@ unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src, unsigned long len); unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src, unsigned long len); -unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src, - unsigned long len); #define INLINE_COPY_TO_USER #define INLINE_COPY_FROM_USER -struct pt_regs; -int fixup_exception(struct pt_regs *regs); - #endif /* __PARISC_UACCESS_H */ diff --git a/arch/parisc/include/asm/unaligned.h b/arch/parisc/include/asm/unaligned.h deleted file mode 100644 index e9029c7c2a69..000000000000 --- a/arch/parisc/include/asm/unaligned.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ 
-#ifndef _ASM_PARISC_UNALIGNED_H -#define _ASM_PARISC_UNALIGNED_H - -#include <linux/unaligned/be_struct.h> -#include <linux/unaligned/le_byteshift.h> -#include <linux/unaligned/generic.h> -#define get_unaligned __get_unaligned_be -#define put_unaligned __put_unaligned_be - -#ifdef __KERNEL__ -struct pt_regs; -void handle_unaligned(struct pt_regs *regs); -int check_unaligned(struct pt_regs *regs); -#endif - -#endif /* _ASM_PARISC_UNALIGNED_H */ diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index c2c2afb28941..3e46c6ea9df6 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -6,15 +6,11 @@ #define __NR_Linux_syscalls __NR_syscalls -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define SYS_ify(syscall_name) __NR_##syscall_name -#define __IGNORE_select /* newselect */ #define __IGNORE_fadvise64 /* fadvise64_64 */ -#define __IGNORE_pkey_mprotect -#define __IGNORE_pkey_alloc -#define __IGNORE_pkey_free #ifndef ASM_LINE_SEP # define ASM_LINE_SEP ; @@ -24,7 +20,7 @@ * sysdeps/unix/sysv/linux/hppa/sysdep.h */ -#ifdef PIC +#ifndef DONT_USE_PIC /* WARNING: CANNOT BE USED IN A NOP! */ # define K_STW_ASM_PIC " copy %%r19, %%r4\n" # define K_LDW_ASM_PIC " copy %%r4, %%r19\n" @@ -47,7 +43,7 @@ across the syscall. */ #define K_CALL_CLOB_REGS "%r1", "%r2", K_USING_GR4 \ - "%r20", "%r29", "%r31" + "%r20", "%r29", "%r31" #undef K_INLINE_SYSCALL #define K_INLINE_SYSCALL(name, nr, args...) ({ \ @@ -62,15 +58,11 @@ " ldi %1, %%r20\n" \ K_LDW_ASM_PIC \ : "=r" (__res) \ - : "i" (SYS_ify(name)) K_ASM_ARGS_##nr \ + : "i" (name) K_ASM_ARGS_##nr \ : "memory", K_CALL_CLOB_REGS K_CLOB_ARGS_##nr \ ); \ __sys_res = (long)__res; \ } \ - if ( (unsigned long)__sys_res >= (unsigned long)-4095 ){ \ - errno = -__sys_res; \ - __sys_res = -1; \ - } \ __sys_res; \ }) @@ -112,69 +104,47 @@ #define K_CLOB_ARGS_1 K_CLOB_ARGS_2, "%r25" #define K_CLOB_ARGS_0 K_CLOB_ARGS_1, "%r26" -#define _syscall0(type,name) \ -type name(void) \ -{ \ - return K_INLINE_SYSCALL(name, 0); \ -} - -#define _syscall1(type,name,type1,arg1) \ -type name(type1 arg1) \ -{ \ - return K_INLINE_SYSCALL(name, 1, arg1); \ -} - -#define _syscall2(type,name,type1,arg1,type2,arg2) \ -type name(type1 arg1, type2 arg2) \ -{ \ - return K_INLINE_SYSCALL(name, 2, arg1, arg2); \ -} - -#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ -type name(type1 arg1, type2 arg2, type3 arg3) \ -{ \ - return K_INLINE_SYSCALL(name, 3, arg1, arg2, arg3); \ -} - -#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ -type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ -{ \ - return K_INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4); \ -} - -/* select takes 5 arguments */ -#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ -type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ -{ \ - return K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5); \ -} +#define syscall0(name) \ + K_INLINE_SYSCALL(name, 0) +#define syscall1(name, arg1) \ + K_INLINE_SYSCALL(name, 1, arg1) +#define syscall2(name, arg1, arg2) \ + K_INLINE_SYSCALL(name, 2, arg1, arg2) +#define syscall3(name, arg1, arg2, arg3) \ + K_INLINE_SYSCALL(name, 3, arg1, arg2, arg3) +#define syscall4(name, arg1, arg2, arg3, arg4) \ + K_INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4) +#define syscall5(name, arg1, arg2, arg3, arg4, arg5) \ + K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5) #define __ARCH_WANT_NEW_STAT -#define __ARCH_WANT_OLD_READDIR #define 
__ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_COMPAT_SYS_TIME +#define __ARCH_WANT_SYS_TIME32 #define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL -#define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_SYS_WAITPID #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP #define __ARCH_WANT_SYS_NICE -#define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_COMPAT_SYS_SENDFILE +#define __ARCH_WANT_COMPAT_STAT + +#ifdef CONFIG_64BIT +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME +#endif -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #undef STR diff --git a/arch/parisc/include/asm/vdso.h b/arch/parisc/include/asm/vdso.h new file mode 100644 index 000000000000..81bc1d42802a --- /dev/null +++ b/arch/parisc/include/asm/vdso.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PARISC_VDSO_H__ +#define __PARISC_VDSO_H__ + +#ifndef __ASSEMBLER__ + +#ifdef CONFIG_64BIT +#include <generated/vdso64-offsets.h> +#endif +#include <generated/vdso32-offsets.h> + +#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name)) +#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name)) + +#endif /* __ASSEMBLER__ */ + +/* Default link addresses for the vDSOs */ +#define VDSO_LBASE 0 + +#define VDSO_VERSION_STRING LINUX_6.11 + +#endif /* __PARISC_VDSO_H__ */ diff --git a/arch/parisc/include/asm/video.h b/arch/parisc/include/asm/video.h new file mode 100644 index 000000000000..a9d50ebd6e76 --- /dev/null +++ b/arch/parisc/include/asm/video.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_VIDEO_H_ +#define _ASM_VIDEO_H_ + +#include <linux/types.h> + +struct device; + +#if defined(CONFIG_STI_CORE) && defined(CONFIG_VIDEO) +bool video_is_primary_device(struct device *dev); +#define video_is_primary_device video_is_primary_device +#endif + +#include <asm-generic/video.h> + +#endif /* _ASM_VIDEO_H_ */ diff --git a/arch/parisc/include/asm/vmalloc.h b/arch/parisc/include/asm/vmalloc.h new file mode 100644 index 000000000000..1088ae4e7af9 --- /dev/null +++ b/arch/parisc/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_PARISC_VMALLOC_H +#define _ASM_PARISC_VMALLOC_H + +#endif /* _ASM_PARISC_VMALLOC_H */ diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild index c54353d390ff..353b70b1998f 100644 --- a/arch/parisc/include/uapi/asm/Kbuild +++ b/arch/parisc/include/uapi/asm/Kbuild @@ -1,5 +1,3 @@ -include include/uapi/asm-generic/Kbuild.asm - +# SPDX-License-Identifier: GPL-2.0 generated-y += unistd_32.h generated-y += unistd_64.h -generic-y += kvm_para.h diff --git a/arch/parisc/include/uapi/asm/auxvec.h b/arch/parisc/include/uapi/asm/auxvec.h new file mode 100644 index 000000000000..90d2aa699cf3 --- /dev/null +++ b/arch/parisc/include/uapi/asm/auxvec.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_PARISC_AUXVEC_H +#define _UAPI_PARISC_AUXVEC_H + +/* The vDSO location. 
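The new asm/vdso.h above and the AT_SYSINFO_EHDR auxiliary-vector entry defined just below are the two halves of vDSO discovery: the kernel maps the vDSO into each process and advertises the address of its ELF header in the aux vector, and user space reads that back with getauxval(). A small runnable sketch; the printout is illustrative.

/* User-space sketch: locate the vDSO that these headers describe. */
#include <sys/auxv.h>
#include <elf.h>
#include <stdio.h>

int main(void)
{
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

        if (!vdso) {
                puts("no vDSO mapped");
                return 1;
        }
        /* the value is the address of the vDSO's ELF header */
        printf("vDSO ELF header at 0x%lx\n", vdso);
        return 0;
}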
*/ +#define AT_SYSINFO_EHDR 33 + +#endif /* _UAPI_PARISC_AUXVEC_H */ diff --git a/arch/parisc/include/uapi/asm/cachectl.h b/arch/parisc/include/uapi/asm/cachectl.h new file mode 100644 index 000000000000..68d6b455498b --- /dev/null +++ b/arch/parisc/include/uapi/asm/cachectl.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_CACHECTL +#define _ASM_CACHECTL + +/* + * Options for cacheflush system call + */ +#define ICACHE (1<<0) /* flush instruction cache */ +#define DCACHE (1<<1) /* writeback and flush data cache */ +#define BCACHE (ICACHE|DCACHE) /* flush both caches */ + +#endif /* _ASM_CACHECTL */ diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h index 87245c584784..8d94739d75c6 100644 --- a/arch/parisc/include/uapi/asm/errno.h +++ b/arch/parisc/include/uapi/asm/errno.h @@ -75,7 +75,6 @@ /* We now return you to your regularly scheduled HPUX. */ -#define ENOSYM 215 /* symbol does not exist in executable */ #define ENOTSOCK 216 /* Socket operation on non-socket */ #define EDESTADDRREQ 217 /* Destination address required */ #define EMSGSIZE 218 /* Message too long */ @@ -101,7 +100,6 @@ #define ETIMEDOUT 238 /* Connection timed out */ #define ECONNREFUSED 239 /* Connection refused */ #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */ -#define EREMOTERELEASE 240 /* Remote peer released connection */ #define EHOSTDOWN 241 /* Host is down */ #define EHOSTUNREACH 242 /* No route to host */ diff --git a/arch/parisc/include/uapi/asm/fcntl.h b/arch/parisc/include/uapi/asm/fcntl.h index 03ce20e5ad7d..03dee816cb13 100644 --- a/arch/parisc/include/uapi/asm/fcntl.h +++ b/arch/parisc/include/uapi/asm/fcntl.h @@ -3,22 +3,19 @@ #define _PARISC_FCNTL_H #define O_APPEND 000000010 -#define O_BLKSEEK 000000100 /* HPUX only */ #define O_CREAT 000000400 /* not fcntl */ #define O_EXCL 000002000 /* not fcntl */ #define O_LARGEFILE 000004000 #define __O_SYNC 000100000 #define O_SYNC (__O_SYNC|O_DSYNC) -#define O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */ +#define O_NONBLOCK 000200000 #define O_NOCTTY 000400000 /* not fcntl */ -#define O_DSYNC 001000000 /* HPUX only */ -#define O_RSYNC 002000000 /* HPUX only */ +#define O_DSYNC 001000000 #define O_NOATIME 004000000 #define O_CLOEXEC 010000000 /* set close_on_exec */ #define O_DIRECTORY 000010000 /* must be a directory */ #define O_NOFOLLOW 000000200 /* don't follow links */ -#define O_INVISIBLE 004000000 /* invisible I/O, for DMAPI/XDSM */ #define O_PATH 020000000 #define __O_TMPFILE 040000000 diff --git a/arch/parisc/include/uapi/asm/ioctls.h b/arch/parisc/include/uapi/asm/ioctls.h index 82d1148c6379..74b4027a4e80 100644 --- a/arch/parisc/include/uapi/asm/ioctls.h +++ b/arch/parisc/include/uapi/asm/ioctls.h @@ -10,10 +10,10 @@ #define TCSETS _IOW('T', 17, struct termios) /* TCSETATTR */ #define TCSETSW _IOW('T', 18, struct termios) /* TCSETATTRD */ #define TCSETSF _IOW('T', 19, struct termios) /* TCSETATTRF */ -#define TCGETA _IOR('T', 1, struct termio) -#define TCSETA _IOW('T', 2, struct termio) -#define TCSETAW _IOW('T', 3, struct termio) -#define TCSETAF _IOW('T', 4, struct termio) +#define TCGETA 0x40125401 +#define TCSETA 0x80125402 +#define TCSETAW 0x80125403 +#define TCSETAF 0x80125404 #define TCSBRK _IO('T', 5) #define TCXONC _IO('T', 6) #define TCFLSH _IO('T', 7) diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index 870fbf8c7088..b6a709506987 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ 
b/arch/parisc/include/uapi/asm/mman.h @@ -10,9 +10,7 @@ #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */ +/* 0x01 - 0x03 are defined in linux/mman.h */ #define MAP_TYPE 0x2b /* Mask for type of mapping, includes bits 0x08 and 0x20 */ #define MAP_FIXED 0x04 /* Interpret addr exactly */ #define MAP_ANONYMOUS 0x10 /* don't use a file */ @@ -27,6 +25,7 @@ #define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ #define MAP_HUGETLB 0x80000 /* create a huge page mapping */ #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ +#define MAP_UNINITIALIZED 0 /* uninitialized anonymous mmap */ #define MS_SYNC 1 /* synchronous memory sync */ #define MS_ASYNC 2 /* sync memory asynchronously */ @@ -50,25 +49,37 @@ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ -#define MADV_MERGEABLE 65 /* KSM may merge identical pages */ -#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ -#define MADV_HUGEPAGE 67 /* Worth backing with hugepages */ -#define MADV_NOHUGEPAGE 68 /* Not worth backing with hugepages */ +#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ +#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ -#define MADV_DONTDUMP 69 /* Explicity exclude from the core dump, +#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump, overrides the coredump filter bits */ -#define MADV_DODUMP 70 /* Clear the MADV_NODUMP flag */ +#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ -#define MADV_WIPEONFORK 71 /* Zero memory on fork, child only */ -#define MADV_KEEPONFORK 72 /* Undo MADV_WIPEONFORK */ +#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ +#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ + +#define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ + +#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ +#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ + +#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ + +#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ #define MADV_HWPOISON 100 /* poison a page for testing */ #define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */ +#define MADV_GUARD_INSTALL 102 /* fatal signal on access to range */ +#define MADV_GUARD_REMOVE 103 /* unguard range */ + /* compatibility flags */ #define MAP_FILE 0 -#define MAP_VARIABLE 0 #define PKEY_DISABLE_ACCESS 0x1 #define PKEY_DISABLE_WRITE 0x2 diff --git a/arch/parisc/include/uapi/asm/msgbuf.h b/arch/parisc/include/uapi/asm/msgbuf.h index 6a2e9ab2ef8d..3b4de5b668c3 100644 --- a/arch/parisc/include/uapi/asm/msgbuf.h +++ b/arch/parisc/include/uapi/asm/msgbuf.h @@ -3,6 +3,7 @@ #define _PARISC_MSGBUF_H #include <asm/bitsperlong.h> +#include <asm/ipcbuf.h> /* * The msqid64_ds structure for parisc architecture, copied from sparc. 
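The mman.h hunk above moves parisc onto the generic MADV_* numbering (MADV_MERGEABLE, for instance, becomes 12 instead of 65, and the newer hints up to MADV_COLLAPSE take the same values as on other architectures), so the calling side needs no changes. A minimal user-space sketch; the failure handling is illustrative.

/* User-space sketch: madvise() itself is unchanged by the renumbering;
 * the constants come from <sys/mman.h>.
 */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
        size_t len = 1 << 20;   /* 1 MiB scratch area */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* MADV_MERGEABLE is one of the renumbered hints; this may fail
         * with EINVAL on kernels built without CONFIG_KSM. */
        if (madvise(p, len, MADV_MERGEABLE))
                perror("madvise(MADV_MERGEABLE)");

        munmap(p, len);
        return 0;
}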
@@ -16,9 +17,9 @@ struct msqid64_ds { struct ipc64_perm msg_perm; #if __BITS_PER_LONG == 64 - __kernel_time_t msg_stime; /* last msgsnd time */ - __kernel_time_t msg_rtime; /* last msgrcv time */ - __kernel_time_t msg_ctime; /* last change time */ + long msg_stime; /* last msgsnd time */ + long msg_rtime; /* last msgrcv time */ + long msg_ctime; /* last change time */ #else unsigned long msg_stime_high; unsigned long msg_stime; /* last msgsnd time */ diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h index 593eeb573138..65031ddf8372 100644 --- a/arch/parisc/include/uapi/asm/pdc.h +++ b/arch/parisc/include/uapi/asm/pdc.h @@ -4,7 +4,7 @@ /* * PDC return values ... - * All PDC calls return a subset of these errors. + * All PDC calls return a subset of these errors. */ #define PDC_WARN 3 /* Call completed with a warning */ @@ -58,8 +58,10 @@ #define PDC_MODEL_NVA_SUPPORTED (0 << 4) #define PDC_MODEL_NVA_SLOW (1 << 4) #define PDC_MODEL_NVA_UNSUPPORTED (3 << 4) -#define PDC_MODEL_GET_BOOT__OP 8 /* returns boot test options */ -#define PDC_MODEL_SET_BOOT__OP 9 /* set boot test options */ +#define PDC_MODEL_FIRM_TEST_GET 8 /* returns boot test options */ +#define PDC_MODEL_FIRM_TEST_SET 9 /* set boot test options */ +#define PDC_MODEL_GET_PLATFORM_INFO 10 /* returns platform info */ +#define PDC_MODEL_GET_INSTALL_KERNEL 11 /* returns kernel for installation */ #define PA89_INSTRUCTION_SET 0x4 /* capabilities returned */ #define PA90_INSTRUCTION_SET 0x8 @@ -99,7 +101,7 @@ #define PDC_TOD 9 /* time-of-day clock (TOD) */ #define PDC_TOD_READ 0 /* read TOD */ #define PDC_TOD_WRITE 1 /* write TOD */ - +#define PDC_TOD_CALIBRATE 2 /* calibrate timers */ #define PDC_STABLE 10 /* stable storage (sprockets) */ #define PDC_STABLE_READ 0 @@ -109,15 +111,22 @@ #define PDC_STABLE_INITIALIZE 4 #define PDC_NVOLATILE 11 /* often not implemented */ +#define PDC_NVOLATILE_READ 0 +#define PDC_NVOLATILE_WRITE 1 +#define PDC_NVOLATILE_RETURN_SIZE 2 +#define PDC_NVOLATILE_VERIFY_CONTENTS 3 +#define PDC_NVOLATILE_INITIALIZE 4 #define PDC_ADD_VALID 12 /* Memory validation PDC call */ #define PDC_ADD_VALID_VERIFY 0 /* Make PDC_ADD_VALID verify region */ +#define PDC_DEBUG 14 /* Obsolete */ + #define PDC_INSTR 15 /* get instr to invoke PDCE_CHECK() */ #define PDC_PROC 16 /* (sprockets) */ -#define PDC_CONFIG 16 /* (sprockets) */ +#define PDC_CONFIG 17 /* (sprockets) */ #define PDC_CONFIG_DECONFIG 0 #define PDC_CONFIG_DRECONFIG 1 #define PDC_CONFIG_DRETURN_CONFIG 2 @@ -156,7 +165,7 @@ #define PDC_PSW_GET_DEFAULTS 1 /* Return defaults */ #define PDC_PSW_SET_DEFAULTS 2 /* Set default */ #define PDC_PSW_ENDIAN_BIT 1 /* set for big endian */ -#define PDC_PSW_WIDE_BIT 2 /* set for wide mode */ +#define PDC_PSW_WIDE_BIT 2 /* set for wide mode */ #define PDC_SYSTEM_MAP 22 /* find system modules */ #define PDC_FIND_MODULE 0 @@ -167,6 +176,15 @@ #define PDC_SOFT_POWER_INFO 0 /* return info about the soft power switch */ #define PDC_SOFT_POWER_ENABLE 1 /* enable/disable soft power switch */ +#define PDC_ALLOC 24 /* allocate static storage for PDC & IODC */ + +#define PDC_CRASH_PREP 25 /* Prepare system for crash dump */ +#define PDC_CRASH_DUMP 0 /* Do platform specific preparations for dump */ +#define PDC_CRASH_LOG_CEC_ERROR 1 /* Dump hardware registers */ + +#define PDC_SCSI_PARMS 26 /* Get and set SCSI parameters */ +#define PDC_SCSI_GET_PARMS 0 /* Get SCSI parameters for I/O device */ +#define PDC_SCSI_SET_PARMS 1 /* Set SCSI parameters for I/O device */ /* HVERSION dependent */ @@ -256,10 
+274,14 @@ #define PDC_PCI_PCI_INT_ROUTE_SIZE 13 #define PDC_PCI_GET_INT_TBL_SIZE PDC_PCI_PCI_INT_ROUTE_SIZE #define PDC_PCI_PCI_INT_ROUTE 14 -#define PDC_PCI_GET_INT_TBL PDC_PCI_PCI_INT_ROUTE +#define PDC_PCI_GET_INT_TBL PDC_PCI_PCI_INT_ROUTE #define PDC_PCI_READ_MON_TYPE 15 #define PDC_PCI_WRITE_MON_TYPE 16 +#define PDC_RELOCATE 149 /* (sprockets) */ +#define PDC_RELOCATE_GET_RELOCINFO 0 +#define PDC_RELOCATE_CHECKSUM 1 +#define PDC_RELOCATE_RELOCATE 2 /* Get SCSI Interface Card info: SDTR, SCSI ID, mode (SE vs LVD) */ #define PDC_INITIATOR 163 @@ -323,7 +345,7 @@ /* constants for PDC_CHASSIS */ #define OSTAT_OFF 0 -#define OSTAT_FLT 1 +#define OSTAT_FLT 1 #define OSTAT_TEST 2 #define OSTAT_INIT 3 #define OSTAT_SHUT 4 @@ -339,22 +361,27 @@ /* size of the pdc_result buffer for firmware.c */ #define NUM_PDC_RESULT 32 -#if !defined(__ASSEMBLY__) +#if !defined(__ASSEMBLER__) -/* flags of the device_path */ +/* flags for hardware_path */ #define PF_AUTOBOOT 0x80 #define PF_AUTOSEARCH 0x40 #define PF_TIMER 0x0F -struct device_path { /* page 1-69 */ - unsigned char flags; /* flags see above! */ - unsigned char bc[6]; /* bus converter routing info */ - unsigned char mod; - unsigned int layers[6];/* device-specific layer-info */ -} __attribute__((aligned(8))) ; +struct hardware_path { + unsigned char flags; /* see bit definitions below */ + signed char bc[6]; /* Bus Converter routing info to a specific */ + /* I/O adaptor (< 0 means none, > 63 resvd) */ + signed char mod; /* fixed field of specified module */ +}; + +struct pdc_module_path { /* page 1-69 */ + struct hardware_path path; + unsigned int layers[6]; /* device-specific info (ctlr #, unit # ...) */ +} __attribute__((aligned(8))); struct pz_device { - struct device_path dp; /* see above */ + struct pdc_module_path dp; /* see above */ /* struct iomod *hpa; */ unsigned int hpa; /* HPA base address */ /* char *spa; */ @@ -376,10 +403,12 @@ struct zeropage { /* int (*vec_rendz)(void); */ unsigned int vec_rendz; int vec_pow_fail_flen; - int vec_pad[10]; - + int vec_pad0[3]; + unsigned int vec_toc_hi; + int vec_pad1[6]; + /* [0x040] reserved processor dependent */ - int pad0[112]; + int pad0[112]; /* in QEMU pad0[0] holds "SeaBIOS\0" */ /* [0x200] reserved */ int pad1[84]; @@ -443,6 +472,7 @@ struct pdc_model { /* for PDC_MODEL */ unsigned long arch_rev; unsigned long pot_key; unsigned long curr_key; + unsigned long width; /* default of PSW_W bit (1=enabled) */ }; struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */ @@ -580,6 +610,12 @@ struct pdc_system_map_addr_info { /* PDC_SYSTEM_MAP/FIND_ADDRESS */ unsigned long mod_pgs; }; +struct pdc_relocate_info_block { /* PDC_RELOCATE_INFO */ + unsigned long pdc_size; + unsigned long pdc_alignment; + unsigned long pdc_address; +}; + struct pdc_initiator { /* PDC_INITIATOR */ int host_id; int factor; @@ -587,21 +623,6 @@ struct pdc_initiator { /* PDC_INITIATOR */ int mode; }; -struct hardware_path { - char flags; /* see bit definitions below */ - char bc[6]; /* Bus Converter routing info to a specific */ - /* I/O adaptor (< 0 means none, > 63 resvd) */ - char mod; /* fixed field of specified module */ -}; - -/* - * Device path specifications used by PDC. - */ -struct pdc_module_path { - struct hardware_path path; - unsigned int layers[6]; /* device-specific info (ctlr #, unit # ...) 
*/ -}; - /* Only used on some pre-PA2.0 boxes */ struct pdc_memory_map { /* PDC_MEMORY_MAP */ unsigned long hpa; /* mod's register set address */ @@ -667,6 +688,59 @@ struct pdc_hpmc_pim_20 { /* PDC_PIM */ unsigned long long fr[32]; }; -#endif /* !defined(__ASSEMBLY__) */ +struct pim_cpu_state_cf { + union { + unsigned int + iqv : 1, /* IIA queue Valid */ + iqf : 1, /* IIA queue Failure */ + ipv : 1, /* IPRs Valid */ + grv : 1, /* GRs Valid */ + crv : 1, /* CRs Valid */ + srv : 1, /* SRs Valid */ + trv : 1, /* CR24 through CR31 valid */ + pad : 24, /* reserved */ + td : 1; /* TOC did not cause any damage to the system state */ + unsigned int val; + }; +}; + +struct pdc_toc_pim_11 { + unsigned int gr[32]; + unsigned int cr[32]; + unsigned int sr[8]; + unsigned int iasq_back; + unsigned int iaoq_back; + unsigned int check_type; + struct pim_cpu_state_cf cpu_state; +}; + +struct pdc_toc_pim_20 { + unsigned long long gr[32]; + unsigned long long cr[32]; + unsigned long long sr[8]; + unsigned long long iasq_back; + unsigned long long iaoq_back; + unsigned int check_type; + struct pim_cpu_state_cf cpu_state; +}; + +/* for SpeedyBoot/firm_ctl funtionality */ +struct pdc_firm_test_get_rtn_block { /* PDC_MODEL/PDC_FIRM_TEST_GET */ + unsigned long current_tests; /* u_R_addr Raddr_ints[0] */ + unsigned long tests_supported; /* u_R_addr Raddr_ints[1] */ + unsigned long default_tests; /* u_R_addr Raddr_ints[2] */ +}; + +#define TORNADO_CPU_ID 0xB +#define PCXL_CPU_ID 0xD +#define PCXU_CPU_ID 0xE /* U and U+ for all but C-class with bug */ +#define VR_CPU_ID 0xF +#define PCXU_PLUS_CPU_ID 0x10 /* U+ only on C-class with bug */ +#define PCXW_CPU_ID 0x11 +#define PCXW_PLUS_CPU_ID 0x12 +#define PIRANHA_CPU_ID 0x13 +#define MAKO_CPU_ID 0x14 + +#endif /* !defined(__ASSEMBLER__) */ #endif /* _UAPI_PARISC_PDC_H */ diff --git a/arch/parisc/include/uapi/asm/perf_regs.h b/arch/parisc/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000000..1ae687bb3d3c --- /dev/null +++ b/arch/parisc/include/uapi/asm/perf_regs.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_PARISC_PERF_REGS_H +#define _UAPI_ASM_PARISC_PERF_REGS_H + +/* see struct user_regs_struct */ +enum perf_event_parisc_regs { + PERF_REG_PARISC_R0, /* PSW is in gr[0] */ + PERF_REG_PARISC_R1, + PERF_REG_PARISC_R2, + PERF_REG_PARISC_R3, + PERF_REG_PARISC_R4, + PERF_REG_PARISC_R5, + PERF_REG_PARISC_R6, + PERF_REG_PARISC_R7, + PERF_REG_PARISC_R8, + PERF_REG_PARISC_R9, + PERF_REG_PARISC_R10, + PERF_REG_PARISC_R11, + PERF_REG_PARISC_R12, + PERF_REG_PARISC_R13, + PERF_REG_PARISC_R14, + PERF_REG_PARISC_R15, + PERF_REG_PARISC_R16, + PERF_REG_PARISC_R17, + PERF_REG_PARISC_R18, + PERF_REG_PARISC_R19, + PERF_REG_PARISC_R20, + PERF_REG_PARISC_R21, + PERF_REG_PARISC_R22, + PERF_REG_PARISC_R23, + PERF_REG_PARISC_R24, + PERF_REG_PARISC_R25, + PERF_REG_PARISC_R26, + PERF_REG_PARISC_R27, + PERF_REG_PARISC_R28, + PERF_REG_PARISC_R29, + PERF_REG_PARISC_R30, + PERF_REG_PARISC_R31, + + PERF_REG_PARISC_SR0, + PERF_REG_PARISC_SR1, + PERF_REG_PARISC_SR2, + PERF_REG_PARISC_SR3, + PERF_REG_PARISC_SR4, + PERF_REG_PARISC_SR5, + PERF_REG_PARISC_SR6, + PERF_REG_PARISC_SR7, + + PERF_REG_PARISC_IAOQ0, + PERF_REG_PARISC_IAOQ1, + PERF_REG_PARISC_IASQ0, + PERF_REG_PARISC_IASQ1, + + PERF_REG_PARISC_SAR, /* CR11 */ + PERF_REG_PARISC_IIR, /* CR19 */ + PERF_REG_PARISC_ISR, /* CR20 */ + PERF_REG_PARISC_IOR, /* CR21 */ + PERF_REG_PARISC_IPSW, /* CR22 */ + + PERF_REG_PARISC_MAX +}; + +#endif /* _UAPI_ASM_PARISC_PERF_REGS_H */ diff --git 
a/arch/parisc/include/uapi/asm/sembuf.h b/arch/parisc/include/uapi/asm/sembuf.h index 3c31163b1241..e2ca4301c7fb 100644 --- a/arch/parisc/include/uapi/asm/sembuf.h +++ b/arch/parisc/include/uapi/asm/sembuf.h @@ -3,6 +3,7 @@ #define _PARISC_SEMBUF_H #include <asm/bitsperlong.h> +#include <asm/ipcbuf.h> /* * The semid64_ds structure for parisc architecture. @@ -16,8 +17,8 @@ struct semid64_ds { struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ #if __BITS_PER_LONG == 64 - __kernel_time_t sem_otime; /* last semop time */ - __kernel_time_t sem_ctime; /* last change time */ + long sem_otime; /* last semop time */ + long sem_ctime; /* last change time */ #else unsigned long sem_otime_high; unsigned long sem_otime; /* last semop time */ diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h index c89b3dd8db21..532da742fb56 100644 --- a/arch/parisc/include/uapi/asm/shmbuf.h +++ b/arch/parisc/include/uapi/asm/shmbuf.h @@ -3,6 +3,8 @@ #define _PARISC_SHMBUF_H #include <asm/bitsperlong.h> +#include <asm/ipcbuf.h> +#include <asm/posix_types.h> /* * The shmid64_ds structure for parisc architecture. @@ -16,9 +18,9 @@ struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ #if __BITS_PER_LONG == 64 - __kernel_time_t shm_atime; /* last attach time */ - __kernel_time_t shm_dtime; /* last detach time */ - __kernel_time_t shm_ctime; /* last change time */ + long shm_atime; /* last attach time */ + long shm_dtime; /* last detach time */ + long shm_ctime; /* last change time */ #else unsigned long shm_atime_high; unsigned long shm_atime; /* last attach time */ diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h index d38563a394f2..d99accf37341 100644 --- a/arch/parisc/include/uapi/asm/signal.h +++ b/arch/parisc/include/uapi/asm/signal.h @@ -35,25 +35,12 @@ #define SIGURG 29 #define SIGXFSZ 30 #define SIGUNUSED 31 -#define SIGSYS 31 /* Linux doesn't use this */ +#define SIGSYS 31 /* These should not be considered constants from userland. */ #define SIGRTMIN 32 -#define SIGRTMAX _NSIG /* it's 44 under HP/UX */ +#define SIGRTMAX _NSIG -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. 
- */ #define SA_ONSTACK 0x00000001 #define SA_RESETHAND 0x00000004 #define SA_NOCLDSTOP 0x00000008 @@ -61,7 +48,6 @@ #define SA_NODEFER 0x00000020 #define SA_RESTART 0x00000040 #define SA_NOCLDWAIT 0x00000080 -#define _SA_SIGGFAULT 0x00000100 /* HPUX */ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND @@ -69,39 +55,30 @@ #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 +#include <asm-generic/signal-defs.h> -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ +#define _NSIG 64 +#define _NSIG_BPW (sizeof(unsigned long) * 8) +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ - -# ifndef __ASSEMBLY__ +# ifndef __ASSEMBLER__ # include <linux/types.h> +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + /* Avoid too many header ordering problems. */ struct siginfo; -/* Type of a signal handler. */ -#if defined(__LP64__) -/* function pointers on 64-bit parisc are pointers to little structs and the - * compiler doesn't support code which changes or tests the address of - * the function in the little struct. This is really ugly -PB - */ -typedef char __user *__sighandler_t; -#else -typedef void __signalfn_t(int); -typedef __signalfn_t __user *__sighandler_t; -#endif - typedef struct sigaltstack { void __user *ss_sp; int ss_flags; - size_t ss_size; + __kernel_size_t ss_size; } stack_t; -#endif /* !__ASSEMBLY */ +#endif /* !__ASSEMBLER__ */ #endif /* _UAPI_ASM_PARISC_SIGNAL_H */ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 52bed5976cbe..c16ec36dfee6 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -2,6 +2,7 @@ #ifndef _UAPI_ASM_SOCKET_H #define _UAPI_ASM_SOCKET_H +#include <linux/posix_types.h> #include <asm/sockios.h> /* For setsockopt(2) */ @@ -21,8 +22,8 @@ #define SO_RCVBUFFORCE 0x100b #define SO_SNDLOWAT 0x1003 #define SO_RCVLOWAT 0x1004 -#define SO_SNDTIMEO 0x1005 -#define SO_RCVTIMEO 0x1006 +#define SO_SNDTIMEO_OLD 0x1005 +#define SO_RCVTIMEO_OLD 0x1006 #define SO_ERROR 0x1007 #define SO_TYPE 0x1008 #define SO_PROTOCOL 0x1028 @@ -34,10 +35,6 @@ #define SO_BSDCOMPAT 0x400e #define SO_PASSCRED 0x4010 #define SO_PEERCRED 0x4011 -#define SO_TIMESTAMP 0x4012 -#define SCM_TIMESTAMP SO_TIMESTAMP -#define SO_TIMESTAMPNS 0x4013 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x4016 @@ -58,9 +55,6 @@ #define SO_MARK 0x401f -#define SO_TIMESTAMPING 0x4020 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - #define SO_RXQ_OVFL 0x4021 #define SO_WIFI_STATUS 0x4022 @@ -109,4 +103,71 @@ #define SO_BINDTOIFINDEX 0x4037 +#define SO_TIMESTAMP_OLD 0x4012 +#define SO_TIMESTAMPNS_OLD 0x4013 +#define SO_TIMESTAMPING_OLD 0x4020 + +#define SO_TIMESTAMP_NEW 0x4038 +#define SO_TIMESTAMPNS_NEW 0x4039 +#define SO_TIMESTAMPING_NEW 0x403A + +#define SO_RCVTIMEO_NEW 0x4040 +#define SO_SNDTIMEO_NEW 0x4041 + +#define SO_DETACH_REUSEPORT_BPF 0x4042 + +#define SO_PREFER_BUSY_POLL 0x4043 +#define SO_BUSY_POLL_BUDGET 0x4044 + +#define SO_NETNS_COOKIE 0x4045 + +#define SO_BUF_LOCK 0x4046 + +#define SO_RESERVE_MEM 0x4047 + +#define SO_TXREHASH 0x4048 + +#define SO_RCVMARK 0x4049 + 
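The SO_*_OLD/SO_*_NEW pairs introduced above exist so that 32-bit userspace built with a 64-bit time_t can request timestamps and timeouts in the wider layout; the unsuffixed names are mapped onto one variant or the other in the #if !defined(__KERNEL__) block further down in this header. A minimal userspace sketch of how the unsuffixed name is normally used (set_recv_timeout() is illustrative and not part of the patch):

#include <sys/socket.h>
#include <sys/time.h>

/* SO_RCVTIMEO resolves at compile time to SO_RCVTIMEO_OLD or
 * SO_RCVTIMEO_NEW, depending on the width of the toolchain's time_t. */
static int set_recv_timeout(int fd, time_t seconds)
{
        struct timeval tv = { .tv_sec = seconds, .tv_usec = 0 };

        return setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}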
+#define SO_PASSPIDFD 0x404A +#define SO_PEERPIDFD 0x404B + +#define SCM_TS_OPT_ID 0x404C + +#define SO_RCVPRIORITY 0x404D + +#define SO_DEVMEM_LINEAR 0x404E +#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR +#define SO_DEVMEM_DMABUF 0x404F +#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF +#define SO_DEVMEM_DONTNEED 0x4050 + +#define SO_PASSRIGHTS 0x4051 + +#define SO_INQ 0x4052 +#define SCM_INQ SO_INQ + +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/parisc/include/uapi/asm/sockios.h b/arch/parisc/include/uapi/asm/sockios.h deleted file mode 100644 index 66a3ba64d53f..000000000000 --- a/arch/parisc/include/uapi/asm/sockios.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef __ARCH_PARISC_SOCKIOS__ -#define __ARCH_PARISC_SOCKIOS__ - -/* Socket-level I/O control calls. */ -#define FIOSETOWN 0x8901 -#define SIOCSPGRP 0x8902 -#define FIOGETOWN 0x8903 -#define SIOCGPGRP 0x8904 -#define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ -#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ - -#endif diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h deleted file mode 100644 index 35fb2d1bfbbd..000000000000 --- a/arch/parisc/include/uapi/asm/swab.h +++ /dev/null @@ -1,68 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _PARISC_SWAB_H -#define _PARISC_SWAB_H - -#include <asm/bitsperlong.h> -#include <linux/types.h> -#include <linux/compiler.h> - -#define __SWAB_64_THRU_32__ - -static inline __attribute_const__ __u16 __arch_swab16(__u16 x) -{ - __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */ - "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */ - : "=r" (x) - : "0" (x)); - return x; -} -#define __arch_swab16 __arch_swab16 - -static inline __attribute_const__ __u32 __arch_swab24(__u32 x) -{ - __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */ - "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */ - "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */ - : "=r" (x) - : "0" (x)); - return x; -} - -static inline __attribute_const__ __u32 __arch_swab32(__u32 x) -{ - unsigned int temp; - __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */ - "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */ - "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */ - : "=r" (x), "=&r" (temp) - : "0" (x)); - return x; -} -#define __arch_swab32 __arch_swab32 - -#if __BITS_PER_LONG > 32 -/* -** From "PA-RISC 2.0 Architecture", HP Professional Books. -** See Appendix I page 8 , "Endian Byte Swapping". 
-** -** Pretty cool algorithm: (* == zero'd bits) -** PERMH 01234567 -> 67452301 into %0 -** HSHL 67452301 -> 7*5*3*1* into %1 -** HSHR 67452301 -> *6*4*2*0 into %0 -** OR %0 | %1 -> 76543210 into %0 (all done!) -*/ -static inline __attribute_const__ __u64 __arch_swab64(__u64 x) -{ - __u64 temp; - __asm__("permh,3210 %0, %0\n\t" - "hshl %0, 8, %1\n\t" - "hshr,u %0, 8, %0\n\t" - "or %1, %0, %0" - : "=r" (x), "=&r" (temp) - : "0" (x)); - return x; -} -#define __arch_swab64 __arch_swab64 -#endif /* __BITS_PER_LONG > 32 */ - -#endif /* _PARISC_SWAB_H */ diff --git a/arch/parisc/include/uapi/asm/termbits.h b/arch/parisc/include/uapi/asm/termbits.h index 40e920f8d683..3a8938d26fb4 100644 --- a/arch/parisc/include/uapi/asm/termbits.h +++ b/arch/parisc/include/uapi/asm/termbits.h @@ -2,10 +2,8 @@ #ifndef __ARCH_PARISC_TERMBITS_H__ #define __ARCH_PARISC_TERMBITS_H__ -#include <linux/posix_types.h> +#include <asm-generic/termbits-common.h> -typedef unsigned char cc_t; -typedef unsigned int speed_t; typedef unsigned int tcflag_t; #define NCCS 19 @@ -41,158 +39,107 @@ struct ktermios { }; /* c_cc characters */ -#define VINTR 0 -#define VQUIT 1 -#define VERASE 2 -#define VKILL 3 -#define VEOF 4 -#define VTIME 5 -#define VMIN 6 -#define VSWTC 7 -#define VSTART 8 -#define VSTOP 9 -#define VSUSP 10 -#define VEOL 11 -#define VREPRINT 12 -#define VDISCARD 13 -#define VWERASE 14 -#define VLNEXT 15 -#define VEOL2 16 - +#define VINTR 0 +#define VQUIT 1 +#define VERASE 2 +#define VKILL 3 +#define VEOF 4 +#define VTIME 5 +#define VMIN 6 +#define VSWTC 7 +#define VSTART 8 +#define VSTOP 9 +#define VSUSP 10 +#define VEOL 11 +#define VREPRINT 12 +#define VDISCARD 13 +#define VWERASE 14 +#define VLNEXT 15 +#define VEOL2 16 /* c_iflag bits */ -#define IGNBRK 0000001 -#define BRKINT 0000002 -#define IGNPAR 0000004 -#define PARMRK 0000010 -#define INPCK 0000020 -#define ISTRIP 0000040 -#define INLCR 0000100 -#define IGNCR 0000200 -#define ICRNL 0000400 -#define IUCLC 0001000 -#define IXON 0002000 -#define IXANY 0004000 -#define IXOFF 0010000 -#define IMAXBEL 0040000 -#define IUTF8 0100000 +#define IUCLC 0x0200 +#define IXON 0x0400 +#define IXOFF 0x1000 +#define IMAXBEL 0x4000 +#define IUTF8 0x8000 /* c_oflag bits */ -#define OPOST 0000001 -#define OLCUC 0000002 -#define ONLCR 0000004 -#define OCRNL 0000010 -#define ONOCR 0000020 -#define ONLRET 0000040 -#define OFILL 0000100 -#define OFDEL 0000200 -#define NLDLY 0000400 -#define NL0 0000000 -#define NL1 0000400 -#define CRDLY 0003000 -#define CR0 0000000 -#define CR1 0001000 -#define CR2 0002000 -#define CR3 0003000 -#define TABDLY 0014000 -#define TAB0 0000000 -#define TAB1 0004000 -#define TAB2 0010000 -#define TAB3 0014000 -#define XTABS 0014000 -#define BSDLY 0020000 -#define BS0 0000000 -#define BS1 0020000 -#define VTDLY 0040000 -#define VT0 0000000 -#define VT1 0040000 -#define FFDLY 0100000 -#define FF0 0000000 -#define FF1 0100000 +#define OLCUC 0x00002 +#define ONLCR 0x00004 +#define NLDLY 0x00100 +#define NL0 0x00000 +#define NL1 0x00100 +#define CRDLY 0x00600 +#define CR0 0x00000 +#define CR1 0x00200 +#define CR2 0x00400 +#define CR3 0x00600 +#define TABDLY 0x01800 +#define TAB0 0x00000 +#define TAB1 0x00800 +#define TAB2 0x01000 +#define TAB3 0x01800 +#define XTABS 0x01800 +#define BSDLY 0x02000 +#define BS0 0x00000 +#define BS1 0x02000 +#define VTDLY 0x04000 +#define VT0 0x00000 +#define VT1 0x04000 +#define FFDLY 0x08000 +#define FF0 0x00000 +#define FF1 0x08000 /* c_cflag bit meaning */ -#define CBAUD 0010017 -#define B0 0000000 /* hang up */ -#define 
B50 0000001 -#define B75 0000002 -#define B110 0000003 -#define B134 0000004 -#define B150 0000005 -#define B200 0000006 -#define B300 0000007 -#define B600 0000010 -#define B1200 0000011 -#define B1800 0000012 -#define B2400 0000013 -#define B4800 0000014 -#define B9600 0000015 -#define B19200 0000016 -#define B38400 0000017 -#define EXTA B19200 -#define EXTB B38400 -#define CSIZE 0000060 -#define CS5 0000000 -#define CS6 0000020 -#define CS7 0000040 -#define CS8 0000060 -#define CSTOPB 0000100 -#define CREAD 0000200 -#define PARENB 0000400 -#define PARODD 0001000 -#define HUPCL 0002000 -#define CLOCAL 0004000 -#define CBAUDEX 0010000 -#define BOTHER 0010000 -#define B57600 0010001 -#define B115200 0010002 -#define B230400 0010003 -#define B460800 0010004 -#define B500000 0010005 -#define B576000 0010006 -#define B921600 0010007 -#define B1000000 0010010 -#define B1152000 0010011 -#define B1500000 0010012 -#define B2000000 0010013 -#define B2500000 0010014 -#define B3000000 0010015 -#define B3500000 0010016 -#define B4000000 0010017 -#define CIBAUD 002003600000 /* input baud rate */ -#define CMSPAR 010000000000 /* mark or space (stick) parity */ -#define CRTSCTS 020000000000 /* flow control */ - -#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ - +#define CBAUD 0x0000100f +#define CSIZE 0x00000030 +#define CS5 0x00000000 +#define CS6 0x00000010 +#define CS7 0x00000020 +#define CS8 0x00000030 +#define CSTOPB 0x00000040 +#define CREAD 0x00000080 +#define PARENB 0x00000100 +#define PARODD 0x00000200 +#define HUPCL 0x00000400 +#define CLOCAL 0x00000800 +#define CBAUDEX 0x00001000 +#define BOTHER 0x00001000 +#define B57600 0x00001001 +#define B115200 0x00001002 +#define B230400 0x00001003 +#define B460800 0x00001004 +#define B500000 0x00001005 +#define B576000 0x00001006 +#define B921600 0x00001007 +#define B1000000 0x00001008 +#define B1152000 0x00001009 +#define B1500000 0x0000100a +#define B2000000 0x0000100b +#define B2500000 0x0000100c +#define B3000000 0x0000100d +#define B3500000 0x0000100e +#define B4000000 0x0000100f +#define CIBAUD 0x100f0000 /* input baud rate */ /* c_lflag bits */ -#define ISIG 0000001 -#define ICANON 0000002 -#define XCASE 0000004 -#define ECHO 0000010 -#define ECHOE 0000020 -#define ECHOK 0000040 -#define ECHONL 0000100 -#define NOFLSH 0000200 -#define TOSTOP 0000400 -#define ECHOCTL 0001000 -#define ECHOPRT 0002000 -#define ECHOKE 0004000 -#define FLUSHO 0010000 -#define PENDIN 0040000 -#define IEXTEN 0100000 -#define EXTPROC 0200000 - -/* tcflow() and TCXONC use these */ -#define TCOOFF 0 -#define TCOON 1 -#define TCIOFF 2 -#define TCION 3 - -/* tcflush() and TCFLSH use these */ -#define TCIFLUSH 0 -#define TCOFLUSH 1 -#define TCIOFLUSH 2 +#define ISIG 0x00001 +#define ICANON 0x00002 +#define XCASE 0x00004 +#define ECHO 0x00008 +#define ECHOE 0x00010 +#define ECHOK 0x00020 +#define ECHONL 0x00040 +#define NOFLSH 0x00080 +#define TOSTOP 0x00100 +#define ECHOCTL 0x00200 +#define ECHOPRT 0x00400 +#define ECHOKE 0x00800 +#define FLUSHO 0x01000 +#define PENDIN 0x04000 +#define IEXTEN 0x08000 +#define EXTPROC 0x10000 /* tcsetattr uses these */ #define TCSANOW 0 diff --git a/arch/parisc/include/uapi/asm/termios.h b/arch/parisc/include/uapi/asm/termios.h deleted file mode 100644 index aba174f23ef0..000000000000 --- a/arch/parisc/include/uapi/asm/termios.h +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _UAPI_PARISC_TERMIOS_H -#define _UAPI_PARISC_TERMIOS_H - -#include <asm/termbits.h> -#include <asm/ioctls.h> - 
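The termbits.h hunk above switches parisc to asm-generic/termbits-common.h: cc_t/speed_t and the constants whose values already match the generic ones (IGNBRK through ICRNL, OPOST, the B0-B38400 rates, the tcflow()/tcflush() arguments, and so on) are dropped, and the remaining parisc-specific definitions are rewritten from octal to hexadecimal without changing their values. A small compile-time check of that equivalence (illustrative only, not part of the patch):

/* Octal (old) vs. hexadecimal (new) spellings of the same values. */
_Static_assert(0010017      == 0x100f,     "CBAUD");
_Static_assert(002003600000 == 0x100f0000, "CIBAUD");
_Static_assert(0014000      == 0x1800,     "TABDLY / XTABS");
_Static_assert(0100000      == 0x8000,     "IUTF8 / FF1");
_Static_assert(0200000      == 0x10000,    "EXTPROC");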
-struct winsize { - unsigned short ws_row; - unsigned short ws_col; - unsigned short ws_xpixel; - unsigned short ws_ypixel; -}; - -#define NCC 8 -struct termio { - unsigned short c_iflag; /* input mode flags */ - unsigned short c_oflag; /* output mode flags */ - unsigned short c_cflag; /* control mode flags */ - unsigned short c_lflag; /* local mode flags */ - unsigned char c_line; /* line discipline */ - unsigned char c_cc[NCC]; /* control characters */ -}; - -/* modem lines */ -#define TIOCM_LE 0x001 -#define TIOCM_DTR 0x002 -#define TIOCM_RTS 0x004 -#define TIOCM_ST 0x008 -#define TIOCM_SR 0x010 -#define TIOCM_CTS 0x020 -#define TIOCM_CAR 0x040 -#define TIOCM_RNG 0x080 -#define TIOCM_DSR 0x100 -#define TIOCM_CD TIOCM_CAR -#define TIOCM_RI TIOCM_RNG -#define TIOCM_OUT1 0x2000 -#define TIOCM_OUT2 0x4000 -#define TIOCM_LOOP 0x8000 - -/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ - - -#endif /* _UAPI_PARISC_TERMIOS_H */ diff --git a/arch/parisc/include/uapi/asm/types.h b/arch/parisc/include/uapi/asm/types.h deleted file mode 100644 index 28c7d7453b10..000000000000 --- a/arch/parisc/include/uapi/asm/types.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _PARISC_TYPES_H -#define _PARISC_TYPES_H - -#include <asm-generic/int-ll64.h> - -#endif diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh index 6f68784fea25..664c2d77f776 100644..100755 --- a/arch/parisc/install.sh +++ b/arch/parisc/install.sh @@ -1,7 +1,5 @@ #!/bin/sh # -# arch/parisc/install.sh, derived from arch/i386/boot/install.sh -# # This file is subject to the terms and conditions of the GNU General Public # License. See the file "COPYING" in the main directory of this archive # for more details. @@ -17,33 +15,10 @@ # $2 - kernel image file # $3 - kernel map file # $4 - default install path (blank if root directory) -# - -verify () { - if [ ! -f "$1" ]; then - echo "" 1>&2 - echo " *** Missing file: $1" 1>&2 - echo ' *** You need to run "make" before "make install".' 
1>&2 - echo "" 1>&2 - exit 1 - fi -} - -# Make sure the files actually exist - -verify "$2" -verify "$3" - -# User may have a custom install script - -if [ -n "${INSTALLKERNEL}" ]; then - if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi - if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi -fi -# Default install +set -e -if [ "$(basename $2)" = "zImage" ]; then +if [ "$(basename $2)" = "vmlinuz" ]; then # Compressed install echo "Installing compressed kernel" base=vmlinuz diff --git a/arch/parisc/kernel/.gitignore b/arch/parisc/kernel/.gitignore index c5f676c3c224..bbb90f92d051 100644 --- a/arch/parisc/kernel/.gitignore +++ b/arch/parisc/kernel/.gitignore @@ -1 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only vmlinux.lds diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index 8e5f1ab65c68..9157bc8bdf41 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -3,22 +3,27 @@ # Makefile for arch/parisc/kernel # -extra-y := head.o vmlinux.lds +always-$(KBUILD_BUILTIN) := vmlinux.lds -obj-y := cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \ - pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \ +obj-y := head.o cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \ + syscall.o entry.o sys_parisc.o firmware.o \ ptrace.o hardware.o inventory.o drivers.o alternative.o \ signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \ - process.o processor.o pdc_cons.o pdc_chassis.o unwind.o + process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \ + patch.o toc.o toc_asm.o ifdef CONFIG_FUNCTION_TRACER # Do not profile debug and lowlevel utilities -CFLAGS_REMOVE_ftrace.o = -pg -CFLAGS_REMOVE_cache.o = -pg -CFLAGS_REMOVE_perf.o = -pg -CFLAGS_REMOVE_unwind.o = -pg +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_cache.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_perf.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_unwind.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE) endif +CFLAGS_REMOVE_sys_parisc.o = -Wmissing-prototypes -Wmissing-declarations +CFLAGS_REMOVE_sys_parisc32.o = -Wmissing-prototypes -Wmissing-declarations + obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PA11) += pci-dma.o obj-$(CONFIG_PCI) += pci.o @@ -29,6 +34,17 @@ obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o # only supported for PCX-W/U in 64-bit mode at the moment obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y) -obj-$(CONFIG_PARISC_CPU_TOPOLOGY) += topology.o +obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += topology.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o +obj-$(CONFIG_KGDB) += kgdb.o +obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o +obj-$(CONFIG_KEXEC_FILE) += kexec_file.o + +# vdso +obj-y += vdso.o +obj-$(CONFIG_64BIT) += vdso64/ +obj-y += vdso32/ diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c index bf2274e01a96..25c4d6c3375d 100644 --- a/arch/parisc/kernel/alternative.c +++ b/arch/parisc/kernel/alternative.c @@ -8,6 +8,7 @@ #include <asm/processor.h> #include <asm/sections.h> #include <asm/alternative.h> +#include <asm/cacheflush.h> #include <linux/module.h> @@ -24,11 +25,29 @@ void __init_or_module apply_alternatives(struct alt_instr *start, { struct alt_instr *entry; int index = 0, applied = 0; - int num_cpus = num_online_cpus(); + int num_cpus = num_present_cpus(); + u16 cond_check; + + 
cond_check = ALT_COND_ALWAYS | + ((num_cpus == 1) ? ALT_COND_NO_SMP : 0) | + ((cache_info.dc_size == 0) ? ALT_COND_NO_DCACHE : 0) | + ((cache_info.ic_size == 0) ? ALT_COND_NO_ICACHE : 0) | + (running_on_qemu ? ALT_COND_RUN_ON_QEMU : 0) | + ((split_tlb == 0) ? ALT_COND_NO_SPLIT_TLB : 0) | + /* + * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit + * set (bit #61, big endian), we have to flush and sync every + * time IO-PDIR is changed in Ike/Astro. + */ + (((boot_cpu_data.cpu_type > pcxw_) && + ((boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) == 0)) + ? ALT_COND_NO_IOC_FDC : 0); for (entry = start; entry < end; entry++, index++) { - u32 *from, len, cond, replacement; + u32 *from, replacement; + u16 cond; + s16 len; from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset); len = entry->len; @@ -37,26 +56,14 @@ void __init_or_module apply_alternatives(struct alt_instr *start, WARN_ON(!cond); - if (cond != ALT_COND_ALWAYS && no_alternatives) + if ((cond & ALT_COND_ALWAYS) == 0 && no_alternatives) continue; pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n", index, cond, len, from, replacement); - if ((cond & ALT_COND_NO_SMP) && (num_cpus != 1)) - continue; - if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0)) - continue; - if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0)) - continue; - - /* - * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit - * set (bit #61, big endian), we have to flush and sync every - * time IO-PDIR is changed in Ike/Astro. - */ - if ((cond & ALT_COND_NO_IOC_FDC) && - (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)) + /* Bounce out if none of the conditions are true. */ + if ((cond & cond_check) == 0) continue; /* Want to replace pdtlb by a pdtlb,l instruction? */ @@ -73,11 +80,19 @@ void __init_or_module apply_alternatives(struct alt_instr *start, if (replacement == INSN_NOP && len > 1) replacement = 0xe8000002 + (len-2)*8; /* "b,n .+8" */ - pr_debug("Do %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n", - index, cond, len, from, replacement); - - /* Replace instruction */ - *from = replacement; + pr_debug("ALTERNATIVE %3d: Cond %2x, Replace %2d instructions to 0x%08x @ 0x%px (%pS)\n", + index, cond, len, replacement, from, from); + + if (len < 0) { + /* Replace multiple instruction by new code */ + u32 *source; + len = -len; + source = (u32 *)((ulong)&entry->replacement + entry->replacement); + memcpy(from, source, 4 * len); + } else { + /* Replace by one instruction */ + *from = replacement; + } applied++; } @@ -94,5 +109,14 @@ void __init apply_alternatives_all(void) apply_alternatives((struct alt_instr *) &__alt_instructions, (struct alt_instr *) &__alt_instructions_end, NULL); + if (cache_info.dc_size == 0 && cache_info.ic_size == 0) { + pr_info("alternatives: optimizing cache-flushes.\n"); + static_branch_disable(&parisc_has_cache); + } + if (cache_info.dc_size == 0) + static_branch_disable(&parisc_has_dcache); + if (cache_info.ic_size == 0) + static_branch_disable(&parisc_has_icache); + set_kernel_text_rw(0); } diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index dfff8a0d6fd1..3de4b5933b10 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Generate definitions needed by assembly language modules. 
* This code generates raw asm output which is post-processed to extract @@ -11,21 +12,8 @@ * Copyright (C) 2001 Richard Hirst <rhirst at parisc-linux.org> * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#define COMPILE_OFFSETS #include <linux/types.h> #include <linux/sched.h> @@ -33,19 +21,17 @@ #include <linux/ptrace.h> #include <linux/hardirq.h> #include <linux/kbuild.h> +#include <linux/pgtable.h> -#include <asm/pgtable.h> +#include <asm/assembly.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/pdc.h> +#include <uapi/asm/sigcontext.h> +#include <asm/ucontext.h> +#include <asm/rt_sigframe.h> #include <linux/uaccess.h> - -#ifdef CONFIG_64BIT -#define FRAME_SIZE 128 -#else -#define FRAME_SIZE 64 -#endif -#define FRAME_ALIGN 64 +#include "signal32.h" /* Add FRAME_SIZE to the size x and align it to y. All definitions * that use align_frame will include space for a frame. @@ -54,14 +40,12 @@ int main(void) { - DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack)); - DEFINE(TASK_STATE, offsetof(struct task_struct, state)); - DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); - DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending)); - DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); - DEFINE(TASK_MM, offsetof(struct task_struct, mm)); - DEFINE(TASK_PERSONALITY, offsetof(struct task_struct, personality)); - DEFINE(TASK_PID, offsetof(struct task_struct, pid)); + DEFINE(TASK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); +#ifdef CONFIG_SMP + DEFINE(TASK_TI_CPU, offsetof(struct task_struct, thread_info.cpu)); +#endif + DEFINE(TASK_STACK, offsetof(struct task_struct, stack)); + DEFINE(TASK_PAGEFAULT_DISABLED, offsetof(struct task_struct, pagefault_disabled)); BLANK(); DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs)); DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0])); @@ -149,10 +133,6 @@ int main(void) DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr)); DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior)); BLANK(); - DEFINE(TASK_SZ, sizeof(struct task_struct)); - /* TASK_SZ_ALGN includes space for a stack frame. */ - DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN)); - BLANK(); DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0])); DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1])); DEFINE(PT_GR2, offsetof(struct pt_regs, gr[ 2])); @@ -237,18 +217,21 @@ int main(void) DEFINE(PT_IIR, offsetof(struct pt_regs, iir)); DEFINE(PT_ISR, offsetof(struct pt_regs, isr)); DEFINE(PT_IOR, offsetof(struct pt_regs, ior)); - DEFINE(PT_SIZE, sizeof(struct pt_regs)); /* PT_SZ_ALGN includes space for a stack frame. 
*/ DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN)); BLANK(); - DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); - DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit)); - DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); - DEFINE(THREAD_SZ, sizeof(struct thread_info)); - /* THREAD_SZ_ALGN includes space for a stack frame. */ - DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN)); + DEFINE(TI_PRE_COUNT, offsetof(struct task_struct, thread_info.preempt_count)); + BLANK(); + DEFINE(ASM_SIGFRAME_SIZE, PARISC_RT_SIGFRAME_SIZE); + DEFINE(SIGFRAME_CONTEXT_REGS, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE); +#ifdef CONFIG_64BIT + DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE32); + DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct compat_rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE32); +#else + DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE); + DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE); +#endif BLANK(); DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base)); DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride)); @@ -275,13 +258,14 @@ int main(void) BLANK(); DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP); DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP); + DEFINE(TIF_32BIT_PA_BIT, 31-TIF_32BIT); + BLANK(); DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD); DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD); DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE); - DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER)); DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT)); DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT); DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE); @@ -294,6 +278,8 @@ int main(void) * and kernel data on physical huge pages */ #ifdef CONFIG_HUGETLB_PAGE DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT); +#elif !defined(CONFIG_64BIT) + DEFINE(HUGEPAGE_SIZE, 4*1024*1024); #else DEFINE(HUGEPAGE_SIZE, PAGE_SIZE); #endif diff --git a/arch/parisc/kernel/audit.c b/arch/parisc/kernel/audit.c index 9eb47b2225d2..375cd73b5281 100644 --- a/arch/parisc/kernel/audit.c +++ b/arch/parisc/kernel/audit.c @@ -40,20 +40,21 @@ int audit_classify_arch(int arch) int audit_classify_syscall(int abi, unsigned syscall) { -#ifdef CONFIG_COMPAT - extern int parisc32_classify_syscall(unsigned); - if (abi == AUDIT_ARCH_PARISC) - return parisc32_classify_syscall(syscall); -#endif switch (syscall) { case __NR_open: - return 2; + return AUDITSC_OPEN; case __NR_openat: - return 3; + return AUDITSC_OPENAT; case __NR_execve: - return 5; + return AUDITSC_EXECVE; + case __NR_openat2: + return AUDITSC_OPENAT2; default: - return 0; +#ifdef CONFIG_COMPAT + if (abi == AUDIT_ARCH_PARISC) + return AUDITSC_COMPAT; +#endif + return AUDITSC_NATIVE; } } diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 804880efa11e..4c5240d3a3c7 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -19,68 +19,98 @@ #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/sched/mm.h> +#include <linux/syscalls.h> +#include <linux/vmalloc.h> #include <asm/pdc.h> #include <asm/cache.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/page.h> -#include <asm/pgalloc.h> #include <asm/processor.h> #include <asm/sections.h> #include 
<asm/shmparam.h> +#include <asm/mmu_context.h> +#include <asm/cachectl.h> -int split_tlb __read_mostly; -int dcache_stride __read_mostly; -int icache_stride __read_mostly; +#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE) + +/* + * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number + * of page flushes done flush_cache_page_if_present. There are some + * pros and cons in using this option. It may increase the risk of + * random segmentation faults. + */ +#define CONFIG_FLUSH_PAGE_ACCESSED 0 + +int split_tlb __ro_after_init; +int dcache_stride __ro_after_init; +int icache_stride __ro_after_init; EXPORT_SYMBOL(dcache_stride); +/* Internal implementation in arch/parisc/kernel/pacache.S */ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); EXPORT_SYMBOL(flush_dcache_page_asm); void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); +void flush_data_cache_local(void *); /* flushes local data-cache only */ +void flush_instruction_cache_local(void); /* flushes local code-cache only */ +static void flush_kernel_dcache_page_addr(const void *addr); -/* On some machines (e.g. ones with the Merced bus), there can be +/* On some machines (i.e., ones with the Merced bus), there can be * only a single PxTLB broadcast at a time; this must be guaranteed - * by software. We put a spinlock around all TLB flushes to - * ensure this. + * by software. We need a spinlock around all TLB flushes to ensure + * this. */ -DEFINE_SPINLOCK(pa_tlb_lock); +DEFINE_SPINLOCK(pa_tlb_flush_lock); -struct pdc_cache_info cache_info __read_mostly; +#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) +int pa_serialize_tlb_flushes __ro_after_init; +#endif + +struct pdc_cache_info cache_info __ro_after_init; #ifndef CONFIG_PA20 -static struct pdc_btlb_info btlb_info __read_mostly; +struct pdc_btlb_info btlb_info; #endif -#ifdef CONFIG_SMP -void -flush_data_cache(void) +DEFINE_STATIC_KEY_TRUE(parisc_has_cache); +DEFINE_STATIC_KEY_TRUE(parisc_has_dcache); +DEFINE_STATIC_KEY_TRUE(parisc_has_icache); + +static void cache_flush_local_cpu(void *dummy) { - on_each_cpu(flush_data_cache_local, NULL, 1); + if (static_branch_likely(&parisc_has_icache)) + flush_instruction_cache_local(); + if (static_branch_likely(&parisc_has_dcache)) + flush_data_cache_local(NULL); } -void -flush_instruction_cache(void) + +void flush_cache_all_local(void) { - on_each_cpu(flush_instruction_cache_local, NULL, 1); + cache_flush_local_cpu(NULL); } -#endif -void -flush_cache_all_local(void) +void flush_cache_all(void) { - flush_instruction_cache_local(NULL); - flush_data_cache_local(NULL); + if (static_branch_likely(&parisc_has_cache)) + on_each_cpu(cache_flush_local_cpu, NULL, 1); } -EXPORT_SYMBOL(flush_cache_all_local); -/* Virtual address of pfn. */ +static inline void flush_data_cache(void) +{ + if (static_branch_likely(&parisc_has_dcache)) + on_each_cpu(flush_data_cache_local, NULL, 1); +} + + +/* Kernel virtual address of pfn. */ #define pfn_va(pfn) __va(PFN_PHYS(pfn)) -void -update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +void __update_cache(pte_t pte) { - unsigned long pfn = pte_pfn(*ptep); - struct page *page; + unsigned long pfn = pte_pfn(pte); + struct folio *folio; + unsigned int nr; /* We don't have pte special. As a result, we can be called with an invalid pfn and we don't need to flush the kernel dcache page. 
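In the cache.c hunk above, the global flush paths are now gated by the static keys parisc_has_cache, parisc_has_dcache and parisc_has_icache; apply_alternatives_all() (see the alternative.c hunk earlier) disables them at boot when PDC reports a zero-sized cache, so the flush calls collapse to patched-out branches. A minimal sketch of that pattern, with hypothetical example_* names standing in for the real ones:

#include <linux/init.h>
#include <linux/jump_label.h>

/* Defaults to true, mirroring DEFINE_STATIC_KEY_TRUE(parisc_has_dcache). */
static DEFINE_STATIC_KEY_TRUE(example_has_dcache);

static void example_flush_data_cache(void)
{
        /* Compiles to a patchable jump; effectively free once disabled. */
        if (!static_branch_likely(&example_has_dcache))
                return;
        /* ... the expensive flush of the data cache would run here ... */
}

static void __init example_cache_setup(unsigned long dc_size)
{
        /* e.g. when PDC reports a zero-sized D-cache */
        if (dc_size == 0)
                static_branch_disable(&example_has_dcache);
}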
@@ -88,13 +118,17 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) if (!pfn_valid(pfn)) return; - page = pfn_to_page(pfn); - if (page_mapping_file(page) && - test_bit(PG_dcache_dirty, &page->flags)) { - flush_kernel_dcache_page_addr(pfn_va(pfn)); - clear_bit(PG_dcache_dirty, &page->flags); + folio = page_folio(pfn_to_page(pfn)); + pfn = folio_pfn(folio); + nr = folio_nr_pages(folio); + if (folio_flush_mapping(folio) && + test_bit(PG_dcache_dirty, &folio->flags.f)) { + while (nr--) + flush_kernel_dcache_page_addr(pfn_va(pfn + nr)); + clear_bit(PG_dcache_dirty, &folio->flags.f); } else if (parisc_requires_coherency()) - flush_kernel_dcache_page_addr(pfn_va(pfn)); + while (nr--) + flush_kernel_dcache_page_addr(pfn_va(pfn + nr)); } void @@ -106,11 +140,13 @@ show_cache_info(struct seq_file *m) cache_info.ic_size/1024 ); if (cache_info.dc_loop != 1) snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop); - seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n", + seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n", cache_info.dc_size/1024, (cache_info.dc_conf.cc_wt ? "WT":"WB"), (cache_info.dc_conf.cc_sh ? ", shared I/D":""), - ((cache_info.dc_loop == 1) ? "direct mapped" : buf)); + ((cache_info.dc_loop == 1) ? "direct mapped" : buf), + cache_info.dc_conf.cc_alias + ); seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n", cache_info.it_size, cache_info.dt_size, @@ -240,11 +276,9 @@ parisc_cache_init(void) icache_stride = CAFL_STRIDE(cache_info.ic_conf); #undef CAFL_STRIDE -#ifndef CONFIG_PA20 - if (pdc_btlb_info(&btlb_info) < 0) { - memset(&btlb_info, 0, sizeof btlb_info); - } -#endif + /* stride needs to be non-zero, otherwise cache flushes will not work */ + WARN_ON(cache_info.dc_size && dcache_stride == 0); + WARN_ON(cache_info.ic_size && icache_stride == 0); if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) == PDC_MODEL_NVA_UNSUPPORTED) { @@ -255,7 +289,7 @@ parisc_cache_init(void) } } -void __init disable_sr_hashing(void) +void disable_sr_hashing(void) { int srhash_type, retval; unsigned long space_bits; @@ -297,6 +331,20 @@ static inline void __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long physaddr) { + if (!static_branch_likely(&parisc_has_cache)) + return; + + /* + * The TLB is the engine of coherence on parisc. The CPU is + * entitled to speculate any page with a TLB mapping, so here + * we kill the mapping then flush the page along a special flush + * only alias mapping. This guarantees that the page is no-longer + * in the cache for any process and nor may it be speculatively + * read in (until the user or kernel specifically accesses it, + * of course). 
+ */ + flush_tlb_page(vma, vmaddr); + preempt_disable(); flush_dcache_page_asm(physaddr, vmaddr); if (vma->vm_flags & VM_EXEC) @@ -304,86 +352,211 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, preempt_enable(); } -static inline void -__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, - unsigned long physaddr) +static void flush_kernel_dcache_page_addr(const void *addr) { + unsigned long vaddr = (unsigned long)addr; + unsigned long flags; + + /* Purge TLB entry to remove translation on all CPUs */ + purge_tlb_start(flags); + pdtlb(SR_KERNEL, addr); + purge_tlb_end(flags); + + /* Use tmpalias flush to prevent data cache move-in */ preempt_disable(); - purge_dcache_page_asm(physaddr, vmaddr); - if (vma->vm_flags & VM_EXEC) - flush_icache_page_asm(physaddr, vmaddr); + flush_dcache_page_asm(__pa(vaddr), vaddr); preempt_enable(); } -void flush_dcache_page(struct page *page) +static void flush_kernel_icache_page_addr(const void *addr) +{ + unsigned long vaddr = (unsigned long)addr; + unsigned long flags; + + /* Purge TLB entry to remove translation on all CPUs */ + purge_tlb_start(flags); + pdtlb(SR_KERNEL, addr); + purge_tlb_end(flags); + + /* Use tmpalias flush to prevent instruction cache move-in */ + preempt_disable(); + flush_icache_page_asm(__pa(vaddr), vaddr); + preempt_enable(); +} + +void kunmap_flush_on_unmap(const void *addr) +{ + flush_kernel_dcache_page_addr(addr); +} +EXPORT_SYMBOL(kunmap_flush_on_unmap); + +void flush_icache_pages(struct vm_area_struct *vma, struct page *page, + unsigned int nr) +{ + void *kaddr = page_address(page); + + for (;;) { + flush_kernel_dcache_page_addr(kaddr); + flush_kernel_icache_page_addr(kaddr); + if (--nr == 0) + break; + kaddr += PAGE_SIZE; + } +} + +/* + * Walk page directory for MM to find PTEP pointer for address ADDR. + */ +static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr) +{ + pte_t *ptep = NULL; + pgd_t *pgd = mm->pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + if (!pgd_none(*pgd)) { + p4d = p4d_offset(pgd, addr); + if (!p4d_none(*p4d)) { + pud = pud_offset(p4d, addr); + if (!pud_none(*pud)) { + pmd = pmd_offset(pud, addr); + if (!pmd_none(*pmd)) + ptep = pte_offset_map(pmd, addr); + } + } + } + return ptep; +} + +static inline bool pte_needs_cache_flush(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)) + == (_PAGE_PRESENT | _PAGE_ACCESSED); +} + +/* + * Return user physical address. Returns 0 if page is not present. 
+ */ +static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr) +{ + unsigned long flags, space, pgd, prot, pa; +#ifdef CONFIG_TLB_PTLOCK + unsigned long pgd_lock; +#endif + + /* Save context */ + local_irq_save(flags); + prot = mfctl(8); + space = mfsp(SR_USER); + pgd = mfctl(25); +#ifdef CONFIG_TLB_PTLOCK + pgd_lock = mfctl(28); +#endif + + /* Set context for lpa_user */ + switch_mm_irqs_off(NULL, mm, NULL); + pa = lpa_user(addr); + + /* Restore previous context */ +#ifdef CONFIG_TLB_PTLOCK + mtctl(pgd_lock, 28); +#endif + mtctl(pgd, 25); + mtsp(space, SR_USER); + mtctl(prot, 8); + local_irq_restore(flags); + + return pa; +} + +void flush_dcache_folio(struct folio *folio) { - struct address_space *mapping = page_mapping_file(page); - struct vm_area_struct *mpnt; - unsigned long offset; + struct address_space *mapping = folio_flush_mapping(folio); + struct vm_area_struct *vma; unsigned long addr, old_addr = 0; + void *kaddr; + unsigned long count = 0; + unsigned long i, nr, flags; pgoff_t pgoff; if (mapping && !mapping_mapped(mapping)) { - set_bit(PG_dcache_dirty, &page->flags); + set_bit(PG_dcache_dirty, &folio->flags.f); return; } - flush_kernel_dcache_page(page); + nr = folio_nr_pages(folio); + kaddr = folio_address(folio); + for (i = 0; i < nr; i++) + flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE); if (!mapping) return; - pgoff = page->index; + pgoff = folio->index; - /* We have carefully arranged in arch_get_unmapped_area() that + /* + * We have carefully arranged in arch_get_unmapped_area() that * *any* mappings of a file are always congruently mapped (whether * declared as MAP_PRIVATE or MAP_SHARED), so we only need - * to flush one address here for them all to become coherent */ - - flush_dcache_mmap_lock(mapping); - vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { - offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; - addr = mpnt->vm_start + offset; - - /* The TLB is the engine of coherence on parisc: The - * CPU is entitled to speculate any page with a TLB - * mapping, so here we kill the mapping then flush the - * page along a special flush only alias mapping. - * This guarantees that the page is no-longer in the - * cache for any process and nor may it be - * speculatively read in (until the user or kernel - * specifically accesses it, of course) */ - - flush_tlb_page(mpnt, addr); + * to flush one address here for them all to become coherent + * on machines that support equivalent aliasing + */ + flush_dcache_mmap_lock_irqsave(mapping, flags); + vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) { + unsigned long offset = pgoff - vma->vm_pgoff; + unsigned long pfn = folio_pfn(folio); + + addr = vma->vm_start; + nr = folio_nr_pages(folio); + if (offset > -nr) { + pfn -= offset; + nr += offset; + } else { + addr += offset * PAGE_SIZE; + } + if (addr + nr * PAGE_SIZE > vma->vm_end) + nr = (vma->vm_end - addr) / PAGE_SIZE; + if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) - != (addr & (SHM_COLOUR - 1))) { - __flush_cache_page(mpnt, addr, page_to_phys(page)); + != (addr & (SHM_COLOUR - 1))) { + for (i = 0; i < nr; i++) + __flush_cache_page(vma, + addr + i * PAGE_SIZE, + (pfn + i) * PAGE_SIZE); + /* + * Software is allowed to have any number + * of private mappings to a page. 
+ */ + if (!(vma->vm_flags & VM_SHARED)) + continue; if (old_addr) - printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file); - old_addr = addr; + pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", + old_addr, addr, vma->vm_file); + if (nr == folio_nr_pages(folio)) + old_addr = addr; } + WARN_ON(++count == 4096); } - flush_dcache_mmap_unlock(mapping); + flush_dcache_mmap_unlock_irqrestore(mapping, flags); } -EXPORT_SYMBOL(flush_dcache_page); +EXPORT_SYMBOL(flush_dcache_folio); /* Defined in arch/parisc/kernel/pacache.S */ EXPORT_SYMBOL(flush_kernel_dcache_range_asm); -EXPORT_SYMBOL(flush_kernel_dcache_page_asm); -EXPORT_SYMBOL(flush_data_cache_local); EXPORT_SYMBOL(flush_kernel_icache_range_asm); #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ -static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; +static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD; #define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */ -static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD; +static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL; void __init parisc_setup_cache_timing(void) { unsigned long rangetime, alltime; - unsigned long size, start; - unsigned long threshold; + unsigned long size; + unsigned long threshold, threshold2; alltime = mfctl(16); flush_data_cache(); @@ -397,11 +570,16 @@ void __init parisc_setup_cache_timing(void) printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", alltime, size, rangetime); - threshold = L1_CACHE_ALIGN(size * alltime / rangetime); - if (threshold > cache_info.dc_size) - threshold = cache_info.dc_size; - if (threshold) - parisc_cache_flush_threshold = threshold; + threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime)); + pr_info("Calculated flush threshold is %lu KiB\n", + threshold/1024); + + /* + * The threshold computed above isn't very reliable. The following + * heuristic works reasonably well on c8000/rp3440. 
+ */ + threshold2 = cache_info.dc_size * num_online_cpus(); + parisc_cache_flush_threshold = threshold2; printk(KERN_INFO "Cache flush threshold set to %lu KiB\n", parisc_cache_flush_threshold/1024); @@ -416,14 +594,9 @@ void __init parisc_setup_cache_timing(void) goto set_tlb_threshold; } - size = 0; - start = (unsigned long) _text; + size = (unsigned long)_end - (unsigned long)_text; rangetime = mfctl(16); - while (start < (unsigned long) _end) { - flush_tlb_kernel_range(start, start + PAGE_SIZE); - start += PAGE_SIZE; - size += PAGE_SIZE; - } + flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end); rangetime = mfctl(16) - rangetime; alltime = mfctl(16); @@ -438,8 +611,7 @@ void __init parisc_setup_cache_timing(void) threshold/1024); set_tlb_threshold: - if (threshold > parisc_tlb_flush_threshold) - parisc_tlb_flush_threshold = threshold; + parisc_tlb_flush_threshold = max(threshold, FLUSH_TLB_THRESHOLD); printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", parisc_tlb_flush_threshold/1024); } @@ -448,30 +620,58 @@ extern void purge_kernel_dcache_page_asm(unsigned long); extern void clear_user_page_asm(void *, unsigned long); extern void copy_user_page_asm(void *, void *, unsigned long); -void flush_kernel_dcache_page_addr(void *addr) +static void flush_cache_page_if_present(struct vm_area_struct *vma, + unsigned long vmaddr) { - unsigned long flags; +#if CONFIG_FLUSH_PAGE_ACCESSED + bool needs_flush = false; + pte_t *ptep, pte; + + ptep = get_ptep(vma->vm_mm, vmaddr); + if (ptep) { + pte = ptep_get(ptep); + needs_flush = pte_needs_cache_flush(pte); + pte_unmap(ptep); + } + if (needs_flush) + __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte))); +#else + struct mm_struct *mm = vma->vm_mm; + unsigned long physaddr = get_upa(mm, vmaddr); + + if (physaddr) + __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr)); +#endif +} - flush_kernel_dcache_page_asm(addr); - purge_tlb_start(flags); - pdtlb_kernel(addr); - purge_tlb_end(flags); +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + void *kto, *kfrom; + + kfrom = kmap_local_page(from); + kto = kmap_local_page(to); + __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from))); + copy_page_asm(kto, kfrom); + kunmap_local(kto); + kunmap_local(kfrom); } -EXPORT_SYMBOL(flush_kernel_dcache_page_addr); -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, - struct page *pg) +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long user_vaddr, void *dst, void *src, int len) { - /* Copy using kernel mapping. No coherency is needed (all in - kunmap) for the `to' page. However, the `from' page needs to - be flushed through a mapping equivalent to the user mapping - before it can be accessed through the kernel mapping. 
*/ - preempt_disable(); - flush_dcache_page_asm(__pa(vfrom), vaddr); - copy_page_asm(vto, vfrom); - preempt_enable(); + __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page))); + memcpy(dst, src, len); + flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst)); +} + +void copy_from_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long user_vaddr, void *dst, void *src, int len) +{ + __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page))); + memcpy(dst, src, len); + flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src)); } -EXPORT_SYMBOL(copy_user_page); /* __flush_tlb_range() * @@ -493,165 +693,218 @@ int __flush_tlb_range(unsigned long sid, unsigned long start, but cause a purge request to be broadcast to other TLBs. */ while (start < end) { purge_tlb_start(flags); - mtsp(sid, 1); - pdtlb(start); - pitlb(start); + mtsp(sid, SR_TEMP1); + pdtlb(SR_TEMP1, start); + pitlb(SR_TEMP1, start); purge_tlb_end(flags); start += PAGE_SIZE; } return 0; } -static void cacheflush_h_tmp_function(void *dummy) +static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - flush_cache_all_local(); -} + unsigned long addr; -void flush_cache_all(void) -{ - on_each_cpu(cacheflush_h_tmp_function, NULL, 1); + for (addr = start; addr < end; addr += PAGE_SIZE) + flush_cache_page_if_present(vma, addr); } static inline unsigned long mm_total_size(struct mm_struct *mm) { struct vm_area_struct *vma; unsigned long usize = 0; + VMA_ITERATOR(vmi, mm, 0); - for (vma = mm->mmap; vma; vma = vma->vm_next) + for_each_vma(vmi, vma) { + if (usize >= parisc_cache_flush_threshold) + break; usize += vma->vm_end - vma->vm_start; - return usize; -} - -static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr) -{ - pte_t *ptep = NULL; - - if (!pgd_none(*pgd)) { - pud_t *pud = pud_offset(pgd, addr); - if (!pud_none(*pud)) { - pmd_t *pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) - ptep = pte_offset_map(pmd, addr); - } } - return ptep; + return usize; } void flush_cache_mm(struct mm_struct *mm) { struct vm_area_struct *vma; - pgd_t *pgd; - - /* Flushing the whole cache on each cpu takes forever on - rp3440, etc. So, avoid it if the mm isn't too big. */ - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - mm_total_size(mm) >= parisc_cache_flush_threshold) { - if (mm->context) - flush_tlb_all(); + VMA_ITERATOR(vmi, mm, 0); + + /* + * Flushing the whole cache on each cpu takes forever on + * rp3440, etc. So, avoid it if the mm isn't too big. + * + * Note that we must flush the entire cache on machines + * with aliasing caches to prevent random segmentation + * faults. 
+ */ + if (!parisc_requires_coherency() + || mm_total_size(mm) >= parisc_cache_flush_threshold) { + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) + return; + flush_tlb_all(); flush_cache_all(); return; } - if (mm->context == mfsp(3)) { - for (vma = mm->mmap; vma; vma = vma->vm_next) { - flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm(vma->vm_start, vma->vm_end); - flush_tlb_range(vma, vma->vm_start, vma->vm_end); - } + /* Flush mm */ + for_each_vma(vmi, vma) + flush_cache_pages(vma, vma->vm_start, vma->vm_end); +} + +void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + if (!parisc_requires_coherency() + || end - start >= parisc_cache_flush_threshold) { + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) + return; + flush_tlb_range(vma, start, end); + if (vma->vm_flags & VM_EXEC) + flush_cache_all(); + else + flush_data_cache(); return; } - pgd = mm->pgd; - for (vma = mm->mmap; vma; vma = vma->vm_next) { - unsigned long addr; + flush_cache_pages(vma, start & PAGE_MASK, end); +} - for (addr = vma->vm_start; addr < vma->vm_end; - addr += PAGE_SIZE) { - unsigned long pfn; - pte_t *ptep = get_ptep(pgd, addr); - if (!ptep) - continue; - pfn = pte_pfn(*ptep); - if (!pfn_valid(pfn)) - continue; - if (unlikely(mm->context)) { - flush_tlb_page(vma, addr); - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); - } else { - __purge_cache_page(vma, addr, PFN_PHYS(pfn)); - } - } - } +void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) +{ + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } -void flush_cache_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) +void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { - pgd_t *pgd; - unsigned long addr; + if (!PageAnon(page)) + return; - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - end - start >= parisc_cache_flush_threshold) { - if (vma->vm_mm->context) - flush_tlb_range(vma, start, end); + __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page))); +} + +int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + pte_t pte = ptep_get(ptep); + + if (!pte_young(pte)) + return 0; + set_pte(ptep, pte_mkold(pte)); +#if CONFIG_FLUSH_PAGE_ACCESSED + __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte))); +#endif + return 1; +} + +/* + * After a PTE is cleared, we have no way to flush the cache for + * the physical page. On PA8800 and PA8900 processors, these lines + * can cause random cache corruption. Thus, we must flush the cache + * as well as the TLB when clearing a PTE that's valid. + */ +pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + struct mm_struct *mm = (vma)->vm_mm; + pte_t pte = ptep_get_and_clear(mm, addr, ptep); + unsigned long pfn = pte_pfn(pte); + + if (pfn_valid(pfn)) + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + else if (pte_accessible(mm, pte)) + flush_tlb_page(vma, addr); + + return pte; +} + +/* + * The physical address for pages in the ioremap case can be obtained + * from the vm_struct struct. I wasn't able to successfully handle the + * vmalloc and vmap cases. We have an array of struct page pointers in + * the uninitialized vmalloc case but the flush failed using page_to_pfn. 
+ */ +void flush_cache_vmap(unsigned long start, unsigned long end) +{ + unsigned long addr, physaddr; + struct vm_struct *vm; + + /* Prevent cache move-in */ + flush_tlb_kernel_range(start, end); + + if (end - start >= parisc_cache_flush_threshold) { flush_cache_all(); return; } - if (vma->vm_mm->context == mfsp(3)) { - flush_user_dcache_range_asm(start, end); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm(start, end); - flush_tlb_range(vma, start, end); + if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) { + flush_cache_all(); return; } - pgd = vma->vm_mm->pgd; - for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { - unsigned long pfn; - pte_t *ptep = get_ptep(pgd, addr); - if (!ptep) - continue; - pfn = pte_pfn(*ptep); - if (pfn_valid(pfn)) { - if (unlikely(vma->vm_mm->context)) { - flush_tlb_page(vma, addr); - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); - } else { - __purge_cache_page(vma, addr, PFN_PHYS(pfn)); - } + vm = find_vm_area((void *)start); + if (!vm) { + flush_cache_all(); + return; + } + + /* The physical addresses of IOREMAP regions are contiguous */ + if (vm->flags & VM_IOREMAP) { + physaddr = vm->phys_addr; + for (addr = start; addr < end; addr += PAGE_SIZE) { + preempt_disable(); + flush_dcache_page_asm(physaddr, start); + flush_icache_page_asm(physaddr, start); + preempt_enable(); + physaddr += PAGE_SIZE; } + return; } + + flush_cache_all(); } +EXPORT_SYMBOL(flush_cache_vmap); -void -flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) +/* + * The vm_struct has been retired and the page table is set up. The + * last page in the range is a guard page. Its physical address can't + * be determined using lpa, so there is no way to flush the range + * using flush_dcache_page_asm. + */ +void flush_cache_vunmap(unsigned long start, unsigned long end) { - if (pfn_valid(pfn)) { - if (likely(vma->vm_mm->context)) { - flush_tlb_page(vma, vmaddr); - __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); - } else { - __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn)); - } - } + /* Prevent cache move-in */ + flush_tlb_kernel_range(start, end); + flush_data_cache(); } +EXPORT_SYMBOL(flush_cache_vunmap); +/* + * On systems with PA8800/PA8900 processors, there is no way to flush + * a vmap range other than using the architected loop to flush the + * entire cache. The page directory is not set up, so we can't use + * fdc, etc. FDCE/FICE don't work to flush a portion of the cache. + * L2 is physically indexed but FDCE/FICE instructions in virtual + * mode output their virtual address on the core bus, not their + * real address. As a result, the L2 cache index formed from the + * virtual address will most likely not be the same as the L2 index + * formed from the real address. 
+ */ void flush_kernel_vmap_range(void *vaddr, int size) { unsigned long start = (unsigned long)vaddr; unsigned long end = start + size; - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - (unsigned long)size >= parisc_cache_flush_threshold) { - flush_tlb_kernel_range(start, end); - flush_data_cache(); + flush_tlb_kernel_range(start, end); + + if (!static_branch_likely(&parisc_has_dcache)) + return; + + /* If interrupts are disabled, we can only do local flush */ + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) { + flush_data_cache_local(NULL); return; } - flush_kernel_dcache_range_asm(start, end); - flush_tlb_kernel_range(start, end); + flush_data_cache(); } EXPORT_SYMBOL(flush_kernel_vmap_range); @@ -660,14 +913,67 @@ void invalidate_kernel_vmap_range(void *vaddr, int size) unsigned long start = (unsigned long)vaddr; unsigned long end = start + size; - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - (unsigned long)size >= parisc_cache_flush_threshold) { - flush_tlb_kernel_range(start, end); - flush_data_cache(); + /* Ensure DMA is complete */ + asm_syncdma(); + + flush_tlb_kernel_range(start, end); + + if (!static_branch_likely(&parisc_has_dcache)) + return; + + /* If interrupts are disabled, we can only do local flush */ + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) { + flush_data_cache_local(NULL); return; } - purge_kernel_dcache_range_asm(start, end); - flush_tlb_kernel_range(start, end); + flush_data_cache(); } EXPORT_SYMBOL(invalidate_kernel_vmap_range); + + +SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes, + unsigned int, cache) +{ + unsigned long start, end; + ASM_EXCEPTIONTABLE_VAR(error); + + if (bytes == 0) + return 0; + if (!access_ok((void __user *) addr, bytes)) + return -EFAULT; + + end = addr + bytes; + + if (cache & DCACHE) { + start = addr; + __asm__ __volatile__ ( +#ifdef CONFIG_64BIT + "1: cmpb,*<<,n %0,%2,1b\n" +#else + "1: cmpb,<<,n %0,%2,1b\n" +#endif + " fic,m %3(%4,%0)\n" + "2: sync\n" + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1") + : "+r" (start), "+r" (error) + : "r" (end), "r" (dcache_stride), "i" (SR_USER)); + } + + if (cache & ICACHE && error == 0) { + start = addr; + __asm__ __volatile__ ( +#ifdef CONFIG_64BIT + "1: cmpb,*<<,n %0,%2,1b\n" +#else + "1: cmpb,<<,n %0,%2,1b\n" +#endif + " fdc,m %3(%4,%0)\n" + "2: sync\n" + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1") + : "+r" (start), "+r" (error) + : "r" (end), "r" (icache_stride), "i" (SR_USER)); + } + + return error; +} diff --git a/arch/parisc/kernel/compat_audit.c b/arch/parisc/kernel/compat_audit.c index 20c39c9d86a9..3ac53f1ab860 100644 --- a/arch/parisc/kernel/compat_audit.c +++ b/arch/parisc/kernel/compat_audit.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/audit_arch.h> #include <asm/unistd.h> unsigned int parisc32_dir_class[] = { @@ -25,17 +26,3 @@ unsigned int parisc32_signal_class[] = { #include <asm-generic/audit_signal.h> ~0U }; - -int parisc32_classify_syscall(unsigned syscall) -{ - switch (syscall) { - case __NR_open: - return 2; - case __NR_openat: - return 3; - case __NR_execve: - return 5; - default: - return 1; - } -} diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 5eb979d04b90..8d23fe42b0ce 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * drivers.c * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General 
Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * * Copyright (c) 1999 The Puffin Group * Copyright (c) 2001 Matthew Wilcox for Hewlett Packard - * Copyright (c) 2001 Helge Deller <deller@gmx.de> + * Copyright (c) 2001-2023 Helge Deller <deller@gmx.de> * Copyright (c) 2001,2002 Ryan Bradetich * Copyright (c) 2004-2005 Thibaut VARENE <varenet@parisc-linux.org> * @@ -34,13 +30,15 @@ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/export.h> +#include <linux/dma-map-ops.h> #include <asm/hardware.h> #include <asm/io.h> #include <asm/pdc.h> #include <asm/parisc-device.h> +#include <asm/ropes.h> /* See comments in include/asm-parisc/pci.h */ -const struct dma_map_ops *hppa_dma_ops __read_mostly; +const struct dma_map_ops *hppa_dma_ops __ro_after_init; EXPORT_SYMBOL(hppa_dma_ops); static struct device root = { @@ -76,13 +74,13 @@ static int descend_children(struct device * dev, void * data) } /** - * for_each_padev - Iterate over all devices in the tree - * @fn: Function to call for each device. - * @data: Data to pass to the called function. + * for_each_padev - Iterate over all devices in the tree + * @fn: Function to call for each device. + * @data: Data to pass to the called function. * - * This performs a depth-first traversal of the tree, calling the - * function passed for each node. It calls the function for parents - * before children. + * This performs a depth-first traversal of the tree, calling the + * function passed for each node. It calls the function for parents + * before children. */ static int for_each_padev(int (*fn)(struct device *, void *), void * data) @@ -99,7 +97,7 @@ static int for_each_padev(int (*fn)(struct device *, void *), void * data) * @driver: the PA-RISC driver to try * @dev: the PA-RISC device to try */ -static int match_device(struct parisc_driver *driver, struct parisc_device *dev) +static int match_device(const struct parisc_driver *driver, struct parisc_device *dev) { const struct parisc_device_id *ids; @@ -135,14 +133,13 @@ static int parisc_driver_probe(struct device *dev) return rc; } -static int __exit parisc_driver_remove(struct device *dev) +static void __exit parisc_driver_remove(struct device *dev) { struct parisc_device *pa_dev = to_parisc_device(dev); struct parisc_driver *pa_drv = to_parisc_driver(dev->driver); + if (pa_drv->remove) pa_drv->remove(pa_dev); - - return 0; } @@ -257,9 +254,33 @@ static struct parisc_device *find_device_by_addr(unsigned long hpa) return ret ? d.dev : NULL; } +static int __init is_IKE_device(struct device *dev, void *data) +{ + struct parisc_device *pdev = to_parisc_device(dev); + + if (!check_dev(dev)) + return 0; + if (pdev->id.hw_type != HPHW_BCPORT) + return 0; + if (IS_IKE(pdev) || + (pdev->id.hversion == REO_MERCED_PORT) || + (pdev->id.hversion == REOG_MERCED_PORT)) { + return 1; + } + return 0; +} + +int __init machine_has_merced_bus(void) +{ + int ret; + + ret = for_each_padev(is_IKE_device, NULL); + return ret ? 1 : 0; +} + /** * find_pa_parent_type - Find a parent of a specific type - * @dev: The device to start searching from + * @padev: The device to start searching from * @type: The device type to search for. * * Walks up the device tree looking for a device of the specified type. 
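The cacheflush() syscall added in cache.c above steps through the user range cache line by cache line (dcache_stride/icache_stride) under an exception-table entry, so a fault inside the range ends the loop with EFAULT instead of killing the caller. A minimal user-space sketch of how such a call might be issued after writing code into a buffer at runtime is given below; it assumes __NR_cacheflush and the DCACHE/ICACHE flag bits are supplied by the target system's kernel uapi headers, since their numeric values are not part of this diff.

/* Hedged user-space sketch for the cacheflush() syscall shown in
 * cache.c above.  __NR_cacheflush and the DCACHE/ICACHE bits must come
 * from the installed kernel headers; they are treated as externally
 * provided here rather than spelled out. */
#include <unistd.h>
#include <sys/syscall.h>

static long sync_code_range(unsigned long addr, unsigned long bytes,
			    unsigned int cache_flags)
{
	/* cache_flags is DCACHE, ICACHE or both; a zero-length range
	 * succeeds immediately and an inaccessible range fails with
	 * EFAULT, matching the checks at the top of the kernel code. */
	return syscall(__NR_cacheflush, addr, bytes, cache_flags);
}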
@@ -323,8 +344,8 @@ static char *print_hwpath(struct hardware_path *path, char *output) /** * print_pa_hwpath - Returns hardware path for PA devices - * dev: The device to return the path for - * output: Pointer to a previously-allocated array to place the path in. + * @dev: The device to return the path for + * @output: Pointer to a previously-allocated array to place the path in. * * This function fills in the output array with a human-readable path * to a PA device. This string is compatible with that used by PDC, and @@ -358,8 +379,8 @@ EXPORT_SYMBOL(get_pci_node_path); /** * print_pci_hwpath - Returns hardware path for PCI devices - * dev: The device to return the path for - * output: Pointer to a previously-allocated array to place the path in. + * @dev: The device to return the path for + * @output: Pointer to a previously-allocated array to place the path in. * * This function fills in the output array with a human-readable path * to a PCI device. This string is compatible with that used by PDC, and @@ -394,7 +415,8 @@ static void setup_bus_id(struct parisc_device *padev) dev_set_name(&padev->dev, name); } -struct parisc_device * __init create_tree_node(char id, struct device *parent) +static struct parisc_device * __init create_tree_node(char id, + struct device *parent) { struct parisc_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) @@ -499,7 +521,6 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) dev->id.hversion_rev = iodc_data[1] & 0x0f; dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) | (iodc_data[5] << 8) | iodc_data[6]; - dev->hpa.name = parisc_pathname(dev); dev->hpa.start = hpa; /* This is awkward. The STI spec says that gfx devices may occupy * 32MB or 64MB. Unfortunately, we don't know how to tell whether @@ -513,10 +534,10 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) dev->hpa.end = hpa + 0xfff; } dev->hpa.flags = IORESOURCE_MEM; - name = parisc_hardware_description(&dev->id); - if (name) { - strlcpy(dev->name, name, sizeof(dev->name)); - } + dev->hpa.name = dev->name; + name = parisc_hardware_description(&dev->id) ? 
: "unknown"; + snprintf(dev->name, sizeof(dev->name), "%s [%s]", + name, parisc_pathname(dev)); /* Silently fail things like mouse ports which are subsumed within * the keyboard controller @@ -527,12 +548,12 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) return dev; } -static int parisc_generic_match(struct device *dev, struct device_driver *drv) +static int parisc_generic_match(struct device *dev, const struct device_driver *drv) { return match_device(to_parisc_driver(drv), to_parisc_device(dev)); } -static ssize_t make_modalias(struct device *dev, char *buf) +static ssize_t make_modalias(const struct device *dev, char *buf) { const struct parisc_device *padev = to_parisc_device(dev); const struct parisc_device_id *id = &padev->id; @@ -542,7 +563,7 @@ static ssize_t make_modalias(struct device *dev, char *buf) (u32)id->sversion); } -static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env) +static int parisc_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct parisc_device *padev; char modalias[40]; @@ -597,7 +618,7 @@ static struct attribute *parisc_device_attrs[] = { }; ATTRIBUTE_GROUPS(parisc_device); -struct bus_type parisc_bus_type = { +const struct bus_type parisc_bus_type = { .name = "parisc", .match = parisc_generic_match, .uevent = parisc_uevent, @@ -721,7 +742,7 @@ parse_tree_node(struct device *parent, int index, struct hardware_path *modpath) }; if (device_for_each_child(parent, &recurse_data, descend_children)) - /* nothing */; + { /* nothing */ } return d.dev; } @@ -751,8 +772,8 @@ EXPORT_SYMBOL(hwpath_to_device); /** * device_to_hwpath - Populates the hwpath corresponding to the given device. - * @param dev the target device - * @param path pointer to a previously allocated hwpath struct to be filled in + * @dev: the target device + * @path: pointer to a previously allocated hwpath struct to be filled in */ void device_to_hwpath(struct device *dev, struct hardware_path *path) { @@ -789,7 +810,7 @@ EXPORT_SYMBOL(device_to_hwpath); static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, struct device *parent); -static void walk_lower_bus(struct parisc_device *dev) +static void __init walk_lower_bus(struct parisc_device *dev) { unsigned long io_io_low, io_io_high; @@ -862,15 +883,13 @@ void __init walk_central_bus(void) &root); } -static void print_parisc_device(struct parisc_device *dev) +static __init void print_parisc_device(struct parisc_device *dev) { - char hw_path[64]; - static int count; + static int count __initdata; - print_pa_hwpath(dev, hw_path); - pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", - ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type, - dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); + pr_info("%d. 
%s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }", + ++count, dev->name, &(dev->hpa.start), dev->id.hw_type, + dev->id.hversion, dev->id.sversion, dev->id.hversion_rev); if (dev->num_addrs) { int k; @@ -906,10 +925,10 @@ static __init void qemu_header(void) pr_info("#define PARISC_MODEL \"%s\"\n\n", boot_cpu_data.pdc.sys_model_name); - pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, " - "0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n", #define p ((unsigned long *)&boot_cpu_data.pdc.model) - p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]); + pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, " + "0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n", + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9]); #undef p pr_info("#define PARISC_PDC_VERSION 0x%04lx\n\n", @@ -976,6 +995,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) struct pdc_system_map_mod_info pdc_mod_info; struct pdc_module_path mod_path; + memset(&iodc_data, 0, sizeof(iodc_data)); status = pdc_iodc_read(&count, hpa, 0, &iodc_data, sizeof(iodc_data)); if (status != PDC_OK) { @@ -985,11 +1005,19 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) pr_info("\n"); + /* Prevent hung task messages when printing on serial console */ + cond_resched(); + pr_info("#define HPA_%08lx_DESCRIPTION \"%s\"\n", hpa, parisc_hardware_description(&dev->id)); mod_index = 0; do { + /* initialize device path for old machines */ + memset(&mod_path, 0xff, sizeof(mod_path)); + get_node_path(dev->dev.parent, &mod_path.path); + mod_path.path.mod = dev->hw_path; + memset(&pdc_mod_info, 0, sizeof(pdc_mod_info)); status = pdc_system_map_find_mods(&pdc_mod_info, &mod_path, mod_index++); } while (status == PDC_OK && pdc_mod_info.mod_addr != hpa); @@ -1015,11 +1043,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) (unsigned char)mod_path.path.bc[3], (unsigned char)mod_path.path.bc[4], (unsigned char)mod_path.path.bc[5]); - pr_cont(".mod = 0x%x ", mod_path.path.mod); - pr_cont(" },\n"); - pr_cont("\t.layers = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n", - mod_path.layers[0], mod_path.layers[1], mod_path.layers[2], - mod_path.layers[3], mod_path.layers[4], mod_path.layers[5]); + pr_cont(".mod = 0x%x }\n", mod_path.path.mod); pr_cont("};\n"); pr_info("static struct pdc_iodc iodc_data_hpa_%08lx = {\n", hpa); @@ -1039,8 +1063,6 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) DO(checksum); DO(length); #undef DO - pr_cont("\t/* pad: 0x%04x, 0x%04x */\n", - iodc_data.pad[0], iodc_data.pad[1]); pr_cont("};\n"); pr_info("#define HPA_%08lx_num_addr %d\n", hpa, dev->num_addrs); @@ -1059,7 +1081,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) -static int print_one_device(struct device * dev, void * data) +static __init int print_one_device(struct device * dev, void * data) { struct parisc_device * pdev = to_parisc_device(dev); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index d5eb19efa65b..e04c5d806c10 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * @@ -6,20 +7,6 @@ * Copyright (C) 1999 SuSE GmbH Nuernberg * Copyright (C) 2000 Hewlett-Packard (John Marvin) * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public 
License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <asm/asm-offsets.h> @@ -32,15 +19,16 @@ #include <asm/psw.h> #include <asm/cache.h> /* for L1_CACHE_SHIFT */ #include <asm/assembly.h> /* for LDREG/STREG defines */ -#include <asm/pgtable.h> #include <asm/signal.h> #include <asm/unistd.h> #include <asm/ldcw.h> #include <asm/traps.h> #include <asm/thread_info.h> #include <asm/alternative.h> +#include <asm/spinlock_types.h> #include <linux/linkage.h> +#include <linux/pgtable.h> #ifdef CONFIG_64BIT .level 2.0w @@ -48,14 +36,27 @@ .level 2.0 #endif - .import pa_tlb_lock,data - .macro load_pa_tlb_lock reg -#if __PA_LDCW_ALIGNMENT > 4 - load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg - depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg +/* + * We need seven instructions after a TLB insert for it to take effect. + * The PA8800/PA8900 processors are an exception and need 12 instructions. + * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one. + */ +#ifdef CONFIG_64BIT +#define NUM_PIPELINE_INSNS 12 #else - load32 PA(pa_tlb_lock), \reg +#define NUM_PIPELINE_INSNS 7 #endif + + /* Insert num nops */ + .macro insert_nops num + .rept \num + nop + .endr + .endm + + /* Get aligned page_table_lock address for this mm from cr28/tr4 */ + .macro get_ptl reg + mfctl %cr28,\reg .endm /* space_to_prot macro creates a prot id from a space id */ @@ -69,30 +70,6 @@ extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot .endm #endif - - /* Switch to virtual mapping, trashing only %r1 */ - .macro virt_map - /* pcxt_ssm_bug */ - rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */ - mtsp %r0, %sr4 - mtsp %r0, %sr5 - mtsp %r0, %sr6 - tovirt_r1 %r29 - load32 KERNEL_PSW, %r1 - - rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */ - mtctl %r0, %cr17 /* Clear IIASQ tail */ - mtctl %r0, %cr17 /* Clear IIASQ head */ - mtctl %r1, %ipsw - load32 4f, %r1 - mtctl %r1, %cr18 /* Set IIAOQ tail */ - ldo 4(%r1), %r1 - mtctl %r1, %cr18 /* Set IIAOQ head */ - rfir - nop -4: - .endm - /* * The "get_stack" macros are responsible for determining the * kernel stack value. @@ -105,8 +82,8 @@ * Need to set up a kernel stack, so call the * get_stack_use_cr30 macro to set up a pointer * to the pt_regs structure contained within the - * task pointer pointed to by cr30. Set the stack - * pointer to point to the end of the task structure. + * task pointer pointed to by cr30. Load the stack + * pointer from the task structure. * * Note that we use shadowed registers for temps until * we can save %r26 and %r29. %r26 is used to preserve @@ -118,8 +95,6 @@ * or handle_interruption. %r29 is used to hold a pointer * the register save area, and once again, it needs to * be a non-shadowed register so that it survives the rfir. - * - * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame. 
*/ .macro get_stack_use_cr30 @@ -128,12 +103,11 @@ copy %r30, %r17 mfctl %cr30, %r1 - ldo THREAD_SZ_ALGN(%r1), %r30 - mtsp %r0,%sr7 + tophys %r1,%r9 /* task_struct */ + LDREG TASK_STACK(%r9),%r30 + ldo PT_SZ_ALGN(%r30),%r30 + mtsp %r0,%sr7 /* clear sr7 after kernel stack was set! */ mtsp %r16,%sr3 - tophys %r1,%r9 - LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */ - tophys %r1,%r9 ldo TASK_REGS(%r9),%r9 STREG %r17,PT_GR30(%r9) STREG %r29,PT_GR29(%r9) @@ -411,68 +385,47 @@ */ .macro L2_ptep pmd,pte,index,va,fault #if CONFIG_PGTABLE_LEVELS == 3 - extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index + extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index #else -# if defined(CONFIG_64BIT) - extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index - #else - # if PAGE_SIZE > 4096 - extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index - # else - extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index - # endif -# endif + extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index #endif dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ +#if CONFIG_PGTABLE_LEVELS < 3 copy %r0,\pte +#endif ldw,s \index(\pmd),\pmd bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ SHLREG \pmd,PxD_VALUE_SHIFT,\pmd - extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index + extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */ .endm - /* Look up PTE in a 3-Level scheme. - * - * Here we implement a Hybrid L2/L3 scheme: we allocate the - * first pmd adjacent to the pgd. This means that we can - * subtract a constant offset to get to it. The pmd and pgd - * sizes are arranged so that a single pmd covers 4GB (giving - * a full LP64 process access to 8TB) so our lookups are - * effectively L2 for the first 4GB of the kernel (i.e. for - * all ILP32 processes and all the kernel for machines with - * under 4GB of memory) */ + /* Look up PTE in a 3-Level scheme. */ .macro L3_ptep pgd,pte,index,va,fault -#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */ +#if CONFIG_PGTABLE_LEVELS == 3 + copy %r0,\pte extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 ldw,s \index(\pgd),\pgd - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - shld \pgd,PxD_VALUE_SHIFT,\index - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - copy \index,\pgd - extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd + shld \pgd,PxD_VALUE_SHIFT,\pgd #endif L2_ptep \pgd,\pte,\index,\va,\fault .endm - /* Acquire pa_tlb_lock lock and check page is present. */ - .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault -#ifdef CONFIG_SMP + /* Acquire page_table_lock and check page is present. */ + .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault +#ifdef CONFIG_TLB_PTLOCK 98: cmpib,COND(=),n 0,\spc,2f - load_pa_tlb_lock \tmp + get_ptl \tmp 1: LDCW 0(\tmp),\tmp1 cmpib,COND(=) 0,\tmp1,1b nop LDREG 0(\ptp),\pte bb,<,n \pte,_PAGE_PRESENT_BIT,3f b \fault - stw,ma \spc,0(\tmp) + stw \tmp1,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif 2: LDREG 0(\ptp),\pte @@ -480,21 +433,20 @@ 3: .endm - /* Release pa_tlb_lock lock without reloading lock address. 
*/ - .macro tlb_unlock0 spc,tmp -#ifdef CONFIG_SMP -98: or,COND(=) %r0,\spc,%r0 - stw,ma \spc,0(\tmp) -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - .endm - - /* Release pa_tlb_lock lock. */ - .macro tlb_unlock1 spc,tmp -#ifdef CONFIG_SMP -98: load_pa_tlb_lock \tmp + /* Release page_table_lock if for user space. We use an ordered + store to ensure all prior accesses are performed prior to + releasing the lock. Note stw may not be executed, so we + provide one extra nop when CONFIG_TLB_PTLOCK is defined. */ + .macro ptl_unlock spc,tmp,tmp2 +#ifdef CONFIG_TLB_PTLOCK +98: get_ptl \tmp + ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2 + or,COND(=) %r0,\spc,%r0 + stw,ma \tmp2,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) - tlb_unlock0 \spc,\tmp + insert_nops NUM_PIPELINE_INSNS - 4 +#else + insert_nops NUM_PIPELINE_INSNS - 1 #endif .endm @@ -523,13 +475,13 @@ * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12) + #define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT) /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ .macro convert_for_tlb_insert20 pte,tmp #ifdef CONFIG_HUGETLB_PAGE copy \pte,\tmp - extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ - 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte + extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ (63-58)+PAGE_ADD_SHIFT,\pte @@ -537,8 +489,7 @@ depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\ (63-58)+PAGE_ADD_HUGE_SHIFT,\pte #else /* Huge pages disabled */ - extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ - 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte + extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ (63-58)+PAGE_ADD_SHIFT,\pte #endif @@ -548,6 +499,12 @@ * this happens is quite subtle, read below */ .macro make_insert_tlb spc,pte,prot,tmp space_to_prot \spc \prot /* create prot id from space */ + +#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT + /* need to drop DMB bit, as it's used as SPECIAL flag */ + depi 0,_PAGE_SPECIAL_BIT,1,\pte +#endif + /* The following is the real subtlety. This is depositing * T <-> _PAGE_REFTRAP * D <-> _PAGE_DIRTY @@ -564,9 +521,14 @@ /* PAGE_USER indicates the page can be read with user privileges, * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 - * contains _PAGE_READ) */ + * contains _PAGE_READ). While the kernel can't directly write + * user pages which have _PAGE_WRITE zero, it can read pages + * which have _PAGE_READ zero (PL <= PL1). Thus, the kernel + * exception fault handler doesn't trigger when reading pages + * that aren't user read accessible */ extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 depdi 7,11,3,\prot + /* If we're a gateway page, drop PL2 back to zero for promotion * to kernel privilege (so we can execute the page as kernel). 
* Any privilege promotion page always denys read and write */ @@ -590,6 +552,10 @@ * makes the tlb entry for the differently formatted pa11 * insertion instructions */ .macro make_insert_tlb_11 spc,pte,prot +#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT + /* need to drop DMB bit, as it's used as SPECIAL flag */ + depi 0,_PAGE_SPECIAL_BIT,1,\pte +#endif zdep \spc,30,15,\prot dep \pte,8,7,\prot extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0 @@ -615,8 +581,9 @@ extrd,s \pte,63,25,\pte .endm - /* The alias region is an 8MB aligned 16MB to do clear and - * copy user pages at addresses congruent with the user + /* The alias region is comprised of a pair of 4 MB regions + * aligned to 8 MB. It is used to clear/copy/flush user pages + * using kernel virtual addresses congruent with the user * virtual address. * * To use the alias page, you set %r26 up with the to TLB @@ -626,13 +593,8 @@ .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype cmpib,COND(<>),n 0,\spc,\fault ldil L%(TMPALIAS_MAP_START),\tmp -#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000) - /* on LP64, ldi will sign extend into the upper 32 bits, - * which is behaviour we don't want */ - depdi 0,31,32,\tmp -#endif copy \va,\tmp1 - depi 0,31,23,\tmp1 + depi_safe 0,31,TMPALIAS_SIZE_BITS+1,\tmp1 cmpb,COND(<>),n \tmp,\tmp1,\fault mfctl %cr19,\tmp /* iir */ /* get the opcode (first six bits) into \tmp */ @@ -665,13 +627,13 @@ * OK, it is in the temp alias region, check whether "from" or "to". * Check "subtle" note in pacache.S re: r23/r26. */ -#ifdef CONFIG_64BIT - extrd,u,*= \va,41,1,%r0 -#else - extrw,u,= \va,9,1,%r0 -#endif + extrw,u,= \va,31-TMPALIAS_SIZE_BITS,1,%r0 or,COND(tr) %r23,%r0,\pte or %r26,%r0,\pte + + /* convert phys addr in \pte (from r23 or r26) to tlb insert format */ + SHRREG \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte + depi_safe _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte .endm @@ -783,7 +745,7 @@ ENTRY(ret_from_kernel_thread) BL schedule_tail, %r2 nop - LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1 + mfctl %cr30,%r1 /* task_struct */ LDREG TASK_PT_GR25(%r1), %r26 #ifdef CONFIG_64BIT LDREG TASK_PT_GR27(%r1), %r27 @@ -814,7 +776,6 @@ ENTRY_CFI(_switch_to) STREG %r30, TASK_PT_KSP(%r26) LDREG TASK_PT_KSP(%r25), %r30 - LDREG TASK_THREAD_INFO(%r25), %r25 bv %r0(%r2) mtctl %r25,%cr30 @@ -845,17 +806,16 @@ ENDPROC_CFI(_switch_to) .align PAGE_SIZE ENTRY_CFI(syscall_exit_rfi) - mfctl %cr30,%r16 - LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */ + mfctl %cr30,%r16 /* task_struct */ ldo TASK_REGS(%r16),%r16 /* Force iaoq to userspace, as the user has had access to our current * context via sigcontext. Also Filter the PSW for the same reason. 
*/ LDREG PT_IAOQ0(%r16),%r19 - depi 3,31,2,%r19 + depi PRIV_USER,31,2,%r19 STREG %r19,PT_IAOQ0(%r16) LDREG PT_IAOQ1(%r16),%r19 - depi 3,31,2,%r19 + depi PRIV_USER,31,2,%r19 STREG %r19,PT_IAOQ1(%r16) LDREG PT_PSW(%r16),%r19 load32 USER_PSW_MASK,%r1 @@ -891,15 +851,15 @@ ENTRY_CFI(syscall_exit_rfi) ENTRY(intr_return) /* check for reschedule */ mfctl %cr30,%r1 - LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ + LDREG TASK_TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */ .import do_notify_resume,code intr_check_sig: /* As above */ mfctl %cr30,%r1 - LDREG TI_FLAGS(%r1),%r19 - ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20 + LDREG TASK_TI_FLAGS(%r1),%r19 + ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20 and,COND(<>) %r19, %r20, %r0 b,n intr_restore /* skip past if we've nothing to do */ @@ -911,20 +871,20 @@ intr_check_sig: * Only do signals if we are returning to user space */ LDREG PT_IASQ0(%r16), %r20 - cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */ + cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */ LDREG PT_IASQ1(%r16), %r20 - cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */ - - /* NOTE: We need to enable interrupts if we have to deliver - * signals. We used to do this earlier but it caused kernel - * stack overflows. */ - ssm PSW_SM_I, %r0 + cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */ copy %r0, %r25 /* long in_syscall = 0 */ #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif + /* NOTE: We need to enable interrupts if we have to deliver + * signals. We used to do this earlier but it caused kernel + * stack overflows. */ + ssm PSW_SM_I, %r0 + BL do_notify_resume,%r2 copy %r16, %r26 /* struct pt_regs *regs */ @@ -954,14 +914,14 @@ intr_restore: rfi nop -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION # define intr_do_preempt intr_restore -#endif /* !CONFIG_PREEMPT */ +#endif /* !CONFIG_PREEMPTION */ .import schedule,code intr_do_resched: /* Only call schedule on return to userspace. If we're returning - * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise + * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise * we jump back to intr_restore. */ LDREG PT_IASQ0(%r16), %r20 @@ -993,15 +953,15 @@ intr_do_resched: * and preempt_count is 0. otherwise, we continue on * our merry way back to the current running task. */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION .import preempt_schedule_irq,code intr_do_preempt: rsm PSW_SM_I, %r0 /* disable interrupts */ /* current_thread_info()->preempt_count */ mfctl %cr30, %r1 - LDREG TI_PRE_COUNT(%r1), %r19 - cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */ + ldw TI_PRE_COUNT(%r1), %r19 + cmpib,<> 0, %r19, intr_restore /* if preempt_count > 0 */ nop /* prev insn branched backwards */ /* check if we interrupted a critical path */ @@ -1009,11 +969,18 @@ intr_do_preempt: bb,<,n %r20, 31 - PSW_SM_I, intr_restore nop + /* ssm PSW_SM_I done later in intr_restore */ +#ifdef CONFIG_MLONGCALLS + ldil L%intr_restore, %r2 + load32 preempt_schedule_irq, %r1 + bv %r0(%r1) + ldo R%intr_restore(%r2), %r2 +#else + ldil L%intr_restore, %r1 BL preempt_schedule_irq, %r2 - nop - - b,n intr_restore /* ssm PSW_SM_I done by intr_restore */ -#endif /* CONFIG_PREEMPT */ + ldo R%intr_restore(%r1), %r2 +#endif +#endif /* CONFIG_PREEMPTION */ /* * External interrupts. 
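The ptl_lock/ptl_unlock macros introduced above serialize the software TLB-miss handlers against page-table updates using PA-RISC's only atomic primitive, ldcw: the lock word (reached through %cr28 via get_ptl) is owned once ldcw reads back a non-zero value, and it is released with an ordered store of __ARCH_SPIN_LOCK_UNLOCKED_VAL followed by enough nops (NUM_PIPELINE_INSNS) for the freshly inserted TLB entry to take effect before the rfir. The stand-alone C11 sketch below mirrors that protocol purely as a reading aid; the names ptl_lock_c/ptl_unlock_c and the unlocked token value are illustrative, not taken from the kernel.

#include <stdatomic.h>

#define UNLOCKED_TOKEN 1	/* assumption: any non-zero value marks "free" */

/* atomic_exchange(..., 0) mimics ldcw: fetch the old word, store zero. */
static void ptl_lock_c(atomic_uint *lock)
{
	while (atomic_exchange_explicit(lock, 0, memory_order_acquire) == 0)
		;	/* word was already zero: another CPU holds the lock */
}

/* Release ordering mirrors the ordered store (stw,ma) that publishes the
 * PTE update before the lock is dropped. */
static void ptl_unlock_c(atomic_uint *lock)
{
	atomic_store_explicit(lock, UNLOCKED_TOKEN, memory_order_release);
}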
@@ -1091,23 +1058,26 @@ ENTRY_CFI(intr_save) /* for os_hpmc */ STREG %r16, PT_ISR(%r29) STREG %r17, PT_IOR(%r29) -#if 0 && defined(CONFIG_64BIT) - /* Revisit when we have 64-bit code above 4Gb */ - b,n intr_save2 - +#if defined(CONFIG_64BIT) skip_save_ior: /* We have a itlb miss, and when executing code above 4 Gb on ILP64, we * need to adjust iasq/iaoq here in the same way we adjusted isr/ior * above. */ - extrd,u,* %r8,PSW_W_BIT,1,%r1 - cmpib,COND(=),n 1,%r1,intr_save2 + bb,COND(>=),n %r8,PSW_W_BIT,intr_save2 LDREG PT_IASQ0(%r29), %r16 LDREG PT_IAOQ0(%r29), %r17 - /* adjust iasq/iaoq */ + /* adjust iasq0/iaoq0 */ space_adjust %r16,%r17,%r1 STREG %r16, PT_IASQ0(%r29) STREG %r17, PT_IAOQ0(%r29) + + LDREG PT_IASQ1(%r29), %r16 + LDREG PT_IAOQ1(%r29), %r17 + /* adjust iasq1/iaoq1 */ + space_adjust %r16,%r17,%r1 + STREG %r16, PT_IASQ1(%r29) + STREG %r17, PT_IAOQ1(%r29) #else skip_save_ior: #endif @@ -1170,14 +1140,14 @@ dtlb_miss_20w: L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1186,6 +1156,7 @@ dtlb_check_alias_20w: idtlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1196,14 +1167,14 @@ nadtlb_miss_20w: L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1212,6 +1183,7 @@ nadtlb_check_alias_20w: idtlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1224,7 +1196,7 @@ dtlb_miss_11: L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1237,7 +1209,7 @@ dtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1247,6 +1219,7 @@ dtlb_check_alias_11: idtlba pte,(va) idtlbp prot,(va) + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1257,7 +1230,7 @@ nadtlb_miss_11: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1270,7 +1243,7 @@ nadtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1280,6 +1253,7 @@ nadtlb_check_alias_11: idtlba pte,(va) idtlbp prot,(va) + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1290,7 +1264,7 @@ dtlb_miss_20: L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1299,7 +1273,7 @@ dtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1308,6 +1282,7 @@ dtlb_check_alias_20: idtlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1318,7 +1293,7 @@ nadtlb_miss_20: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1327,7 +1302,7 @@ nadtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1336,6 +1311,7 @@ 
nadtlb_check_alias_20: idtlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1344,74 +1320,12 @@ nadtlb_check_alias_20: nadtlb_emulate: /* - * Non access misses can be caused by fdc,fic,pdc,lpa,probe and - * probei instructions. We don't want to fault for these - * instructions (not only does it not make sense, it can cause - * deadlocks, since some flushes are done with the mmap - * semaphore held). If the translation doesn't exist, we can't - * insert a translation, so have to emulate the side effects - * of the instruction. Since we don't insert a translation - * we can get a lot of faults during a flush loop, so it makes - * sense to try to do it here with minimum overhead. We only - * emulate fdc,fic,pdc,probew,prober instructions whose base - * and index registers are not shadowed. We defer everything - * else to the "slow" path. + * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and + * probei instructions. The kernel no longer faults doing flushes. + * Use of lpa and probe instructions is rare. Given the issue + * with shadow registers, we defer everything to the "slow" path. */ - - mfctl %cr19,%r9 /* Get iir */ - - /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits. - Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */ - - /* Checks for fdc,fdce,pdc,"fic,4f" only */ - ldi 0x280,%r16 - and %r9,%r16,%r17 - cmpb,<>,n %r16,%r17,nadtlb_probe_check - bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */ - BL get_register,%r25 - extrw,u %r9,15,5,%r8 /* Get index register # */ - cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */ - copy %r1,%r24 - BL get_register,%r25 - extrw,u %r9,10,5,%r8 /* Get base register # */ - cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */ - BL set_register,%r25 - add,l %r1,%r24,%r1 /* doesn't affect c/b bits */ - -nadtlb_nullify: - mfctl %ipsw,%r8 - ldil L%PSW_N,%r9 - or %r8,%r9,%r8 /* Set PSW_N */ - mtctl %r8,%ipsw - - rfir - nop - - /* - When there is no translation for the probe address then we - must nullify the insn and return zero in the target register. - This will indicate to the calling code that it does not have - write/read privileges to this address. - - This should technically work for prober and probew in PA 1.1, - and also probe,r and probe,w in PA 2.0 - - WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN! - THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET. 
- - */ -nadtlb_probe_check: - ldi 0x80,%r16 - and %r9,%r16,%r17 - cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/ - BL get_register,%r25 /* Find the target register */ - extrw,u %r9,31,5,%r8 /* Get target register */ - cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */ - BL set_register,%r25 - copy %r0,%r1 /* Write zero to target register */ - b nadtlb_nullify /* Nullify return insn */ - nop - + b,n nadtlb_fault #ifdef CONFIG_64BIT itlb_miss_20w: @@ -1427,14 +1341,14 @@ itlb_miss_20w: L3_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1451,14 +1365,14 @@ naitlb_miss_20w: L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1467,6 +1381,7 @@ naitlb_check_alias_20w: iitlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1479,7 +1394,7 @@ itlb_miss_11: L2_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1492,7 +1407,7 @@ itlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1503,7 +1418,7 @@ naitlb_miss_11: L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1516,7 +1431,7 @@ naitlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1526,6 +1441,7 @@ naitlb_check_alias_11: iitlba pte,(%sr0, va) iitlbp prot,(%sr0, va) + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1537,7 +1453,7 @@ itlb_miss_20: L2_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1546,7 +1462,7 @@ itlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1557,7 +1473,7 @@ naitlb_miss_20: L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1566,7 +1482,7 @@ naitlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1575,6 +1491,7 @@ naitlb_check_alias_20: iitlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1589,14 +1506,14 @@ dbit_trap_20w: L3_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock0 spc,t0 + ptl_unlock spc,t0,t1 rfir nop #else @@ -1609,7 +1526,7 @@ dbit_trap_11: L2_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb_11 spc,pte,prot @@ -1622,7 +1539,7 @@ dbit_trap_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock0 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1633,7 +1550,7 @@ dbit_trap_20: L2_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock 
spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot,t1 @@ -1642,7 +1559,7 @@ dbit_trap_20: idtlbt pte,prot - tlb_unlock0 spc,t0 + ptl_unlock spc,t0,t1 rfir nop #endif @@ -1735,7 +1652,7 @@ dtlb_fault: .macro fork_like name ENTRY_CFI(sys_\name\()_wrapper) - LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1 + mfctl %cr30,%r1 ldo TASK_REGS(%r1),%r1 reg_save %r1 mfctl %cr27, %r28 @@ -1746,6 +1663,7 @@ ENDPROC_CFI(sys_\name\()_wrapper) .endm fork_like clone +fork_like clone3 fork_like fork fork_like vfork @@ -1754,7 +1672,7 @@ ENTRY(child_return) BL schedule_tail, %r2 nop finish_child_return: - LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1 + mfctl %cr30,%r1 ldo TASK_REGS(%r1),%r1 /* get pt regs */ LDREG PT_CR27(%r1), %r3 @@ -1765,7 +1683,7 @@ finish_child_return: END(child_return) ENTRY_CFI(sys_rt_sigreturn_wrapper) - LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 + mfctl %cr30,%r26 ldo TASK_REGS(%r26),%r26 /* get pt regs */ /* Don't save regs, we are going to restore them from sigcontext. */ STREG %r2, -RP_OFFSET(%r30) @@ -1782,7 +1700,7 @@ ENTRY_CFI(sys_rt_sigreturn_wrapper) LDREG -RP_OFFSET(%r30), %r2 /* FIXME: I think we need to restore a few more things here. */ - LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 + mfctl %cr30,%r1 ldo TASK_REGS(%r1),%r1 /* get pt regs */ reg_restore %r1 @@ -1801,9 +1719,7 @@ ENTRY(syscall_exit) */ /* save return value now */ - mfctl %cr30, %r1 - LDREG TI_TASK(%r1),%r1 STREG %r28,TASK_PT_GR28(%r1) /* Seems to me that dp could be wrong here, if the syscall involved @@ -1814,14 +1730,15 @@ ENTRY(syscall_exit) syscall_check_resched: /* check for reschedule */ - - LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */ + mfctl %cr30,%r19 + LDREG TASK_TI_FLAGS(%r19),%r19 /* long */ bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */ .import do_signal,code syscall_check_sig: - LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 - ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26 + mfctl %cr30,%r19 + LDREG TASK_TI_FLAGS(%r19),%r19 + ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26 and,COND(<>) %r19, %r26, %r0 b,n syscall_restore /* skip past if we've nothing to do */ @@ -1831,7 +1748,7 @@ syscall_do_signal: * consistent with all the relevant state of the process * before the syscall. We need to verify this. */ - LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 + mfctl %cr30,%r1 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */ reg_save %r26 @@ -1842,18 +1759,18 @@ syscall_do_signal: BL do_notify_resume,%r2 ldi 1, %r25 /* long in_syscall = 1 */ - LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 + mfctl %cr30,%r1 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */ reg_restore %r20 b,n syscall_check_sig syscall_restore: - LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 + mfctl %cr30,%r1 /* Are we being ptraced? */ - ldw TASK_FLAGS(%r1),%r19 - ldi _TIF_SYSCALL_TRACE_MASK,%r2 + LDREG TASK_TI_FLAGS(%r1),%r19 + ldi _TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2 and,COND(=) %r19,%r2,%r0 b,n syscall_restore_rfi @@ -1891,7 +1808,7 @@ syscall_restore: mtsp %r1,%sr5 /* Restore sr5 */ mtsp %r1,%sr6 /* Restore sr6 */ - depi 3,31,2,%r31 /* ensure return to user mode. */ + depi PRIV_USER,31,2,%r31 /* ensure return to user mode. 
*/ #ifdef CONFIG_64BIT /* decide whether to reset the wide mode bit @@ -1929,6 +1846,10 @@ syscall_restore_rfi: extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 depi -1,7,1,%r20 /* T bit */ +#ifdef CONFIG_64BIT + extru,<> %r19,TIF_32BIT_PA_BIT,1,%r0 + depi -1,4,1,%r20 /* W bit */ +#endif STREG %r20,TASK_PT_PSW(%r1) /* Always store space registers, since sr3 can be changed (e.g. fork) */ @@ -1942,7 +1863,6 @@ syscall_restore_rfi: STREG %r25,TASK_PT_IASQ0(%r1) STREG %r25,TASK_PT_IASQ1(%r1) - /* XXX W bit??? */ /* Now if old D bit is clear, it means we didn't save all registers * on syscall entry, so do that now. This only happens on TRACEME * calls, or if someone attached to us while we were on a syscall. @@ -1967,7 +1887,7 @@ syscall_restore_rfi: STREG %r0,TASK_PT_SR2(%r1) LDREG TASK_PT_GR31(%r1),%r2 - depi 3,31,2,%r2 /* ensure return to user mode. */ + depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */ STREG %r2,TASK_PT_IAOQ0(%r1) ldo 4(%r2),%r2 STREG %r2,TASK_PT_IAOQ1(%r1) @@ -1976,10 +1896,10 @@ syscall_restore_rfi: pt_regs_ok: LDREG TASK_PT_IAOQ0(%r1),%r2 - depi 3,31,2,%r2 /* ensure return to user mode. */ + depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */ STREG %r2,TASK_PT_IAOQ0(%r1) LDREG TASK_PT_IAOQ1(%r1),%r2 - depi 3,31,2,%r2 + depi PRIV_USER,31,2,%r2 STREG %r2,TASK_PT_IAOQ1(%r1) b intr_restore copy %r25,%r16 @@ -2009,6 +1929,7 @@ _mcount: * calling mcount(), and 2 instructions for ftrace_stub(). That way we * have all on one L1 cacheline. */ + ldi 0, %arg3 b ftrace_function_trampoline copy %r3, %arg2 /* caller original %sp */ ftrace_stub: @@ -2026,6 +1947,168 @@ ftrace_stub: #endif ENDPROC_CFI(mcount) +#ifdef CONFIG_DYNAMIC_FTRACE + +#ifdef CONFIG_64BIT +#define FTRACE_FRAME_SIZE (2*FRAME_SIZE) +#else +#define FTRACE_FRAME_SIZE FRAME_SIZE +#endif +ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP) +ftrace_caller: + .global ftrace_caller + + STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp) + ldo -FTRACE_FRAME_SIZE(%sp), %r3 + STREG %rp, -RP_OFFSET(%r3) + + /* Offset 0 is already allocated for %r1 */ + STREG %r23, 2*REG_SZ(%r3) + STREG %r24, 3*REG_SZ(%r3) + STREG %r25, 4*REG_SZ(%r3) + STREG %r26, 5*REG_SZ(%r3) + STREG %r28, 6*REG_SZ(%r3) + STREG %r29, 7*REG_SZ(%r3) +#ifdef CONFIG_64BIT + STREG %r19, 8*REG_SZ(%r3) + STREG %r20, 9*REG_SZ(%r3) + STREG %r21, 10*REG_SZ(%r3) + STREG %r22, 11*REG_SZ(%r3) + STREG %r27, 12*REG_SZ(%r3) + STREG %r31, 13*REG_SZ(%r3) + loadgp + ldo -16(%sp),%r29 +#endif + LDREG 0(%r3), %r25 + copy %rp, %r26 + ldo -8(%r25), %r25 + ldi 0, %r23 /* no pt_regs */ + b,l ftrace_function_trampoline, %rp + copy %r3, %r24 + + LDREG -RP_OFFSET(%r3), %rp + LDREG 2*REG_SZ(%r3), %r23 + LDREG 3*REG_SZ(%r3), %r24 + LDREG 4*REG_SZ(%r3), %r25 + LDREG 5*REG_SZ(%r3), %r26 + LDREG 6*REG_SZ(%r3), %r28 + LDREG 7*REG_SZ(%r3), %r29 +#ifdef CONFIG_64BIT + LDREG 8*REG_SZ(%r3), %r19 + LDREG 9*REG_SZ(%r3), %r20 + LDREG 10*REG_SZ(%r3), %r21 + LDREG 11*REG_SZ(%r3), %r22 + LDREG 12*REG_SZ(%r3), %r27 + LDREG 13*REG_SZ(%r3), %r31 +#endif + LDREG 1*REG_SZ(%r3), %r3 + + LDREGM -FTRACE_FRAME_SIZE(%sp), %r1 + /* Adjust return point to jump back to beginning of traced function */ + ldo -4(%r1), %r1 + bv,n (%r1) + +ENDPROC_CFI(ftrace_caller) + +#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS +ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN, + CALLS,SAVE_RP,SAVE_SP) +ftrace_regs_caller: + .global ftrace_regs_caller + + ldo -FTRACE_FRAME_SIZE(%sp), %r1 + STREG %rp, -RP_OFFSET(%r1) + + copy %sp, %r1 + ldo PT_SZ_ALGN(%sp), %sp + + STREG %rp, PT_GR2(%r1) + 
STREG %r3, PT_GR3(%r1) + STREG %r4, PT_GR4(%r1) + STREG %r5, PT_GR5(%r1) + STREG %r6, PT_GR6(%r1) + STREG %r7, PT_GR7(%r1) + STREG %r8, PT_GR8(%r1) + STREG %r9, PT_GR9(%r1) + STREG %r10, PT_GR10(%r1) + STREG %r11, PT_GR11(%r1) + STREG %r12, PT_GR12(%r1) + STREG %r13, PT_GR13(%r1) + STREG %r14, PT_GR14(%r1) + STREG %r15, PT_GR15(%r1) + STREG %r16, PT_GR16(%r1) + STREG %r17, PT_GR17(%r1) + STREG %r18, PT_GR18(%r1) + STREG %r19, PT_GR19(%r1) + STREG %r20, PT_GR20(%r1) + STREG %r21, PT_GR21(%r1) + STREG %r22, PT_GR22(%r1) + STREG %r23, PT_GR23(%r1) + STREG %r24, PT_GR24(%r1) + STREG %r25, PT_GR25(%r1) + STREG %r26, PT_GR26(%r1) + STREG %r27, PT_GR27(%r1) + STREG %r28, PT_GR28(%r1) + STREG %r29, PT_GR29(%r1) + STREG %r30, PT_GR30(%r1) + STREG %r31, PT_GR31(%r1) + mfctl %cr11, %r26 + STREG %r26, PT_SAR(%r1) + + copy %rp, %r26 + LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25 + ldo -8(%r25), %r25 + ldo -FTRACE_FRAME_SIZE(%r1), %arg2 + b,l ftrace_function_trampoline, %rp + copy %r1, %arg3 /* struct pt_regs */ + + ldo -PT_SZ_ALGN(%sp), %r1 + + LDREG PT_SAR(%r1), %rp + mtctl %rp, %cr11 + + LDREG PT_GR2(%r1), %rp + LDREG PT_GR3(%r1), %r3 + LDREG PT_GR4(%r1), %r4 + LDREG PT_GR5(%r1), %r5 + LDREG PT_GR6(%r1), %r6 + LDREG PT_GR7(%r1), %r7 + LDREG PT_GR8(%r1), %r8 + LDREG PT_GR9(%r1), %r9 + LDREG PT_GR10(%r1),%r10 + LDREG PT_GR11(%r1),%r11 + LDREG PT_GR12(%r1),%r12 + LDREG PT_GR13(%r1),%r13 + LDREG PT_GR14(%r1),%r14 + LDREG PT_GR15(%r1),%r15 + LDREG PT_GR16(%r1),%r16 + LDREG PT_GR17(%r1),%r17 + LDREG PT_GR18(%r1),%r18 + LDREG PT_GR19(%r1),%r19 + LDREG PT_GR20(%r1),%r20 + LDREG PT_GR21(%r1),%r21 + LDREG PT_GR22(%r1),%r22 + LDREG PT_GR23(%r1),%r23 + LDREG PT_GR24(%r1),%r24 + LDREG PT_GR25(%r1),%r25 + LDREG PT_GR26(%r1),%r26 + LDREG PT_GR27(%r1),%r27 + LDREG PT_GR28(%r1),%r28 + LDREG PT_GR29(%r1),%r29 + LDREG PT_GR30(%r1),%r30 + LDREG PT_GR31(%r1),%r31 + + ldo -PT_SZ_ALGN(%sp), %sp + LDREGM -FTRACE_FRAME_SIZE(%sp), %r1 + /* Adjust return point to jump back to beginning of traced function */ + ldo -4(%r1), %r1 + bv,n (%r1) + +ENDPROC_CFI(ftrace_regs_caller) + +#endif +#endif + #ifdef CONFIG_FUNCTION_GRAPH_TRACER .align 8 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE) diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index e6f3b49f2fd7..042343492a28 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -1,9 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/parisc/kernel/firmware.c - safe PDC access routines * * PDC == Processor Dependent Code * - * See http://www.parisc-linux.org/documentation/index.html + * See PDC documentation at + * https://parisc.wiki.kernel.org/index.php/Technical_Documentation * for documentation describing the entry points and calling * conventions defined below. * @@ -12,12 +14,6 @@ * Copyright 2003 Grant Grundler <grundler parisc-linux org> * Copyright 2003,2004 Ryan Bradetich <rbrad@parisc-linux.org> * Copyright 2004,2006 Thibaut VARENE <varenet@parisc-linux.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * */ /* I think it would be in everyone's best interest to follow this @@ -55,7 +51,7 @@ * prumpf 991016 */ -#include <stdarg.h> +#include <linux/stdarg.h> #include <linux/delay.h> #include <linux/init.h> @@ -78,16 +74,16 @@ static DEFINE_SPINLOCK(pdc_lock); #endif -extern unsigned long pdc_result[NUM_PDC_RESULT]; -extern unsigned long pdc_result2[NUM_PDC_RESULT]; +static unsigned long pdc_result[NUM_PDC_RESULT] __aligned(8); +static unsigned long pdc_result2[NUM_PDC_RESULT] __aligned(8); #ifdef CONFIG_64BIT -#define WIDE_FIRMWARE 0x1 -#define NARROW_FIRMWARE 0x2 +#define WIDE_FIRMWARE PDC_MODEL_OS64 +#define NARROW_FIRMWARE PDC_MODEL_OS32 -/* Firmware needs to be initially set to narrow to determine the +/* Firmware needs to be initially set to narrow to determine the * actual firmware width. */ -int parisc_narrow_firmware __read_mostly = 1; +int parisc_narrow_firmware __ro_after_init = NARROW_FIRMWARE; #endif /* On most currently-supported platforms, IODC I/O calls are 32-bit calls @@ -127,10 +123,10 @@ static unsigned long f_extend(unsigned long address) #ifdef CONFIG_64BIT if(unlikely(parisc_narrow_firmware)) { if((address & 0xff000000) == 0xf0000000) - return 0xf0f0f0f000000000UL | (u32)address; + return (0xfffffff0UL << 32) | (u32)address; if((address & 0xf0000000) == 0xf0000000) - return 0xffffffff00000000UL | (u32)address; + return (0xffffffffUL << 32) | (u32)address; } #endif return address; @@ -138,7 +134,7 @@ static unsigned long f_extend(unsigned long address) /** * convert_to_wide - Convert the return buffer addresses into kernel addresses. - * @address: The return buffer from PDC. + * @addr: The return buffer from PDC. * * This function is used to convert the return buffer addresses retrieved from PDC * into kernel addresses when the PDC address size and kernel address size are @@ -164,20 +160,27 @@ void set_firmware_width_unlocked(void) ret = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0); + if (ret < 0) + return; convert_to_wide(pdc_result); if (pdc_result[0] != NARROW_FIRMWARE) parisc_narrow_firmware = 0; } - + /** * set_firmware_width - Determine if the firmware is wide or narrow. - * + * * This function must be called before any pdc_* function that uses the * convert_to_wide function. */ void set_firmware_width(void) { unsigned long flags; + + /* already initialized? */ + if (parisc_narrow_firmware != NARROW_FIRMWARE) + return; + spin_lock_irqsave(&pdc_lock, flags); set_firmware_width_unlocked(); spin_unlock_irqrestore(&pdc_lock, flags); @@ -254,8 +257,8 @@ int __init pdc_instr(unsigned int *instr) /** * pdc_chassis_info - Return chassis information. - * @result: The return buffer. * @chassis_info: The memory buffer address. + * @led_info: The size of the memory buffer address. * @len: The size of the memory buffer address. * * An HVERSION dependent call for returning the chassis information. @@ -279,7 +282,8 @@ int __init pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_inf /** * pdc_pat_chassis_send_log - Sends a PDC PAT CHASSIS log message. - * @retval: -1 on error, 0 on success. 
Other value are PDC errors + * @state: state of the machine + * @data: value for that state * * Must be correctly formatted or expect system crash */ @@ -302,7 +306,7 @@ int pdc_pat_chassis_send_log(unsigned long state, unsigned long data) /** * pdc_chassis_disp - Updates chassis code - * @retval: -1 on error, 0 on success + * @disp: value to show on display */ int pdc_chassis_disp(unsigned long disp) { @@ -317,8 +321,57 @@ int pdc_chassis_disp(unsigned long disp) } /** + * __pdc_cpu_rendezvous - Stop currently executing CPU and do not return. + */ +int __pdc_cpu_rendezvous(void) +{ + if (is_pdc_pat()) + return mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_RENDEZVOUS); + else + return mem_pdc_call(PDC_PROC, 1, 0); +} + +/** + * pdc_cpu_rendezvous_lock - Lock PDC while transitioning to rendezvous state + */ +void pdc_cpu_rendezvous_lock(void) __acquires(&pdc_lock) +{ + spin_lock(&pdc_lock); +} + +/** + * pdc_cpu_rendezvous_unlock - Unlock PDC after reaching rendezvous state + */ +void pdc_cpu_rendezvous_unlock(void) __releases(&pdc_lock) +{ + spin_unlock(&pdc_lock); +} + +/** + * pdc_pat_get_PDC_entrypoint - Get PDC entry point for current CPU + * @pdc_entry: pointer to where the PDC entry point should be stored + */ +int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry) +{ + int retval = 0; + unsigned long flags; + + if (!IS_ENABLED(CONFIG_SMP) || !is_pdc_pat()) { + *pdc_entry = MEM_PDC; + return 0; + } + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_GET_PDC_ENTRYPOINT, + __pa(pdc_result)); + *pdc_entry = pdc_result[0]; + spin_unlock_irqrestore(&pdc_lock, flags); + + return retval; +} +/** * pdc_chassis_warn - Fetches chassis warnings - * @retval: -1 on error, 0 on success + * @warn: The warning value to be shown */ int pdc_chassis_warn(unsigned long *warn) { @@ -411,7 +464,8 @@ int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info, unsigned long flags; spin_lock_irqsave(&pdc_lock, flags); - retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE, __pa(pdc_result), + memcpy(pdc_result2, mod_path, sizeof(*mod_path)); + retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE, __pa(pdc_result), __pa(pdc_result2), mod_index); convert_to_wide(pdc_result); memcpy(pdc_mod_info, pdc_result, sizeof(*pdc_mod_info)); @@ -470,20 +524,21 @@ int pdc_model_info(struct pdc_model *model) /** * pdc_model_sysmodel - Get the system model name. + * @os_id: The operating system ID asked for (an OS_ID_* value) * @name: A char array of at least 81 characters. * * Get system model name from PDC ROM (e.g. 9000/715 or 9000/778/B160L). * Using OS_ID_HPUX will return the equivalent of the 'modelname' command * on HP/UX. */ -int pdc_model_sysmodel(char *name) +int pdc_model_sysmodel(unsigned int os_id, char *name) { int retval; unsigned long flags; spin_lock_irqsave(&pdc_lock, flags); retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_SYSMODEL, __pa(pdc_result), - OS_ID_HPUX, __pa(name)); + os_id, __pa(name)); convert_to_wide(pdc_result); if (retval == PDC_OK) { @@ -498,7 +553,7 @@ int pdc_model_sysmodel(char *name) /** * pdc_model_versions - Identify the version number of each processor. - * @cpu_id: The return buffer. + * @versions: The return buffer. * @id: The id of the processor to check. * * Returns the version number for each processor component. @@ -569,6 +624,30 @@ int pdc_model_capabilities(unsigned long *capabilities) } /** + * pdc_model_platform_info - Returns machine product and serial number. + * @orig_prod_num: Return buffer for original product number. 
+ * @current_prod_num: Return buffer for current product number. + * @serial_no: Return buffer for serial number. + * + * Returns strings containing the original and current product numbers and the + * serial number of the system. + */ +int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, + char *serial_no) +{ + int retval; + unsigned long flags; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_GET_PLATFORM_INFO, + __pa(orig_prod_num), __pa(current_prod_num), __pa(serial_no)); + convert_to_wide(pdc_result); + spin_unlock_irqrestore(&pdc_lock, flags); + + return retval; +} + +/** * pdc_cache_info - Return cache and TLB information. * @cache_info: The return buffer. * @@ -609,7 +688,6 @@ int pdc_spaceid_bits(unsigned long *space_bits) return retval; } -#ifndef CONFIG_PA20 /** * pdc_btlb_info - Return block TLB information. * @btlb: The return buffer. @@ -618,18 +696,51 @@ int pdc_spaceid_bits(unsigned long *space_bits) */ int pdc_btlb_info(struct pdc_btlb_info *btlb) { - int retval; + int retval; unsigned long flags; - spin_lock_irqsave(&pdc_lock, flags); - retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0); - memcpy(btlb, pdc_result, sizeof(*btlb)); - spin_unlock_irqrestore(&pdc_lock, flags); + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; - if(retval < 0) { - btlb->max_size = 0; - } - return retval; + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0); + memcpy(btlb, pdc_result, sizeof(*btlb)); + spin_unlock_irqrestore(&pdc_lock, flags); + + if(retval < 0) { + btlb->max_size = 0; + } + return retval; +} + +int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len, + unsigned long entry_info, unsigned long slot) +{ + int retval; + unsigned long flags; + + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INSERT, (unsigned long) (vpage >> 32), + (unsigned long) vpage, physpage, len, entry_info, slot); + spin_unlock_irqrestore(&pdc_lock, flags); + return retval; +} + +int pdc_btlb_purge_all(void) +{ + int retval; + unsigned long flags; + + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL); + spin_unlock_irqrestore(&pdc_lock, flags); + return retval; } /** @@ -650,6 +761,9 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address, int retval; unsigned long flags; + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; + spin_lock_irqsave(&pdc_lock, flags); memcpy(pdc_result2, mod_path, sizeof(*mod_path)); retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result), @@ -659,7 +773,6 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address, return retval; } -#endif /* !CONFIG_PA20 */ /** * pdc_lan_station_id - Get the LAN address. @@ -921,8 +1034,8 @@ int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl) /** * pdc_pci_config_read - read PCI config space. - * @hpa token from PDC to indicate which PCI device - * @pci_addr configuration space address to read from + * @hpa: Token from PDC to indicate which PCI device + * @cfg_addr: Configuration space address to read from * * Read PCI Configuration space *before* linux PCI subsystem is running. */ @@ -944,9 +1057,9 @@ unsigned int pdc_pci_config_read(void *hpa, unsigned long cfg_addr) /** * pdc_pci_config_write - read PCI config space. 
- * @hpa token from PDC to indicate which PCI device - * @pci_addr configuration space address to write - * @val value we want in the 32-bit register + * @hpa: Token from PDC to indicate which PCI device + * @cfg_addr: Configuration space address to write + * @val: Value we want in the 32-bit register * * Write PCI Configuration space *before* linux PCI subsystem is running. */ @@ -1029,6 +1142,38 @@ int pdc_mem_pdt_read_entries(struct pdc_mem_read_pdt *pret, } /** + * pdc_pim_toc11 - Fetch TOC PIM 1.1 data from firmware. + * @ret: pointer to return buffer + */ +int pdc_pim_toc11(struct pdc_toc_pim_11 *ret) +{ + int retval; + unsigned long flags; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_PIM, PDC_PIM_TOC, __pa(pdc_result), + __pa(ret), sizeof(*ret)); + spin_unlock_irqrestore(&pdc_lock, flags); + return retval; +} + +/** + * pdc_pim_toc20 - Fetch TOC PIM 2.0 data from firmware. + * @ret: pointer to return buffer + */ +int pdc_pim_toc20(struct pdc_toc_pim_20 *ret) +{ + int retval; + unsigned long flags; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_PIM, PDC_PIM_TOC, __pa(pdc_result), + __pa(ret), sizeof(*ret)); + spin_unlock_irqrestore(&pdc_lock, flags); + return retval; +} + +/** * pdc_tod_set - Set the Time-Of-Day clock. * @sec: The number of seconds since epoch. * @usec: The number of micro seconds. @@ -1125,15 +1270,18 @@ int __init pdc_soft_power_info(unsigned long *power_reg) } /* - * pdc_soft_power_button - Control the soft power button behaviour - * @sw_control: 0 for hardware control, 1 for software control + * pdc_soft_power_button{_panic} - Control the soft power button behaviour + * @sw_control: 0 for hardware control, 1 for software control * * * This PDC function places the soft power button under software or * hardware control. - * Under software control the OS may control to when to allow to shut - * down the system. Under hardware control pressing the power button + * Under software control the OS may control to when to allow to shut + * down the system. Under hardware control pressing the power button * powers off the system immediately. + * + * The _panic version relies on spin_trylock to prevent deadlock + * on panic path. */ int pdc_soft_power_button(int sw_control) { @@ -1147,6 +1295,22 @@ int pdc_soft_power_button(int sw_control) return retval; } +int pdc_soft_power_button_panic(int sw_control) +{ + int retval; + unsigned long flags; + + if (!spin_trylock_irqsave(&pdc_lock, flags)) { + pr_emerg("Couldn't enable soft power button\n"); + return -EBUSY; /* ignored by the panic notifier */ + } + + retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_ENABLE, __pa(pdc_result), sw_control); + spin_unlock_irqrestore(&pdc_lock, flags); + + return retval; +} + /* * pdc_io_reset - Hack to avoid overlapping range registers of Bridges devices. * Primarily a problem on T600 (which parisc-linux doesn't support) but @@ -1181,9 +1345,8 @@ void pdc_io_reset_devices(void) #endif /* defined(BOOTLOADER) */ -/* locked by pdc_console_lock */ -static int __attribute__((aligned(8))) iodc_retbuf[32]; -static char __attribute__((aligned(64))) iodc_dbuf[4096]; +/* locked by pdc_lock */ +static char iodc_dbuf[4096] __page_aligned_bss; /** * pdc_iodc_print - Console print using IODC. 
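/*
 * Illustrative sketch only (not part of this patch): the kind of panic
 * notifier that pdc_soft_power_button_panic() above is written for.  On
 * the panic path pdc_lock may already be held, so the helper uses
 * spin_trylock_irqsave() and reports -EBUSY instead of deadlocking, and
 * the notifier simply ignores that.  The notifier and function names
 * below are assumptions for illustration; passing 0 hands the power
 * button back to hardware control so pressing it powers off immediately.
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <asm/pdc.h>

static int soft_power_panic_notify(struct notifier_block *nb,
				   unsigned long event, void *unused)
{
	/* may fail with -EBUSY if pdc_lock is contended; that is ignored */
	pdc_soft_power_button_panic(0);
	return NOTIFY_DONE;
}

static struct notifier_block soft_power_panic_nb = {
	.notifier_call = soft_power_panic_notify,
};

static int __init soft_power_panic_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &soft_power_panic_nb);
	return 0;
}
device_initcall(soft_power_panic_init);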
@@ -1197,15 +1360,19 @@ static char __attribute__((aligned(64))) iodc_dbuf[4096]; */ int pdc_iodc_print(const unsigned char *str, unsigned count) { - unsigned int i; + unsigned int i, found = 0; unsigned long flags; + count = min_t(unsigned int, count, sizeof(iodc_dbuf)); + + spin_lock_irqsave(&pdc_lock, flags); for (i = 0; i < count;) { switch(str[i]) { case '\n': iodc_dbuf[i+0] = '\r'; iodc_dbuf[i+1] = '\n'; i += 2; + found = 1; goto print; default: iodc_dbuf[i] = str[i]; @@ -1215,14 +1382,13 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) } print: - spin_lock_irqsave(&pdc_lock, flags); - real32_call(PAGE0->mem_cons.iodc_io, - (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT, - PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers), - __pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0); - spin_unlock_irqrestore(&pdc_lock, flags); + real32_call(PAGE0->mem_cons.iodc_io, + (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT, + PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers), + __pa(pdc_result), 0, __pa(iodc_dbuf), i, 0); + spin_unlock_irqrestore(&pdc_lock, flags); - return i; + return i - found; } #if !defined(BOOTLOADER) @@ -1247,10 +1413,11 @@ int pdc_iodc_getc(void) real32_call(PAGE0->mem_kbd.iodc_io, (unsigned long)PAGE0->mem_kbd.hpa, ENTRY_IO_CIN, PAGE0->mem_kbd.spa, __pa(PAGE0->mem_kbd.dp.layers), - __pa(iodc_retbuf), 0, __pa(iodc_dbuf), 1, 0); + __pa(pdc_result), 0, __pa(iodc_dbuf), 1, 0); ch = *iodc_dbuf; - status = *iodc_retbuf; + /* like convert_to_wide() but for first return value only: */ + status = *(int *)&pdc_result; spin_unlock_irqrestore(&pdc_lock, flags); if (status == 0) @@ -1260,17 +1427,25 @@ int pdc_iodc_getc(void) } int pdc_sti_call(unsigned long func, unsigned long flags, - unsigned long inptr, unsigned long outputr, - unsigned long glob_cfg) + unsigned long inptr, unsigned long outputr, + unsigned long glob_cfg, int do_call64) { - int retval; + int retval = 0; unsigned long irqflags; - spin_lock_irqsave(&pdc_lock, irqflags); - retval = real32_call(func, flags, inptr, outputr, glob_cfg); - spin_unlock_irqrestore(&pdc_lock, irqflags); + spin_lock_irqsave(&pdc_lock, irqflags); + if (IS_ENABLED(CONFIG_64BIT) && do_call64) { +#ifdef CONFIG_64BIT + retval = real64_call(func, flags, inptr, outputr, glob_cfg); +#else + WARN_ON(1); +#endif + } else { + retval = real32_call(func, flags, inptr, outputr, glob_cfg); + } + spin_unlock_irqrestore(&pdc_lock, irqflags); - return retval; + return retval; } EXPORT_SYMBOL(pdc_sti_call); @@ -1420,7 +1595,7 @@ int pdc_pat_get_irt(void *r_addr, unsigned long cell_num) /** * pdc_pat_pd_get_addr_map - Retrieve information about memory address ranges. - * @actlen: The return buffer. + * @actual_len: The return buffer. * @mem_addr: Pointer to the memory buffer. * @count: The number of bytes to read from the buffer. * @offset: The offset with respect to the beginning of the buffer. @@ -1443,7 +1618,7 @@ int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, } /** - * pdc_pat_pd_get_PDC_interface_revisions - Retrieve PDC interface revisions. + * pdc_pat_pd_get_pdc_revisions - Retrieve PDC interface revisions. * @legacy_rev: The legacy revision. * @pat_rev: The PAT revision. * @pdc_cap: The PDC capabilities. @@ -1498,7 +1673,7 @@ int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *mem_addr) * pdc_pat_io_pci_cfg_write - Retrieve information about memory address ranges. * @pci_addr: PCI configuration space address for which the write request is being made. * @pci_size: Size of write in bytes. 
Valid values are 1, 2, and 4. - * @value: Pointer to 1, 2, or 4 byte value in low order end of argument to be + * @val: Pointer to 1, 2, or 4 byte value in low order end of argument to be * written to PCI Config space. * */ @@ -1516,7 +1691,7 @@ int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val) } /** - * pdc_pat_mem_pdc_info - Retrieve information about page deallocation table + * pdc_pat_mem_pdt_info - Retrieve information about page deallocation table * @rinfo: memory pdt information * */ diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index e46a4157a894..10fd5b3e63e7 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -7,21 +7,26 @@ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * * future possible enhancements: - * - add CONFIG_DYNAMIC_FTRACE * - add CONFIG_STACK_TRACER */ #include <linux/init.h> #include <linux/ftrace.h> +#include <linux/uaccess.h> +#include <linux/kprobes.h> +#include <linux/ptrace.h> +#include <linux/jump_label.h> #include <asm/assembly.h> #include <asm/sections.h> #include <asm/ftrace.h> +#include <asm/text-patching.h> - -#define __hot __attribute__ ((__section__ (".text.hot"))) +#define __hot __section(".text.hot") #ifdef CONFIG_FUNCTION_GRAPH_TRACER +static DEFINE_STATIC_KEY_FALSE(ftrace_graph_enable); + /* * Hook the return address and push it in the stack of return addrs * in current thread info. @@ -46,22 +51,19 @@ static void __hot prepare_ftrace_return(unsigned long *parent, } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -void notrace __hot ftrace_function_trampoline(unsigned long parent, +static ftrace_func_t ftrace_func; + +asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent, unsigned long self_addr, - unsigned long org_sp_gr3) + unsigned long org_sp_gr3, + struct ftrace_regs *fregs) { - extern ftrace_func_t ftrace_trace_function; /* depends on CONFIG_DYNAMIC_FTRACE */ - extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); + extern struct ftrace_ops *function_trace_op; - if (ftrace_trace_function != ftrace_stub) { - /* struct ftrace_ops *op, struct pt_regs *regs); */ - ftrace_trace_function(parent, self_addr, NULL, NULL); - return; - } + ftrace_func(self_addr, parent, function_trace_op, fregs); #ifdef CONFIG_FUNCTION_GRAPH_TRACER - if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub || - ftrace_graph_entry != ftrace_graph_entry_stub) { + if (static_branch_unlikely(&ftrace_graph_enable)) { unsigned long *parent_rp; /* calculate pointer to %rp in stack */ @@ -76,3 +78,177 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent, #endif } +#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER) +int ftrace_enable_ftrace_graph_caller(void) +{ + static_key_enable(&ftrace_graph_enable.key); + return 0; +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + static_key_disable(&ftrace_graph_enable.key); + return 0; +} +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + ftrace_func = func; + return 0; +} + +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + return 0; +} + +unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr+(FTRACE_PATCHABLE_FUNCTION_SIZE-1)*4; +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE]; + u32 *tramp; + int size, ret, i; + void *ip; + +#ifdef CONFIG_64BIT + unsigned long addr2 = + (unsigned 
long)dereference_function_descriptor((void *)addr); + + u32 ftrace_trampoline[] = { + 0x73c10208, /* std,ma r1,100(sp) */ + 0x0c2110c1, /* ldd -10(r1),r1 */ + 0xe820d002, /* bve,n (r1) */ + addr2 >> 32, + addr2 & 0xffffffff, + 0xe83f1fd7, /* b,l,n .-14,r1 */ + }; + + u32 ftrace_trampoline_unaligned[] = { + addr2 >> 32, + addr2 & 0xffffffff, + 0x37de0200, /* ldo 100(sp),sp */ + 0x73c13e01, /* std r1,-100(sp) */ + 0x34213ff9, /* ldo -4(r1),r1 */ + 0x50213fc1, /* ldd -20(r1),r1 */ + 0xe820d002, /* bve,n (r1) */ + 0xe83f1fcf, /* b,l,n .-20,r1 */ + }; + + BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) > + FTRACE_PATCHABLE_FUNCTION_SIZE); +#else + u32 ftrace_trampoline[] = { + (u32)addr, + 0x6fc10080, /* stw,ma r1,40(sp) */ + 0x48213fd1, /* ldw -18(r1),r1 */ + 0xe820c002, /* bv,n r0(r1) */ + 0xe83f1fdf, /* b,l,n .-c,r1 */ + }; +#endif + + BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) > + FTRACE_PATCHABLE_FUNCTION_SIZE); + + size = sizeof(ftrace_trampoline); + tramp = ftrace_trampoline; + +#ifdef CONFIG_64BIT + if (rec->ip & 0x4) { + size = sizeof(ftrace_trampoline_unaligned); + tramp = ftrace_trampoline_unaligned; + } +#endif + + ip = (void *)(rec->ip + 4 - size); + + ret = copy_from_kernel_nofault(insn, ip, size); + if (ret) + return ret; + + for (i = 0; i < size / 4; i++) { + if (insn[i] != INSN_NOP) + return -EINVAL; + } + + __patch_text_multiple(ip, tramp, size); + return 0; +} + +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE]; + int i; + + for (i = 0; i < ARRAY_SIZE(insn); i++) + insn[i] = INSN_NOP; + + __patch_text((void *)rec->ip, INSN_NOP); + __patch_text_multiple((void *)rec->ip + 4 - sizeof(insn), + insn, sizeof(insn)-4); + return 0; +} +#endif + +#ifdef CONFIG_KPROBES_ON_FTRACE +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) +{ + struct kprobe_ctlblk *kcb; + struct pt_regs *regs; + struct kprobe *p; + int bit; + + if (unlikely(kprobe_ftrace_disabled)) + return; + + bit = ftrace_test_recursion_trylock(ip, parent_ip); + if (bit < 0) + return; + + regs = ftrace_get_regs(fregs); + p = get_kprobe((kprobe_opcode_t *)ip); + if (unlikely(!p) || kprobe_disabled(p)) + goto out; + + if (kprobe_running()) { + kprobes_inc_nmissed_count(p); + goto out; + } + + __this_cpu_write(current_kprobe, p); + + kcb = get_kprobe_ctlblk(); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + regs->iaoq[0] = ip; + regs->iaoq[1] = ip + 4; + + if (!p->pre_handler || !p->pre_handler(p, regs)) { + regs->iaoq[0] = ip + 4; + regs->iaoq[1] = ip + 8; + + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + } + __this_cpu_write(current_kprobe, NULL); +out: + ftrace_test_recursion_unlock(bit); +} +NOKPROBE_SYMBOL(kprobe_ftrace_handler); + +int arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.insn = NULL; + return 0; +} +#endif diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c index a2058953a53f..357d9cdab7ce 100644 --- a/arch/parisc/kernel/hardware.c +++ b/arch/parisc/kernel/hardware.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Hardware descriptions for HP 9000 based hardware, including * system types, SCSI controllers, DMA controllers, HPPB controllers @@ -5,25 +6,11 @@ * * Based on the document "PA-RISC 1.1 I/O Firmware Architecture * Reference Specification", March 7, 1999, version 0.96. 
This - * is available at http://parisc-linux.org/documentation/ + * is available at + * https://parisc.wiki.kernel.org/index.php/Technical_Documentation * * Copyright 1999 by Alex deVries <alex@onefishtwo.ca> * and copyright 1999 The Puffin Group Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * */ @@ -36,9 +23,6 @@ * HP PARISC Hardware Database * Access to this database is only possible during bootup * so don't reference this table after starting the init process - * - * NOTE: Product names which are listed here and ends with a '?' - * are guessed. If you know the correct name, please let us know. */ static struct hp_hardware hp_hardware_list[] __initdata = { @@ -225,7 +209,7 @@ static struct hp_hardware hp_hardware_list[] __initdata = { {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"}, {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"}, {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"}, - {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)?"}, + {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)"}, {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"}, {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"}, {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"}, @@ -279,11 +263,11 @@ static struct hp_hardware hp_hardware_list[] __initdata = { {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"}, {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"}, {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"}, - {HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast?"}, + {HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast"}, {HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"}, {HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"}, {HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"}, - {HPHW_NPROC,0x892,0x4,0x91,"Mt. Hamilton Slow Mako+?"}, + {HPHW_NPROC,0x892,0x4,0x91,"Mt. Hamilton Slow Mako+"}, {HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"}, {HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"}, {HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"}, @@ -1211,7 +1195,7 @@ static struct hp_hardware hp_hardware_list[] __initdata = { {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"}, - {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? 
Core RS-232"}, + {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast Core RS-232"}, {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index fbb4e43fda05..96e0264ac961 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -17,12 +17,12 @@ #include <asm/pdc.h> #include <asm/assembly.h> -#include <asm/pgtable.h> #include <linux/linkage.h> #include <linux/init.h> +#include <linux/pgtable.h> - .level LEVEL + .level 1.1 __INITDATA ENTRY(boot_args) @@ -35,7 +35,8 @@ END(boot_args) __HEAD .align 4 - .import init_thread_union,data + .import init_task,data + .import init_stack,data .import fault_vector_20,code /* IVA parisc 2.0 32 bit */ #ifndef CONFIG_64BIT .import fault_vector_11,code /* IVA parisc 1.1 32 bit */ @@ -69,6 +70,46 @@ $bss_loop: stw,ma %arg2,4(%r1) stw,ma %arg3,4(%r1) +#if defined(CONFIG_PA20) + /* check for 64-bit capable CPU as required by current kernel */ + ldi 32,%r10 + mtctl %r10,%cr11 + .level 2.0 + mfctl,w %cr11,%r10 + .level 1.1 + comib,<>,n 0,%r10,$cpu_ok + + load32 PA(msg1),%arg0 + ldi msg1_end-msg1,%arg1 +$iodc_panic: + copy %arg0, %r10 + copy %arg1, %r11 + load32 PA(init_stack),%sp +#define MEM_CONS 0x3A0 + ldw MEM_CONS+32(%r0),%arg0 // HPA + ldi ENTRY_IO_COUT,%arg1 + ldw MEM_CONS+36(%r0),%arg2 // SPA + ldw MEM_CONS+8(%r0),%arg3 // layers + load32 PA(__bss_start),%r1 + stw %r1,-52(%sp) // arg4 + stw %r0,-56(%sp) // arg5 + stw %r10,-60(%sp) // arg6 = ptr to text + stw %r11,-64(%sp) // arg7 = len + stw %r0,-68(%sp) // arg8 + load32 PA(.iodc_panic_ret), %rp + ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC + bv,n (%r1) +.iodc_panic_ret: + b . /* wait endless with ... */ + or %r10,%r10,%r10 /* qemu idle sleep */ +msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n" +msg1_end: + +$cpu_ok: +#endif + + .level PA_ASM_LEVEL + /* Initialize startup VM. Just map first 16/32 MB of memory */ load32 PA(swapper_pg_dir),%r4 mtctl %r4,%cr24 /* Initialize kernel root pointer */ @@ -123,12 +164,12 @@ $pgt_fill_loop: load32 start_parisc,%r11 /* And the initial task pointer */ - load32 init_thread_union,%r6 + load32 init_task,%r6 mtctl %r6,%cr30 /* And the stack pointer too */ - ldo THREAD_SZ_ALGN(%r6),%sp - + load32 init_stack,%sp + tophys_r1 %sp #if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER) .import _mcount,data /* initialize mcount FPTR */ @@ -138,10 +179,10 @@ $pgt_fill_loop: std %dp,0x18(%r10) #endif -#ifdef CONFIG_64BIT - /* Get PDCE_PROC for monarch CPU. */ #define MEM_PDC_LO 0x388 #define MEM_PDC_HI 0x35C +#ifdef CONFIG_64BIT + /* Get PDCE_PROC for monarch CPU. */ ldw MEM_PDC_LO(%r0),%r3 ldw MEM_PDC_HI(%r0),%r10 depd %r10, 31, 32, %r3 /* move to upper word */ @@ -161,6 +202,15 @@ $pgt_fill_loop: /* FALLTHROUGH */ .procend +#ifdef CONFIG_HOTPLUG_CPU + /* common_stext is far away in another section... jump there */ + load32 PA(common_stext), %rp + bv,n (%rp) + + /* common_stext and smp_slave_stext needs to be in text section */ + .text +#endif + /* ** Code Common to both Monarch and Slave processors. 
** Entry: @@ -186,12 +236,11 @@ common_stext: #endif /*CONFIG_SMP*/ #ifdef CONFIG_64BIT - tophys_r1 %sp + mfctl %cr30,%r6 /* PCX-W2 firmware bug */ + tophys_r1 %r6 /* Save the rfi target address */ - ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10 - tophys_r1 %r10 - std %r11, TASK_PT_GR11(%r10) + STREG %r11, TASK_PT_GR11(%r6) /* Switch to wide mode Superdome doesn't support narrow PDC ** calls. */ @@ -206,7 +255,6 @@ common_stext: ** Someday, palo might not do this for the Monarch either. */ 2: - mfctl %cr30,%r6 /* PCX-W2 firmware bug */ ldo PDC_PSW(%r0),%arg0 /* 21 */ ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */ @@ -216,15 +264,21 @@ common_stext: copy %r0,%arg3 stext_pdc_ret: + LDREG TASK_PT_GR11(%r6), %r11 + tovirt_r1 %r6 mtctl %r6,%cr30 /* restore task thread info */ +#endif - /* restore rfi target address*/ - ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10 - tophys_r1 %r10 - ldd TASK_PT_GR11(%r10), %r11 - tovirt_r1 %sp +#ifndef CONFIG_64BIT + /* clear all BTLBs */ + ldi PDC_BLOCK_TLB,%arg0 + load32 PA(stext_pdc_btlb_ret), %rp + ldw MEM_PDC_LO(%r0),%r3 + bv (%r3) + ldi PDC_BTLB_PURGE_ALL,%arg1 +stext_pdc_btlb_ret: #endif - + /* PARANOID: clear user scratch/user space SR's */ mtsp %r0,%sr0 mtsp %r0,%sr1 @@ -258,7 +312,7 @@ stext_pdc_ret: ldo R%PA(fault_vector_11)(%r10),%r10 $is_pa20: - .level LEVEL /* restore 1.1 || 2.0w */ + .level PA_ASM_LEVEL /* restore 1.1 || 2.0w */ #endif /*!CONFIG_64BIT*/ load32 PA(fault_vector_20),%r10 @@ -287,7 +341,9 @@ aligned_rfi: load32 KERNEL_PSW,%r10 mtctl %r10,%ipsw - + + tovirt_r1 %sp + /* Jump through hyperspace to Virt Mode */ rfi nop @@ -329,13 +385,27 @@ smp_slave_stext: mtsp %r0,%sr6 mtsp %r0,%sr7 +#ifdef CONFIG_64BIT + /* + * Enable Wide mode early, in case the task_struct for the idle + * task in smp_init_current_idle_task was allocated above 4GB. + */ +1: mfia %rp /* clear upper part of pcoq */ + ldo 2f-1b(%rp),%rp + depdi 0,31,32,%rp + bv (%rp) + ssm PSW_SM_W,%r0 +2: +#endif + /* Initialize the SP - monarch sets up smp_init_current_idle_task */ - load32 PA(smp_init_current_idle_task),%sp - LDREG 0(%sp),%sp /* load task address */ + load32 PA(smp_init_current_idle_task),%r6 + LDREG 0(%r6),%r6 + mtctl %r6,%cr30 + tophys_r1 %r6 + LDREG TASK_STACK(%r6),%sp tophys_r1 %sp - LDREG TASK_THREAD_INFO(%sp),%sp - mtctl %sp,%cr30 /* store in cr30 */ - ldo THREAD_SZ_ALGN(%sp),%sp + ldo FRAME_SIZE(%sp),%sp /* point CPU to kernel page tables */ load32 PA(swapper_pg_dir),%r4 @@ -360,10 +430,8 @@ smp_slave_stext: .procend #endif /* CONFIG_SMP */ -ENDPROC(parisc_kernel_start) - #ifndef CONFIG_64BIT - .section .data..read_mostly + .section .data..ro_after_init .align 4 .export $global$,data diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S index fde654115564..eb2e4bd67035 100644 --- a/arch/parisc/kernel/hpmc.S +++ b/arch/parisc/kernel/hpmc.S @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * HPMC (High Priority Machine Check) handler. * * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org> * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) * Copyright (C) 2000 Hewlett-Packard (John Marvin) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ @@ -56,10 +43,8 @@ * IODC requires 7K byte stack. That leaves 1K byte for os_hpmc. */ - __PAGE_ALIGNED_BSS - .align 4096 -hpmc_stack: - .block 16384 + .import toc_stack,data +#define hpmc_stack toc_stack /* re-use the TOC stack */ #define HPMC_IODC_BUF_SIZE 0x8000 @@ -302,13 +287,3 @@ os_hpmc_6: b . nop .align 16 /* make function length multiple of 16 bytes */ -.os_hpmc_end: - - - __INITRODATA -.globl os_hpmc_size - .align 4 - .type os_hpmc_size, @object - .size os_hpmc_size, 4 -os_hpmc_size: - .word .os_hpmc_end-.os_hpmc diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c index 35d05fdd7483..7ab2f2a54400 100644 --- a/arch/parisc/kernel/inventory.c +++ b/arch/parisc/kernel/inventory.c @@ -1,11 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * inventory.c * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries) * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard * @@ -23,6 +19,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/mm.h> +#include <linux/platform_device.h> #include <asm/hardware.h> #include <asm/io.h> #include <asm/mmzone.h> @@ -31,6 +28,7 @@ #include <asm/processor.h> #include <asm/page.h> #include <asm/parisc-device.h> +#include <asm/tlbflush.h> /* ** Debug options @@ -38,12 +36,12 @@ */ #undef DEBUG_PAT -int pdc_type __read_mostly = PDC_TYPE_ILLEGAL; +int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL; /* cell number and location (PAT firmware only) */ -unsigned long parisc_cell_num __read_mostly; -unsigned long parisc_cell_loc __read_mostly; -unsigned long parisc_pat_pdc_cap __read_mostly; +unsigned long parisc_cell_num __ro_after_init; +unsigned long parisc_cell_loc __ro_after_init; +unsigned long parisc_pat_pdc_cap __ro_after_init; void __init setup_pdc(void) @@ -638,4 +636,39 @@ void __init do_device_inventory(void) } printk(KERN_INFO "Found devices:\n"); print_parisc_devices(); + +#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) + pa_serialize_tlb_flushes = machine_has_merced_bus(); + if (pa_serialize_tlb_flushes) + pr_info("Merced bus found: Enable PxTLB serialization.\n"); +#endif + +#if defined(CONFIG_FW_CFG_SYSFS) + if (running_on_qemu) { + struct resource res[3] = {0,}; + unsigned int base; + + base = ((unsigned long long) PAGE0->pad0[2] << 32) + | PAGE0->pad0[3]; /* SeaBIOS stored it here */ + + res[0].name = "fw_cfg"; + res[0].start = base; + res[0].end = base + 8 - 1; + res[0].flags = IORESOURCE_MEM; + + res[1].name = "ctrl"; + res[1].start = 0; + res[1].flags = IORESOURCE_REG; + + res[2].name = "data"; + res[2].start = 4; + res[2].flags = IORESOURCE_REG; + + if (base) { + pr_info("Found qemu fw_cfg interface at %#08x\n", base); + platform_device_register_simple("fw_cfg", + PLATFORM_DEVID_NONE, res, 3); + } + } +#endif } diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 0ca254085a66..dff66be65d29 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Code to handle x86 
style IRQs plus some generic interrupt stuff. * @@ -6,20 +7,6 @@ * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org) * Copyright (C) 1999-2000 Grant Grundler * Copyright (c) 2005 Matthew Wilcox - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/bitops.h> #include <linux/errno.h> @@ -28,16 +15,15 @@ #include <linux/kernel_stat.h> #include <linux/seq_file.h> #include <linux/types.h> +#include <linux/sched/task_stack.h> #include <asm/io.h> +#include <asm/softirq_stack.h> #include <asm/smp.h> #include <asm/ldcw.h> #undef PARISC_IRQ_CR16_COUNTS -extern irqreturn_t timer_interrupt(int, void *); -extern irqreturn_t ipi_interrupt(int, void *); - #define EIEM_MASK(irq) (1UL<<(CPU_IRQ_MAX - irq)) /* Bits in EIEM correlate with cpu_irq_action[]. @@ -116,25 +102,12 @@ int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest) if (irqd_is_per_cpu(d)) return -EINVAL; - /* whatever mask they set, we just allow one CPU */ cpu_dest = cpumask_first_and(dest, cpu_online_mask); + if (cpu_dest >= nr_cpu_ids) + cpu_dest = cpumask_first(cpu_online_mask); return cpu_dest; } - -static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, - bool force) -{ - int cpu_dest; - - cpu_dest = cpu_check_affinity(d, dest); - if (cpu_dest < 0) - return -1; - - cpumask_copy(irq_data_get_affinity_mask(d), dest); - - return 0; -} #endif static struct irq_chip cpu_interrupt_type = { @@ -143,9 +116,6 @@ static struct irq_chip cpu_interrupt_type = { .irq_unmask = cpu_unmask_irq, .irq_ack = cpu_ack_irq, .irq_eoi = cpu_eoi_irq, -#ifdef CONFIG_SMP - .irq_set_affinity = cpu_set_affinity_irq, -#endif /* XXX: Needs to be written. We managed without it so far, but * we really ought to write it. 
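/*
 * Sketch of the CPU-selection rule that cpu_check_affinity() now
 * follows: pick the first CPU that is both requested and online, and if
 * the requested mask does not intersect the online mask at all, fall
 * back to any online CPU rather than failing.  The helper name is
 * illustrative only.
 */
#include <linux/cpumask.h>

static unsigned int pick_irq_target_cpu(const struct cpumask *requested)
{
	unsigned int cpu = cpumask_first_and(requested, cpu_online_mask);

	if (cpu >= nr_cpu_ids)		/* no requested CPU is online */
		cpu = cpumask_first(cpu_online_mask);

	return cpu;
}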
*/ @@ -175,10 +145,16 @@ int arch_show_interrupts(struct seq_file *p, int prec) # endif #endif #ifdef CONFIG_SMP - seq_printf(p, "%*s: ", prec, "RES"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); - seq_puts(p, " Rescheduling interrupts\n"); + if (num_online_cpus() > 1) { + seq_printf(p, "%*s: ", prec, "RES"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); + seq_puts(p, " Rescheduling interrupts\n"); + seq_printf(p, "%*s: ", prec, "CAL"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); + seq_puts(p, " Function call interrupts\n"); + } #endif seq_printf(p, "%*s: ", prec, "UAH"); for_each_online_cpu(j) @@ -220,12 +196,9 @@ int show_interrupts(struct seq_file *p, void *v) if (!action) goto skip; seq_printf(p, "%3d: ", i); -#ifdef CONFIG_SMP + for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); -#else - seq_printf(p, "%10u ", kstat_irqs(i)); -#endif + seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, j)); seq_printf(p, " %14s", irq_desc_get_chip(desc)->name); #ifndef PARISC_IRQ_CR16_COUNTS @@ -339,7 +312,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu) { #ifdef CONFIG_SMP struct irq_data *d = irq_get_irq_data(irq); - cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu)); + irq_data_update_affinity(d, cpumask_of(cpu)); #endif return per_cpu(cpu_data, cpu).txn_addr; @@ -380,7 +353,11 @@ static inline int eirr_to_irq(unsigned long eirr) /* * IRQ STACK - used for irq handler */ +#ifdef CONFIG_64BIT +#define IRQ_STACK_SIZE (4096 << 4) /* 64k irq stack size */ +#else #define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */ +#endif union irq_stack_union { unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; @@ -388,7 +365,7 @@ union irq_stack_union { volatile unsigned int lock[1]; }; -DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { +static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { .slock = { 1,1,1,1 }, }; #endif @@ -401,8 +378,7 @@ static inline void stack_overflow_check(struct pt_regs *regs) #ifdef CONFIG_DEBUG_STACKOVERFLOW #define STACK_MARGIN (256*6) - /* Our stack starts directly behind the thread_info struct. */ - unsigned long stack_start = (unsigned long) current_thread_info(); + unsigned long stack_start = (unsigned long) task_stack_page(current); unsigned long sp = regs->gr[30]; unsigned long stack_usage; unsigned int *last_usage; @@ -478,7 +454,7 @@ static void execute_on_irq_stack(void *func, unsigned long param1) union_ptr = &per_cpu(irq_stack_union, smp_processor_id()); irq_stack = (unsigned long) &union_ptr->stack; irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock), - 64); /* align for stack frame usage */ + FRAME_ALIGN); /* align for stack frame usage */ /* We may be called recursive. If we are already using the irq stack, * just continue to use it. 
Use spinlocks to serialize @@ -501,14 +477,16 @@ static void execute_on_irq_stack(void *func, unsigned long param1) *irq_stack_in_use = 1; } +#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK void do_softirq_own_stack(void) { execute_on_irq_stack(__do_softirq, 0); } +#endif #endif /* CONFIG_IRQSTACKS */ /* ONLY called from entry.S:intr_extint() */ -void do_cpu_irq_mask(struct pt_regs *regs) +asmlinkage void do_cpu_irq_mask(struct pt_regs *regs) { struct pt_regs *old_regs; unsigned long eirr_val; @@ -520,7 +498,7 @@ void do_cpu_irq_mask(struct pt_regs *regs) old_regs = set_irq_regs(regs); local_irq_disable(); - irq_enter(); + irq_enter_rcu(); eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu); if (!eirr_val) @@ -555,7 +533,7 @@ void do_cpu_irq_mask(struct pt_regs *regs) #endif /* CONFIG_IRQSTACKS */ out: - irq_exit(); + irq_exit_rcu(); set_irq_regs(old_regs); return; @@ -564,37 +542,27 @@ void do_cpu_irq_mask(struct pt_regs *regs) goto out; } -static struct irqaction timer_action = { - .handler = timer_interrupt, - .name = "timer", - .flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL, -}; - -#ifdef CONFIG_SMP -static struct irqaction ipi_action = { - .handler = ipi_interrupt, - .name = "IPI", - .flags = IRQF_PERCPU, -}; -#endif - static void claim_cpu_irqs(void) { + unsigned long flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL; int i; + for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) { irq_set_chip_and_handler(i, &cpu_interrupt_type, handle_percpu_irq); } irq_set_handler(TIMER_IRQ, handle_percpu_irq); - setup_irq(TIMER_IRQ, &timer_action); + if (request_irq(TIMER_IRQ, timer_interrupt, flags, "timer", NULL)) + pr_err("Failed to register timer interrupt\n"); #ifdef CONFIG_SMP irq_set_handler(IPI_IRQ, handle_percpu_irq); - setup_irq(IPI_IRQ, &ipi_action); + if (request_irq(IPI_IRQ, ipi_interrupt, IRQF_PERCPU, "IPI", NULL)) + pr_err("Failed to register IPI interrupt\n"); #endif } -void __init init_IRQ(void) +void init_IRQ(void) { local_irq_disable(); /* PARANOID - should already be disabled */ mtctl(~0UL, 23); /* EIRR : clear all pending external intr */ diff --git a/arch/parisc/kernel/jump_label.c b/arch/parisc/kernel/jump_label.c new file mode 100644 index 000000000000..ea51f15bf0e6 --- /dev/null +++ b/arch/parisc/kernel/jump_label.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 Helge Deller <deller@gmx.de> + * + * Based on arch/arm64/kernel/jump_label.c + */ +#include <linux/kernel.h> +#include <linux/jump_label.h> +#include <linux/bug.h> +#include <asm/alternative.h> +#include <asm/text-patching.h> + +static inline int reassemble_17(int as17) +{ + return (((as17 & 0x10000) >> 16) | + ((as17 & 0x0f800) << 5) | + ((as17 & 0x00400) >> 8) | + ((as17 & 0x003ff) << 3)); +} + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + void *addr = (void *)jump_entry_code(entry); + u32 insn; + + if (type == JUMP_LABEL_JMP) { + void *target = (void *)jump_entry_target(entry); + int distance = target - addr; + /* + * Encode the PA1.1 "b,n" instruction with a 17-bit + * displacement. In case we hit the BUG(), we could use + * another branch instruction with a 22-bit displacement on + * 64-bit CPUs instead. But this seems sufficient for now. 
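/*
 * Worked example of the range check below, assuming the encoding
 * described above: the "b,n" displacement field holds 17 bits of signed
 * words, is shifted left by 2, and is taken relative to the instruction
 * address + 8.  That is a reach of -65536..+65535 words, i.e. roughly
 * +/-256 KiB, which is what the BUG_ON() bounds of -262144..262143
 * bytes express once the 8-byte pipeline offset has been subtracted.
 * The helper below is illustrative only.
 */
#include <linux/types.h>

static bool branch17_reachable(void *from, void *to)
{
	long distance = (long)to - (long)from - 8;	/* pipeline offset */

	return distance >= -262144 && distance <= 262143;
}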
+ */ + distance -= 8; + BUG_ON(distance > 262143 || distance < -262144); + insn = 0xe8000002 | reassemble_17(distance >> 2); + } else { + insn = INSN_NOP; + } + + patch_text(addr, insn); +} diff --git a/arch/parisc/kernel/kexec.c b/arch/parisc/kernel/kexec.c new file mode 100644 index 000000000000..db57345a9daf --- /dev/null +++ b/arch/parisc/kernel/kexec.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/kernel.h> +#include <linux/console.h> +#include <linux/kexec.h> +#include <linux/delay.h> +#include <linux/reboot.h> + +#include <asm/cacheflush.h> +#include <asm/sections.h> + +extern void relocate_new_kernel(unsigned long head, + unsigned long start, + unsigned long phys); + +extern const unsigned int relocate_new_kernel_size; +extern unsigned int kexec_initrd_start_offset; +extern unsigned int kexec_initrd_end_offset; +extern unsigned int kexec_cmdline_offset; +extern unsigned int kexec_free_mem_offset; + +static void kexec_show_segment_info(const struct kimage *kimage, + unsigned long n) +{ + pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n", + n, + kimage->segment[n].mem, + kimage->segment[n].mem + kimage->segment[n].memsz, + (unsigned long)kimage->segment[n].memsz, + (unsigned long)kimage->segment[n].memsz / PAGE_SIZE); +} + +static void kexec_image_info(const struct kimage *kimage) +{ + unsigned long i; + + pr_debug("kexec kimage info:\n"); + pr_debug(" type: %d\n", kimage->type); + pr_debug(" start: %lx\n", kimage->start); + pr_debug(" head: %lx\n", kimage->head); + pr_debug(" nr_segments: %lu\n", kimage->nr_segments); + + for (i = 0; i < kimage->nr_segments; i++) + kexec_show_segment_info(kimage, i); + +#ifdef CONFIG_KEXEC_FILE + if (kimage->file_mode) { + pr_debug("cmdline: %.*s\n", (int)kimage->cmdline_buf_len, + kimage->cmdline_buf); + } +#endif +} + +void machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ +} + +void machine_shutdown(void) +{ + smp_send_stop(); + while (num_online_cpus() > 1) { + cpu_relax(); + mdelay(1); + } +} + +void machine_kexec(struct kimage *image) +{ +#ifdef CONFIG_64BIT + Elf64_Fdesc desc; +#endif + void (*reloc)(unsigned long head, + unsigned long start, + unsigned long phys); + + unsigned long phys = page_to_phys(image->control_code_page); + void *virt = (void *)__fix_to_virt(FIX_TEXT_KEXEC); + struct kimage_arch *arch = &image->arch; + + set_fixmap(FIX_TEXT_KEXEC, phys); + + flush_cache_all(); + +#ifdef CONFIG_64BIT + reloc = (void *)&desc; + desc.addr = (long long)virt; +#else + reloc = (void *)virt; +#endif + + memcpy(virt, dereference_function_descriptor(relocate_new_kernel), + relocate_new_kernel_size); + + *(unsigned long *)(virt + kexec_cmdline_offset) = arch->cmdline; + *(unsigned long *)(virt + kexec_initrd_start_offset) = arch->initrd_start; + *(unsigned long *)(virt + kexec_initrd_end_offset) = arch->initrd_end; + *(unsigned long *)(virt + kexec_free_mem_offset) = PAGE0->mem_free; + + flush_cache_all(); + flush_tlb_all(); + local_irq_disable(); + + reloc(image->head & PAGE_MASK, image->start, phys); +} + +int machine_kexec_prepare(struct kimage *image) +{ + kexec_image_info(image); + return 0; +} diff --git a/arch/parisc/kernel/kexec_file.c b/arch/parisc/kernel/kexec_file.c new file mode 100644 index 000000000000..3fc82130b6c3 --- /dev/null +++ b/arch/parisc/kernel/kexec_file.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Load ELF vmlinux file for the kexec_file_load syscall. 
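/*
 * Sketch of how this loader is driven from user space, assuming the C
 * library exposes SYS_kexec_file_load (file names and command line are
 * examples only): kexec_file_load() takes the kernel and optional
 * initrd as file descriptors, plus a command line whose length must
 * include the terminating NUL.  A later reboot with
 * LINUX_REBOOT_CMD_KEXEC then jumps into the staged kernel.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

static int stage_new_kernel(void)
{
	const char cmdline[] = "console=ttyS0 root=/dev/sda1";
	int kernel_fd = open("/boot/vmlinux", O_RDONLY);
	int initrd_fd = open("/boot/initrd.img", O_RDONLY);

	if (kernel_fd < 0 || initrd_fd < 0)
		return -1;

	return syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		       sizeof(cmdline), cmdline, 0UL);
}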
+ * + * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org> + * + */ +#include <linux/elf.h> +#include <linux/kexec.h> +#include <linux/libfdt.h> +#include <linux/module.h> +#include <linux/of_fdt.h> +#include <linux/slab.h> +#include <linux/types.h> + +static void *elf_load(struct kimage *image, char *kernel_buf, + unsigned long kernel_len, char *initrd, + unsigned long initrd_len, char *cmdline, + unsigned long cmdline_len) +{ + int ret, i; + unsigned long kernel_load_addr; + struct elfhdr ehdr; + struct kexec_elf_info elf_info; + struct kexec_buf kbuf = { .image = image, .buf_min = 0, + .buf_max = -1UL, }; + + ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info); + if (ret) + goto out; + + ret = kexec_elf_load(image, &ehdr, &elf_info, &kbuf, &kernel_load_addr); + if (ret) + goto out; + + image->start = __pa(elf_info.ehdr->e_entry); + + for (i = 0; i < image->nr_segments; i++) + image->segment[i].mem = __pa(image->segment[i].mem); + + kexec_dprintk("Loaded the kernel at 0x%lx, entry at 0x%lx\n", + kernel_load_addr, image->start); + + if (initrd != NULL) { + kbuf.buffer = initrd; + kbuf.bufsz = kbuf.memsz = initrd_len; + kbuf.buf_align = PAGE_SIZE; + kbuf.top_down = false; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + ret = kexec_add_buffer(&kbuf); + if (ret) + goto out; + + kexec_dprintk("Loaded initrd at 0x%lx\n", kbuf.mem); + image->arch.initrd_start = kbuf.mem; + image->arch.initrd_end = kbuf.mem + initrd_len; + } + + if (cmdline != NULL) { + kbuf.buffer = cmdline; + kbuf.bufsz = kbuf.memsz = ALIGN(cmdline_len, 8); + kbuf.buf_align = PAGE_SIZE; + kbuf.top_down = false; + kbuf.buf_min = PAGE0->mem_free + PAGE_SIZE; + kbuf.buf_max = kernel_load_addr; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + ret = kexec_add_buffer(&kbuf); + if (ret) + goto out; + + kexec_dprintk("Loaded cmdline at 0x%lx\n", kbuf.mem); + image->arch.cmdline = kbuf.mem; + } +out: + return NULL; +} + +const struct kexec_file_ops kexec_elf_ops = { + .probe = kexec_elf_probe, + .load = elf_load, +}; + +const struct kexec_file_ops * const kexec_file_loaders[] = { + &kexec_elf_ops, + NULL +}; diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c new file mode 100644 index 000000000000..fee81f877525 --- /dev/null +++ b/arch/parisc/kernel/kgdb.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PA-RISC KGDB support + * + * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org> + * Copyright (c) 2022 Helge Deller <deller@gmx.de> + * + */ + +#include <linux/kgdb.h> +#include <linux/string.h> +#include <linux/sched.h> +#include <linux/notifier.h> +#include <linux/kdebug.h> +#include <linux/uaccess.h> +#include <asm/ptrace.h> +#include <asm/traps.h> +#include <asm/processor.h> +#include <asm/text-patching.h> +#include <asm/cacheflush.h> + +const struct kgdb_arch arch_kgdb_ops = { + .gdb_bpt_instr = { 0x03, 0xff, 0xa0, 0x1f } +}; + +static int __kgdb_notify(struct die_args *args, unsigned long cmd) +{ + struct pt_regs *regs = args->regs; + + if (kgdb_handle_exception(1, args->signr, cmd, regs)) + return NOTIFY_DONE; + return NOTIFY_STOP; +} + +static int kgdb_notify(struct notifier_block *self, + unsigned long cmd, void *ptr) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = __kgdb_notify(ptr, cmd); + local_irq_restore(flags); + + return ret; +} + +static struct notifier_block kgdb_notifier = { + .notifier_call = kgdb_notify, + .priority = -INT_MAX, +}; + +int kgdb_arch_init(void) +{ + return register_die_notifier(&kgdb_notifier); +} + +void kgdb_arch_exit(void) +{ + 
unregister_die_notifier(&kgdb_notifier); +} + +void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + struct parisc_gdb_regs *gr = (struct parisc_gdb_regs *)gdb_regs; + + memset(gr, 0, sizeof(struct parisc_gdb_regs)); + + memcpy(gr->gpr, regs->gr, sizeof(gr->gpr)); + memcpy(gr->fr, regs->fr, sizeof(gr->fr)); + + gr->sr0 = regs->sr[0]; + gr->sr1 = regs->sr[1]; + gr->sr2 = regs->sr[2]; + gr->sr3 = regs->sr[3]; + gr->sr4 = regs->sr[4]; + gr->sr5 = regs->sr[5]; + gr->sr6 = regs->sr[6]; + gr->sr7 = regs->sr[7]; + + gr->sar = regs->sar; + gr->iir = regs->iir; + gr->isr = regs->isr; + gr->ior = regs->ior; + gr->ipsw = regs->ipsw; + gr->cr27 = regs->cr27; + + gr->iaoq_f = regs->iaoq[0]; + gr->iasq_f = regs->iasq[0]; + + gr->iaoq_b = regs->iaoq[1]; + gr->iasq_b = regs->iasq[1]; +} + +void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) +{ + struct parisc_gdb_regs *gr = (struct parisc_gdb_regs *)gdb_regs; + + + memcpy(regs->gr, gr->gpr, sizeof(regs->gr)); + memcpy(regs->fr, gr->fr, sizeof(regs->fr)); + + regs->sr[0] = gr->sr0; + regs->sr[1] = gr->sr1; + regs->sr[2] = gr->sr2; + regs->sr[3] = gr->sr3; + regs->sr[4] = gr->sr4; + regs->sr[5] = gr->sr5; + regs->sr[6] = gr->sr6; + regs->sr[7] = gr->sr7; + + regs->sar = gr->sar; + regs->iir = gr->iir; + regs->isr = gr->isr; + regs->ior = gr->ior; + regs->ipsw = gr->ipsw; + regs->cr27 = gr->cr27; + + regs->iaoq[0] = gr->iaoq_f; + regs->iasq[0] = gr->iasq_f; + + regs->iaoq[1] = gr->iaoq_b; + regs->iasq[1] = gr->iasq_b; +} + +void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, + struct task_struct *task) +{ + struct pt_regs *regs = task_pt_regs(task); + unsigned long gr30, iaoq; + + gr30 = regs->gr[30]; + iaoq = regs->iaoq[0]; + + regs->gr[30] = regs->ksp; + regs->iaoq[0] = regs->kpc; + pt_regs_to_gdb_regs(gdb_regs, regs); + + regs->gr[30] = gr30; + regs->iaoq[0] = iaoq; + +} + +static void step_instruction_queue(struct pt_regs *regs) +{ + regs->iaoq[0] = regs->iaoq[1]; + regs->iaoq[1] += 4; +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + regs->iaoq[0] = ip; + regs->iaoq[1] = ip + 4; +} + +int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) +{ + int ret = copy_from_kernel_nofault(bpt->saved_instr, + (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); + if (ret) + return ret; + + __patch_text((void *)bpt->bpt_addr, + *(unsigned int *)&arch_kgdb_ops.gdb_bpt_instr); + return ret; +} + +int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) +{ + __patch_text((void *)bpt->bpt_addr, *(unsigned int *)&bpt->saved_instr); + return 0; +} + +int kgdb_arch_handle_exception(int trap, int signo, + int err_code, char *inbuf, char *outbuf, + struct pt_regs *regs) +{ + unsigned long addr; + char *p = inbuf + 1; + + switch (inbuf[0]) { + case 'D': + case 'c': + case 'k': + kgdb_contthread = NULL; + kgdb_single_step = 0; + + if (kgdb_hex2long(&p, &addr)) + kgdb_arch_set_pc(regs, addr); + else if (trap == 9 && regs->iir == + PARISC_KGDB_COMPILED_BREAK_INSN) + step_instruction_queue(regs); + return 0; + case 's': + kgdb_single_step = 1; + if (kgdb_hex2long(&p, &addr)) { + kgdb_arch_set_pc(regs, addr); + } else if (trap == 9 && regs->iir == + PARISC_KGDB_COMPILED_BREAK_INSN) { + step_instruction_queue(regs); + mtctl(-1, 0); + } else { + mtctl(0, 0); + } + regs->gr[0] |= PSW_R; + return 0; + + } + return -1; +} diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c new file mode 100644 index 000000000000..9255adba67a3 --- /dev/null +++ b/arch/parisc/kernel/kprobes.c @@ -0,0 +1,228 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * arch/parisc/kernel/kprobes.c + * + * PA-RISC kprobes implementation + * + * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org> + * Copyright (c) 2022 Helge Deller <deller@gmx.de> + */ + +#include <linux/types.h> +#include <linux/kprobes.h> +#include <linux/slab.h> +#include <asm/cacheflush.h> +#include <asm/text-patching.h> + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + if ((unsigned long)p->addr & 3UL) + return -EINVAL; + + p->ainsn.insn = get_insn_slot(); + if (!p->ainsn.insn) + return -ENOMEM; + + /* + * Set up new instructions. Second break instruction will + * trigger call of parisc_kprobe_ss_handler(). + */ + p->opcode = *p->addr; + p->ainsn.insn[0] = p->opcode; + p->ainsn.insn[1] = PARISC_KPROBES_BREAK_INSN2; + + flush_insn_slot(p); + return 0; +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + if (!p->ainsn.insn) + return; + + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + patch_text(p->addr, PARISC_KPROBES_BREAK_INSN); +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + patch_text(p->addr, p->opcode); +} + +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; +} + +static inline void __kprobes set_current_kprobe(struct kprobe *p) +{ + __this_cpu_write(current_kprobe, p); +} + +static void __kprobes setup_singlestep(struct kprobe *p, + struct kprobe_ctlblk *kcb, struct pt_regs *regs) +{ + kcb->iaoq[0] = regs->iaoq[0]; + kcb->iaoq[1] = regs->iaoq[1]; + instruction_pointer_set(regs, (unsigned long)p->ainsn.insn); +} + +int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + + preempt_disable(); + + kcb = get_kprobe_ctlblk(); + p = get_kprobe((unsigned long *)regs->iaoq[0]); + + if (!p) { + preempt_enable_no_resched(); + return 0; + } + + if (kprobe_running()) { + /* + * We have reentered the kprobe_handler, since another kprobe + * was hit while within the handler, we save the original + * kprobes and single step on the instruction of the new probe + * without calling any user handlers to avoid recursive + * kprobes. + */ + save_previous_kprobe(kcb); + set_current_kprobe(p); + kprobes_inc_nmissed_count(p); + setup_singlestep(p, kcb, regs); + kcb->kprobe_status = KPROBE_REENTER; + return 1; + } + + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + /* If we have no pre-handler or it returned 0, we continue with + * normal processing. If we have a pre-handler and it returned + * non-zero - which means user handler setup registers to exit + * to another instruction, we must skip the single stepping. 
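/*
 * Minimal usage sketch for the kprobes support in this file: a module
 * that puts a probe on an arbitrary kernel function and logs the probed
 * address from its pre-handler.  Returning 0 from the pre-handler lets
 * parisc_kprobe_break_handler() continue with single-stepping the saved
 * instruction, as described above.  The probed symbol and function
 * names in the sketch are examples only.
 */
#include <linux/kprobes.h>
#include <linux/module.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %px\n", (void *)instruction_pointer(regs));
	return 0;	/* continue: single-step the original insn */
}

static struct kprobe demo_kp = {
	.symbol_name	= "kernel_clone",
	.pre_handler	= demo_pre,
};

static int __init demo_kprobe_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_kprobe_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_kprobe_init);
module_exit(demo_kprobe_exit);
MODULE_LICENSE("GPL");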
+ */ + + if (!p->pre_handler || !p->pre_handler(p, regs)) { + setup_singlestep(p, kcb, regs); + kcb->kprobe_status = KPROBE_HIT_SS; + } else { + reset_current_kprobe(); + preempt_enable_no_resched(); + } + return 1; +} + +int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + struct kprobe *p = kprobe_running(); + + if (!p) + return 0; + + if (regs->iaoq[0] != (unsigned long)p->ainsn.insn+4) + return 0; + + /* restore back original saved kprobe variables and continue */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + return 1; + } + + /* for absolute branch instructions we can copy iaoq_b. for relative + * branch instructions we need to calculate the new address based on the + * difference between iaoq_f and iaoq_b. We cannot use iaoq_b without + * modifications because it's based on our ainsn.insn address. + */ + + if (p->post_handler) + p->post_handler(p, regs, 0); + + switch (regs->iir >> 26) { + case 0x38: /* BE */ + case 0x39: /* BE,L */ + case 0x3a: /* BV */ + case 0x3b: /* BVE */ + /* for absolute branches, regs->iaoq[1] has already the right + * address + */ + regs->iaoq[0] = kcb->iaoq[1]; + break; + default: + regs->iaoq[0] = kcb->iaoq[1]; + regs->iaoq[1] = regs->iaoq[0] + 4; + break; + } + kcb->kprobe_status = KPROBE_HIT_SSDONE; + reset_current_kprobe(); + return 1; +} + +void __kretprobe_trampoline(void) +{ + asm volatile("nop"); + asm volatile("nop"); +} + +static int __kprobes trampoline_probe_handler(struct kprobe *p, + struct pt_regs *regs); + +static struct kprobe trampoline_p = { + .pre_handler = trampoline_probe_handler +}; + +static int __kprobes trampoline_probe_handler(struct kprobe *p, + struct pt_regs *regs) +{ + __kretprobe_trampoline_handler(regs, NULL); + + return 1; +} + +void arch_kretprobe_fixup_return(struct pt_regs *regs, + kprobe_opcode_t *correct_ret_addr) +{ + regs->gr[2] = (unsigned long)correct_ret_addr; +} + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *)regs->gr[2]; + ri->fp = NULL; + + /* Replace the return addr with trampoline addr. */ + regs->gr[2] = (unsigned long)trampoline_p.addr; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + return p->addr == trampoline_p.addr; +} + +int __init arch_init_kprobes(void) +{ + trampoline_p.addr = (kprobe_opcode_t *) + dereference_function_descriptor(__kretprobe_trampoline); + return register_kprobe(&trampoline_p); +} diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 43778420614b..4e5d991b2b65 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -1,29 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* Kernel dynamically loadable module help for PARISC. * * The best reference for this stuff is probably the Processor- * Specific ELF Supplement for PA-RISC: - * http://ftp.parisc-linux.org/docs/arch/elf-pa-hp.pdf + * https://parisc.wiki.kernel.org/index.php/File:Elf-pa-hp.pdf * - * Linux/PA-RISC Project (http://www.parisc-linux.org/) + * Linux/PA-RISC Project * Copyright (C) 2003 Randolph Chung <tausq at debian . org> * Copyright (C) 2008 Helge Deller <deller@gmx.de> * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * * Notes: * - PLT stub handling * On 32bit (and sometimes 64bit) and with big kernel modules like xfs or @@ -42,38 +27,31 @@ * We are not doing SEGREL32 handling correctly. According to the ABI, we * should do a value offset, like this: * if (in_init(me, (void *)val)) - * val -= (uint32_t)me->init_layout.base; + * val -= (uint32_t)me->mem[MOD_INIT_TEXT].base; * else - * val -= (uint32_t)me->core_layout.base; + * val -= (uint32_t)me->mem[MOD_TEXT].base; * However, SEGREL32 is used only for PARISC unwind entries, and we want * those entries to have an absolute address, and not just an offset. * - * The unwind table mechanism has the ability to specify an offset for + * The unwind table mechanism has the ability to specify an offset for * the unwind table; however, because we split off the init functions into - * a different piece of memory, it is not possible to do this using a + * a different piece of memory, it is not possible to do this using a * single offset. Instead, we use the above hack for now. */ #include <linux/moduleloader.h> #include <linux/elf.h> -#include <linux/vmalloc.h> #include <linux/fs.h> +#include <linux/ftrace.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/bug.h> #include <linux/mm.h> #include <linux/slab.h> -#include <asm/pgtable.h> #include <asm/unwind.h> #include <asm/sections.h> -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(fmt...) -#endif - #define RELOC_REACHABLE(val, bits) \ (( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \ ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ? \ @@ -97,25 +75,6 @@ * allows us to allocate up to 4095 GOT entries. 
*/ #define MAX_GOTS 4095 -/* three functions to determine where in the module core - * or init pieces the location is */ -static inline int in_init(struct module *me, void *loc) -{ - return (loc >= me->init_layout.base && - loc <= (me->init_layout.base + me->init_layout.size)); -} - -static inline int in_core(struct module *me, void *loc) -{ - return (loc >= me->core_layout.base && - loc <= (me->core_layout.base + me->core_layout.size)); -} - -static inline int in_local(struct module *me, void *loc) -{ - return in_init(me, loc) || in_core(me, loc); -} - #ifndef CONFIG_64BIT struct got_entry { Elf32_Addr addr; @@ -213,17 +172,6 @@ static inline int reassemble_22(int as22) ((as22 & 0x0003ff) << 3)); } -void *module_alloc(unsigned long size) -{ - /* using RWX means less protection for modules, but it's - * easier than trying to map the text, data, init_text and - * init_data correctly */ - return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, - GFP_KERNEL, - PAGE_KERNEL_RWX, 0, NUMA_NO_NODE, - __builtin_return_address(0)); -} - #ifndef CONFIG_64BIT static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n) { @@ -315,7 +263,7 @@ unsigned int arch_mod_section_prepend(struct module *mod, * sizeof(struct stub_entry); } -#define CONST +#define CONST int module_frob_arch_sections(CONST Elf_Ehdr *hdr, CONST Elf_Shdr *sechdrs, CONST char *secstrings, @@ -323,6 +271,7 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, { unsigned long gots = 0, fdescs = 0, len; unsigned int i; + struct module_memory *mod_mem; len = hdr->e_shnum * sizeof(me->arch.section[0]); me->arch.section = kzalloc(len, GFP_KERNEL); @@ -367,14 +316,15 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, me->arch.section[s].stub_entries += count; } + mod_mem = &me->mem[MOD_TEXT]; /* align things a bit */ - me->core_layout.size = ALIGN(me->core_layout.size, 16); - me->arch.got_offset = me->core_layout.size; - me->core_layout.size += gots * sizeof(struct got_entry); + mod_mem->size = ALIGN(mod_mem->size, 16); + me->arch.got_offset = mod_mem->size; + mod_mem->size += gots * sizeof(struct got_entry); - me->core_layout.size = ALIGN(me->core_layout.size, 16); - me->arch.fdesc_offset = me->core_layout.size; - me->core_layout.size += fdescs * sizeof(Elf_Fdesc); + mod_mem->size = ALIGN(mod_mem->size, 16); + me->arch.fdesc_offset = mod_mem->size; + mod_mem->size += fdescs * sizeof(Elf_Fdesc); me->arch.got_max = gots; me->arch.fdesc_max = fdescs; @@ -392,7 +342,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) BUG_ON(value == 0); - got = me->core_layout.base + me->arch.got_offset; + got = me->mem[MOD_TEXT].base + me->arch.got_offset; for (i = 0; got[i].addr; i++) if (got[i].addr == value) goto out; @@ -401,7 +351,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) got[i].addr = value; out: - DEBUGP("GOT ENTRY %d[%x] val %lx\n", i, i*sizeof(struct got_entry), + pr_debug("GOT ENTRY %d[%lx] val %lx\n", i, i*sizeof(struct got_entry), value); return i * sizeof(struct got_entry); } @@ -410,7 +360,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) #ifdef CONFIG_64BIT static Elf_Addr get_fdesc(struct module *me, unsigned long value) { - Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset; + Elf_Fdesc *fdesc = me->mem[MOD_TEXT].base + me->arch.fdesc_offset; if (!value) { printk(KERN_ERR "%s: zero OPD requested!\n", me->name); @@ -428,7 +378,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value) 
/* Create new one */ fdesc->addr = value; - fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; + fdesc->gp = (Elf_Addr)me->mem[MOD_TEXT].base + me->arch.got_offset; return (Elf_Addr)fdesc; } #endif /* CONFIG_64BIT */ @@ -554,7 +504,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, //unsigned long dp = (unsigned long)$global$; register unsigned long dp asm ("r27"); - DEBUGP("Applying relocate section %u to %u\n", relsec, + pr_debug("Applying relocate section %u to %u\n", relsec, targetsec); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ @@ -578,7 +528,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, #if 0 #define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t : - DEBUGP("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n", + pr_debug("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n", strtab + sym->st_name, (uint32_t)loc, val, addend, r(R_PARISC_PLABEL32) @@ -619,7 +569,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, /* See note about special handling of SEGREL32 at * the beginning of this file. */ - *loc = fsel(val, addend); + *loc = fsel(val, addend); break; case R_PARISC_SECREL32: /* 32-bit section relative address. */ @@ -698,7 +648,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, Elf_Addr loc0; unsigned int targetsec = sechdrs[relsec].sh_info; - DEBUGP("Applying relocate section %u to %u\n", relsec, + pr_debug("Applying relocate section %u to %u\n", relsec, targetsec); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ @@ -740,7 +690,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, case R_PARISC_LTOFF21L: /* LT-relative; left 21 bits */ val = get_got(me, val, addend); - DEBUGP("LTOFF21L Symbol %s loc %p val %lx\n", + pr_debug("LTOFF21L Symbol %s loc %p val %llx\n", strtab + sym->st_name, loc, val); val = lrsel(val, 0); @@ -751,19 +701,19 @@ int apply_relocate_add(Elf_Shdr *sechdrs, /* LT-relative; right 14 bits */ val = get_got(me, val, addend); val = rrsel(val, 0); - DEBUGP("LTOFF14R Symbol %s loc %p val %lx\n", + pr_debug("LTOFF14R Symbol %s loc %p val %llx\n", strtab + sym->st_name, loc, val); *loc = mask(*loc, 14) | reassemble_14(val); break; case R_PARISC_PCREL22F: /* PC-relative; 22 bits */ - DEBUGP("PCREL22F Symbol %s loc %p val %lx\n", + pr_debug("PCREL22F Symbol %s loc %p val %llx\n", strtab + sym->st_name, loc, val); val += addend; /* can we reach it locally? */ - if (in_local(me, (void *)val)) { + if (within_module(val, me)) { /* this is the case where the symbol is local * to the module, but in a different section, * so stub the jump in case it's more than 22 @@ -790,7 +740,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, val = get_stub(me, val, addend, ELF_STUB_GOT, loc0, targetsec); } - DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n", + pr_debug("STUB FOR %s loc %px, val %llx+%llx at %llx\n", strtab + sym->st_name, loc, sym->st_value, addend, val); val = (val - dot - 8)/4; @@ -801,6 +751,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, /* 32-bit PC relative address */ *loc = val - dot - 8 + addend; break; + case R_PARISC_PCREL64: + /* 64-bit PC relative address */ + *loc64 = val - dot - 8 + addend; + break; case R_PARISC_DIR64: /* 64-bit effective address */ *loc64 = val + addend; @@ -810,7 +764,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, /* See note about special handling of SEGREL32 at * the beginning of this file. */ - *loc = fsel(val, addend); + *loc = fsel(val, addend); break; case R_PARISC_SECREL32: /* 32-bit section relative address. 
*/ @@ -818,16 +772,16 @@ int apply_relocate_add(Elf_Shdr *sechdrs, break; case R_PARISC_FPTR64: /* 64-bit function address */ - if(in_local(me, (void *)(val + addend))) { + if (within_module(val + addend, me)) { *loc64 = get_fdesc(me, val+addend); - DEBUGP("FDESC for %s at %p points to %lx\n", + pr_debug("FDESC for %s at %llx points to %llx\n", strtab + sym->st_name, *loc64, ((Elf_Fdesc *)*loc64)->addr); } else { /* if the symbol is not local to this * module then val+addend is a pointer * to the function descriptor */ - DEBUGP("Non local FPTR64 Symbol %s loc %p val %lx\n", + pr_debug("Non local FPTR64 Symbol %s loc %p val %llx\n", strtab + sym->st_name, loc, val); *loc64 = val + addend; @@ -856,9 +810,9 @@ register_unwind_table(struct module *me, table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; end = table + sechdrs[me->arch.unwind_section].sh_size; - gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; + gp = (Elf_Addr)me->mem[MOD_TEXT].base + me->arch.got_offset; - DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", + pr_debug("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", me->arch.unwind_section, table, end, gp); me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end); } @@ -879,6 +833,7 @@ int module_finalize(const Elf_Ehdr *hdr, const char *strtab = NULL; const Elf_Shdr *s; char *secstrings; + int symindex __maybe_unused = -1; Elf_Sym *newptr, *oldptr; Elf_Shdr *symhdr = NULL; #ifdef DEBUG @@ -905,6 +860,7 @@ int module_finalize(const Elf_Ehdr *hdr, if(sechdrs[i].sh_type == SHT_SYMTAB && (sechdrs[i].sh_flags & SHF_ALLOC)) { int strindex = sechdrs[i].sh_link; + symindex = i; /* FIXME: AWFUL HACK * The cast is to drop the const from * the sechdrs pointer */ @@ -914,7 +870,7 @@ int module_finalize(const Elf_Ehdr *hdr, } } - DEBUGP("module %s: strtab %p, symhdr %p\n", + pr_debug("module %s: strtab %p, symhdr %p\n", me->name, strtab, symhdr); if(me->arch.got_count > MAX_GOTS) { @@ -933,7 +889,7 @@ int module_finalize(const Elf_Ehdr *hdr, oldptr = (void *)symhdr->sh_addr; newptr = oldptr + 1; /* we start counting at 1 */ nsyms = symhdr->sh_size / sizeof(Elf_Sym); - DEBUGP("OLD num_symtab %lu\n", nsyms); + pr_debug("OLD num_symtab %lu\n", nsyms); for (i = 1; i < nsyms; i++) { oldptr++; /* note, count starts at 1 so preincrement */ @@ -948,7 +904,7 @@ int module_finalize(const Elf_Ehdr *hdr, } nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; - DEBUGP("NEW num_symtab %lu\n", nsyms); + pr_debug("NEW num_symtab %lu\n", nsyms); symhdr->sh_size = nsyms * sizeof(Elf_Sym); /* find .altinstructions section */ @@ -960,8 +916,27 @@ int module_finalize(const Elf_Ehdr *hdr, if (!strcmp(".altinstructions", secname)) /* patch .altinstructions */ apply_alternatives(aseg, aseg + s->sh_size, me->name); - } +#ifdef CONFIG_DYNAMIC_FTRACE + /* For 32 bit kernels we're compiling modules with + * -ffunction-sections so we must relocate the addresses in the + * ftrace callsite section. 
+ */ + if (symindex != -1 && !strcmp(secname, FTRACE_CALLSITE_SECTION)) { + int err; + if (s->sh_type == SHT_REL) + err = apply_relocate((Elf_Shdr *)sechdrs, + strtab, symindex, + s - sechdrs, me); + else if (s->sh_type == SHT_RELA) + err = apply_relocate_add((Elf_Shdr *)sechdrs, + strtab, symindex, + s - sechdrs, me); + if (err) + return err; + } +#endif + } return 0; } @@ -973,7 +948,7 @@ void module_arch_cleanup(struct module *mod) #ifdef CONFIG_64BIT void *dereference_module_function_descriptor(struct module *mod, void *ptr) { - unsigned long start_opd = (Elf64_Addr)mod->core_layout.base + + unsigned long start_opd = (Elf64_Addr)mod->mem[MOD_TEXT].base + mod->arch.fdesc_offset; unsigned long end_opd = start_opd + mod->arch.fdesc_count * sizeof(Elf64_Fdesc); diff --git a/arch/parisc/kernel/pa7300lc.c b/arch/parisc/kernel/pa7300lc.c deleted file mode 100644 index 0d770ac83f70..000000000000 --- a/arch/parisc/kernel/pa7300lc.c +++ /dev/null @@ -1,51 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/parisc/kernel/pa7300lc.c - * - PA7300LC-specific functions - * - * Copyright (C) 2000 Philipp Rumpf */ - -#include <linux/sched.h> -#include <linux/sched/debug.h> -#include <linux/smp.h> -#include <linux/kernel.h> -#include <asm/io.h> -#include <asm/ptrace.h> -#include <asm/machdep.h> - -/* CPU register indices */ - -#define MIOC_STATUS 0xf040 -#define MIOC_CONTROL 0xf080 -#define MDERRADD 0xf0e0 -#define DMAERR 0xf0e8 -#define DIOERR 0xf0ec -#define HIDMAMEM 0xf0f4 - -/* this returns the HPA of the CPU it was called on */ -static u32 cpu_hpa(void) -{ - return 0xfffb0000; -} - -static void pa7300lc_lpmc(int code, struct pt_regs *regs) -{ - u32 hpa; - printk(KERN_WARNING "LPMC on CPU %d\n", smp_processor_id()); - - show_regs(regs); - - hpa = cpu_hpa(); - printk(KERN_WARNING - "MIOC_CONTROL %08x\n" "MIOC_STATUS %08x\n" - "MDERRADD %08x\n" "DMAERR %08x\n" - "DIOERR %08x\n" "HIDMAMEM %08x\n", - gsc_readl(hpa+MIOC_CONTROL), gsc_readl(hpa+MIOC_STATUS), - gsc_readl(hpa+MDERRADD), gsc_readl(hpa+DMAERR), - gsc_readl(hpa+DIOERR), gsc_readl(hpa+HIDMAMEM)); -} - -void pa7300lc_init(void) -{ - cpu_lpmc = pa7300lc_lpmc; -} diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 187f032c9dd8..541370d14559 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * PARISC TLB and cache flushing support * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin) * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org) * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* @@ -34,12 +21,12 @@ #include <asm/psw.h> #include <asm/assembly.h> -#include <asm/pgtable.h> #include <asm/cache.h> #include <asm/ldcw.h> #include <asm/alternative.h> #include <linux/linkage.h> #include <linux/init.h> +#include <linux/pgtable.h> .section .text.hot .align 16 @@ -76,7 +63,7 @@ ENTRY_CFI(flush_tlb_all_local) /* Flush Instruction Tlb */ - LDREG ITLB_SID_BASE(%r1), %r20 +88: LDREG ITLB_SID_BASE(%r1), %r20 LDREG ITLB_SID_STRIDE(%r1), %r21 LDREG ITLB_SID_COUNT(%r1), %r22 LDREG ITLB_OFF_BASE(%r1), %arg0 @@ -116,6 +103,7 @@ fitonemiddle: /* Loop if LOOP = 1 */ add %r21, %r20, %r20 /* increment space */ fitdone: + ALTERNATIVE(88b, fitdone, ALT_COND_NO_SPLIT_TLB, INSN_NOP) /* Flush Data Tlb */ @@ -186,6 +174,15 @@ fdtdone: 2: bv %r0(%r2) nop + + /* + * When running in qemu, drop whole flush_tlb_all_local function and + * replace by one pdtlbe instruction, for which QEMU will drop all + * local TLB entries. + */ +3: pdtlbe %r0(%sr1,%r0) + bv,n %r0(%r2) + ALTERNATIVE_CODE(flush_tlb_all_local, 2, ALT_COND_RUN_ON_QEMU, 3b) ENDPROC_CFI(flush_tlb_all_local) .import cache_info,data @@ -303,7 +300,6 @@ fdoneloop2: fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */ fdsync: - syncdma sync mtsm %r22 /* restore I-bit */ 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) @@ -311,39 +307,6 @@ fdsync: nop ENDPROC_CFI(flush_data_cache_local) -/* Macros to serialize TLB purge operations on SMP. */ - - .macro tlb_lock la,flags,tmp -#ifdef CONFIG_SMP -98: -#if __PA_LDCW_ALIGNMENT > 4 - load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la - depi 0,31,__PA_LDCW_ALIGN_ORDER, \la -#else - load32 pa_tlb_lock, \la -#endif - rsm PSW_SM_I,\flags -1: LDCW 0(\la),\tmp - cmpib,<>,n 0,\tmp,3f -2: ldw 0(\la),\tmp - cmpb,<> %r0,\tmp,1b - nop - b,n 2b -3: -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - .endm - - .macro tlb_unlock la,flags,tmp -#ifdef CONFIG_SMP -98: ldi 1,\tmp - sync - stw \tmp,0(\la) - mtsm \flags -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - .endm - /* Clear page using kernel mapping. */ ENTRY_CFI(clear_page_asm) @@ -524,6 +487,8 @@ ENDPROC_CFI(copy_page_asm) * parisc chip designers that there will not ever be a parisc * chip with a larger alias boundary (Never say never :-) ). * + * Yah, what about the PA8800 and PA8900 processors? + * * Subtle: the dtlb miss handlers support the temp alias region by * "knowing" that if a dtlb miss happens within the temp alias * region it must have occurred while in clear_user_page. Since @@ -535,19 +500,10 @@ ENDPROC_CFI(copy_page_asm) * miss on the translation, the dtlb miss handler inserts the * translation into the tlb using these values: * - * %r26 physical page (shifted for tlb insert) of "to" translation - * %r23 physical page (shifted for tlb insert) of "from" translation + * %r26 physical address of "to" translation + * %r23 physical address of "from" translation */ - /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ - #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) - .macro convert_phys_for_tlb_insert20 phys - extrd,u \phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys -#if _PAGE_SIZE_ENCODING_DEFAULT - depdi _PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys -#endif - .endm - /* * copy_user_page_asm() performs a page copy using mappings * equivalent to the user page mappings. 
It can be used to @@ -576,24 +532,10 @@ ENTRY_CFI(copy_user_page_asm) sub %r25, %r1, %r23 ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef CONFIG_64BIT -#if (TMPALIAS_MAP_START >= 0x80000000) - depdi 0, 31,32, %r28 /* clear any sign extension */ -#endif - convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ - convert_phys_for_tlb_insert20 %r23 /* convert phys addr to tlb insert format */ - depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ + dep_safe %r24, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */ + depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ copy %r28, %r29 - depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */ -#else - extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ - extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */ - depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ - copy %r28, %r29 - depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */ -#endif + depi_safe 1, 31-TMPALIAS_SIZE_BITS,1, %r29 /* Form aliased virtual address 'from' */ /* Purge any old translations */ @@ -601,10 +543,8 @@ ENTRY_CFI(copy_user_page_asm) pdtlb,l %r0(%r28) pdtlb,l %r0(%r29) #else - tlb_lock %r20,%r21,%r22 0: pdtlb %r0(%r28) 1: pdtlb %r0(%r29) - tlb_unlock %r20,%r21,%r22 ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB) #endif @@ -725,27 +665,15 @@ ENTRY_CFI(clear_user_page_asm) tophys_r1 %r26 ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef CONFIG_64BIT -#if (TMPALIAS_MAP_START >= 0x80000000) - depdi 0, 31,32, %r28 /* clear any sign extension */ -#endif - convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ - depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#else - extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ - depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#endif + dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */ + depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ /* Purge any old translation */ #ifdef CONFIG_PA20 pdtlb,l %r0(%r28) #else - tlb_lock %r20,%r21,%r22 0: pdtlb %r0(%r28) - tlb_unlock %r20,%r21,%r22 ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) #endif @@ -803,27 +731,15 @@ ENDPROC_CFI(clear_user_page_asm) ENTRY_CFI(flush_dcache_page_asm) ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef CONFIG_64BIT -#if (TMPALIAS_MAP_START >= 0x80000000) - depdi 0, 31,32, %r28 /* clear any sign extension */ -#endif - convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ - depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#else - extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ - depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#endif + dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */ + depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ /* Purge any old translation */ #ifdef CONFIG_PA20 pdtlb,l %r0(%r28) #else - tlb_lock %r20,%r21,%r22 0: pdtlb %r0(%r28) - tlb_unlock %r20,%r21,%r22 ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) 
#endif @@ -864,27 +780,15 @@ ENDPROC_CFI(flush_dcache_page_asm) ENTRY_CFI(purge_dcache_page_asm) ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef CONFIG_64BIT -#if (TMPALIAS_MAP_START >= 0x80000000) - depdi 0, 31,32, %r28 /* clear any sign extension */ -#endif - convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ - depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#else - extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ - depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#endif + dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */ + depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ /* Purge any old translation */ #ifdef CONFIG_PA20 pdtlb,l %r0(%r28) #else - tlb_lock %r20,%r21,%r22 0: pdtlb %r0(%r28) - tlb_unlock %r20,%r21,%r22 ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) #endif @@ -925,18 +829,8 @@ ENDPROC_CFI(purge_dcache_page_asm) ENTRY_CFI(flush_icache_page_asm) ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef CONFIG_64BIT -#if (TMPALIAS_MAP_START >= 0x80000000) - depdi 0, 31,32, %r28 /* clear any sign extension */ -#endif - convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ - depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ - depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#else - extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ - depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ - depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ -#endif + dep_safe %r25, 31,TMPALIAS_SIZE_BITS, %r28 /* Form aliased virtual address 'to' */ + depi_safe 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ /* Purge any old translation. Note that the FIC instruction * may use either the instruction or data TLB. Given that we @@ -948,10 +842,8 @@ ENTRY_CFI(flush_icache_page_asm) 1: pitlb,l %r0(%sr4,%r28) ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP) #else - tlb_lock %r20,%r21,%r22 0: pdtlb %r0(%r28) 1: pitlb %r0(%sr4,%r28) - tlb_unlock %r20,%r21,%r22 ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB) ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP) @@ -997,6 +889,7 @@ ENDPROC_CFI(flush_icache_page_asm) ENTRY_CFI(flush_kernel_dcache_page_asm) 88: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 + depi_safe 0, 31,PAGE_SHIFT, %r26 /* Clear any offset bits */ #ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 @@ -1033,6 +926,7 @@ ENDPROC_CFI(flush_kernel_dcache_page_asm) ENTRY_CFI(purge_kernel_dcache_page_asm) 88: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 + depi_safe 0, 31,PAGE_SHIFT, %r26 /* Clear any offset bits */ #ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 @@ -1144,7 +1038,6 @@ ENTRY_CFI(flush_kernel_dcache_range_asm) sync 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) - syncdma bv %r0(%r2) nop ENDPROC_CFI(flush_kernel_dcache_range_asm) @@ -1186,7 +1079,6 @@ ENTRY_CFI(purge_kernel_dcache_range_asm) sync 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) - syncdma bv %r0(%r2) nop ENDPROC_CFI(purge_kernel_dcache_range_asm) @@ -1310,7 +1202,7 @@ ENTRY_CFI(flush_kernel_icache_range_asm) nop ENDPROC_CFI(flush_kernel_icache_range_asm) - __INIT + .text /* align should cover use of rfi in disable_sr_hashing_asm and * srdis_done. 
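The pacache.S hunks above replace the per-width #ifdef CONFIG_64BIT blocks with the dep_safe/depi_safe macros, so the temporary-alias virtual address is formed the same way on 32- and 64-bit kernels. As a rough C sketch of what that two-instruction sequence computes (TMPALIAS_MAP_START, TMPALIAS_SIZE_BITS and PAGE_SHIFT are the kernel's own constants; the helper name is invented for illustration and is not part of the patch):

static unsigned long tmpalias_vaddr(unsigned long user_vaddr)
{
	unsigned long mask = (1UL << TMPALIAS_SIZE_BITS) - 1;
	unsigned long alias;

	/* dep_safe: keep the TMPALIAS window base and take the low
	 * TMPALIAS_SIZE_BITS bits (the cache colour) from the user
	 * virtual address */
	alias = (TMPALIAS_MAP_START & ~mask) | (user_vaddr & mask);

	/* depi_safe 0, 31, PAGE_SHIFT: clear the in-page offset bits */
	return alias & ~((1UL << PAGE_SHIFT) - 1);
}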
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 7baa2265d439..509146a52725 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Architecture-specific kernel symbols * @@ -8,25 +9,12 @@ * Copyright (C) 2002-2003 Matthew Wilcox <willy at parisc-linux.org> * Copyright (C) 2002 Randolph Chung <tausq at parisc-linux.org> * Copyright (C) 2002-2007 Helge Deller <deller with parisc-linux.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/syscalls.h> +#include <linux/libgcc.h> #include <linux/string.h> EXPORT_SYMBOL(memset); @@ -34,6 +22,8 @@ EXPORT_SYMBOL(memset); #include <linux/atomic.h> EXPORT_SYMBOL(__xchg8); EXPORT_SYMBOL(__xchg32); +EXPORT_SYMBOL(__cmpxchg_u8); +EXPORT_SYMBOL(__cmpxchg_u16); EXPORT_SYMBOL(__cmpxchg_u32); EXPORT_SYMBOL(__cmpxchg_u64); #ifdef CONFIG_SMP @@ -45,7 +35,6 @@ EXPORT_SYMBOL(__xchg64); #include <linux/uaccess.h> EXPORT_SYMBOL(lclear_user); -EXPORT_SYMBOL(lstrnlen_user); #ifndef CONFIG_64BIT /* Needed so insmod can set dp value */ @@ -54,9 +43,6 @@ EXPORT_SYMBOL($global$); #endif #include <asm/io.h> -EXPORT_SYMBOL(memcpy_toio); -EXPORT_SYMBOL(memcpy_fromio); -EXPORT_SYMBOL(memset_io); extern void $$divI(void); extern void $$divU(void); @@ -106,12 +92,6 @@ EXPORT_SYMBOL($$divI_12); EXPORT_SYMBOL($$divI_14); EXPORT_SYMBOL($$divI_15); -extern void __ashrdi3(void); -extern void __ashldi3(void); -extern void __lshrdi3(void); -extern void __muldi3(void); -extern void __ucmpdi2(void); - EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); @@ -138,12 +118,6 @@ extern void $$dyncall(void); EXPORT_SYMBOL($$dyncall); #endif -#ifdef CONFIG_DISCONTIGMEM -#include <asm/mmzone.h> -EXPORT_SYMBOL(node_data); -EXPORT_SYMBOL(pfnnid_map); -#endif - #ifdef CONFIG_FUNCTION_TRACER extern void _mcount(void); EXPORT_SYMBOL(_mcount); diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c new file mode 100644 index 000000000000..35dd764b871e --- /dev/null +++ b/arch/parisc/kernel/patch.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 + /* + * functions to patch RO kernel text during runtime + * + * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org> + */ + +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/kprobes.h> +#include <linux/mm.h> +#include <linux/stop_machine.h> + +#include <asm/cacheflush.h> +#include <asm/fixmap.h> +#include <asm/text-patching.h> + +struct patch { + void *addr; + u32 *insn; + unsigned int len; +}; + +static DEFINE_RAW_SPINLOCK(patch_lock); + +static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags, + int *need_unmap) +{ + unsigned long uintaddr = (uintptr_t) addr; + bool module = 
!core_kernel_text(uintaddr); + struct page *page; + + *need_unmap = 0; + if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + page = vmalloc_to_page(addr); + else if (!module && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) + page = virt_to_page(addr); + else + return addr; + + *need_unmap = 1; + set_fixmap(fixmap, page_to_phys(page)); + raw_spin_lock_irqsave(&patch_lock, *flags); + + return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); +} + +static void __kprobes patch_unmap(int fixmap, unsigned long *flags) +{ + clear_fixmap(fixmap); + + raw_spin_unlock_irqrestore(&patch_lock, *flags); +} + +void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) +{ + unsigned long start = (unsigned long)addr; + unsigned long end = (unsigned long)addr + len; + unsigned long flags; + u32 *p, *fixmap; + int mapped; + + /* Make sure we don't have any aliases in cache */ + flush_kernel_dcache_range_asm(start, end); + flush_kernel_icache_range_asm(start, end); + flush_tlb_kernel_range(start, end); + + p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped); + + while (len >= 4) { + *p++ = *insn++; + addr += sizeof(u32); + len -= sizeof(u32); + if (len && offset_in_page(addr) == 0) { + /* + * We're crossing a page boundary, so + * need to remap + */ + flush_kernel_dcache_range_asm((unsigned long)fixmap, + (unsigned long)p); + flush_tlb_kernel_range((unsigned long)fixmap, + (unsigned long)p); + if (mapped) + patch_unmap(FIX_TEXT_POKE0, &flags); + p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, + &mapped); + } + } + + flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p); + flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p); + if (mapped) + patch_unmap(FIX_TEXT_POKE0, &flags); +} + +void __kprobes __patch_text(void *addr, u32 insn) +{ + __patch_text_multiple(addr, &insn, sizeof(insn)); +} + +static int __kprobes patch_text_stop_machine(void *data) +{ + struct patch *patch = data; + + __patch_text_multiple(patch->addr, patch->insn, patch->len); + return 0; +} + +void __kprobes patch_text(void *addr, unsigned int insn) +{ + struct patch patch = { + .addr = addr, + .insn = &insn, + .len = sizeof(insn), + }; + + stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL); +} + +void __kprobes patch_text_multiple(void *addr, u32 *insn, unsigned int len) +{ + + struct patch patch = { + .addr = addr, + .insn = insn, + .len = len + }; + + stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL); +} diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 239162355b58..bf9f192c826e 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -3,7 +3,7 @@ ** PARISC 1.1 Dynamic DMA mapping support. ** This implementation is for PA-RISC platforms that do not support ** I/O TLBs (aka DMA address translation hardware). -** See Documentation/DMA-API-HOWTO.txt for interface definitions. +** See Documentation/core-api/dma-api-howto.rst for interface definitions. 
** ** (c) Copyright 1999,2000 Hewlett-Packard Company ** (c) Copyright 2000 Grant Grundler @@ -26,21 +26,20 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/dma-direct.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <asm/cacheflush.h> #include <asm/dma.h> /* for DMA_CHUNK_SIZE */ #include <asm/io.h> #include <asm/page.h> /* get_order */ -#include <asm/pgalloc.h> #include <linux/uaccess.h> #include <asm/tlbflush.h> /* for purge_tlb_*() macros */ static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; -static unsigned long pcxl_used_bytes __read_mostly = 0; -static unsigned long pcxl_used_pages __read_mostly = 0; +static unsigned long pcxl_used_bytes __read_mostly; +static unsigned long pcxl_used_pages __read_mostly; -extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */ +unsigned long pcxl_dma_start __ro_after_init; /* pcxl dma mapping area start */ static DEFINE_SPINLOCK(pcxl_res_lock); static char *pcxl_res_map; static int pcxl_res_hint; @@ -92,7 +91,7 @@ static inline int map_pte_uncached(pte_t * pte, printk(KERN_ERR "map_pte_uncached: page already exists\n"); purge_tlb_start(flags); set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); - pdtlb_kernel(orig_vaddr); + pdtlb(SR_KERNEL, orig_vaddr); purge_tlb_end(flags); vaddr += PAGE_SIZE; orig_vaddr += PAGE_SIZE; @@ -133,9 +132,14 @@ static inline int map_uncached_pages(unsigned long vaddr, unsigned long size, dir = pgd_offset_k(vaddr); do { + p4d_t *p4d; + pud_t *pud; pmd_t *pmd; - - pmd = pmd_alloc(NULL, dir, vaddr); + + p4d = p4d_offset(dir, vaddr); + pud = pud_offset(p4d, vaddr); + pmd = pmd_alloc(NULL, pud, vaddr); + if (!pmd) return -ENOMEM; if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr)) @@ -160,7 +164,7 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr, pmd_clear(pmd); return; } - pte = pte_offset_map(pmd, vaddr); + pte = pte_offset_kernel(pmd, vaddr); vaddr &= ~PMD_MASK; end = vaddr + size; if (end > PMD_SIZE) @@ -171,7 +175,7 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr, pte_clear(&init_mm, vaddr, pte); purge_tlb_start(flags); - pdtlb_kernel(orig_vaddr); + pdtlb(SR_KERNEL, orig_vaddr); purge_tlb_end(flags); vaddr += PAGE_SIZE; orig_vaddr += PAGE_SIZE; @@ -196,7 +200,7 @@ static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr, pgd_clear(dir); return; } - pmd = pmd_offset(dir, vaddr); + pmd = pmd_offset(pud_offset(p4d_offset(dir, vaddr), vaddr), vaddr); vaddr &= ~PGDIR_MASK; end = vaddr + size; if (end > PGDIR_SIZE) @@ -241,7 +245,7 @@ static void unmap_uncached_pages(unsigned long vaddr, unsigned long size) PCXL_SEARCH_LOOP(idx, mask, size); \ } -unsigned long +static unsigned long pcxl_alloc_range(size_t size) { int res_idx; @@ -331,7 +335,7 @@ pcxl_free_range(unsigned long vaddr, size_t size) dump_resmap(); } -static int proc_pcxl_dma_show(struct seq_file *m, void *v) +static int __maybe_unused proc_pcxl_dma_show(struct seq_file *m, void *v) { #if 0 u_long i = 0; @@ -377,7 +381,7 @@ pcxl_dma_init(void) pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL, get_order(pcxl_res_size)); memset(pcxl_res_map, 0, pcxl_res_size); - proc_gsc_root = proc_mkdir("gsc", NULL); + proc_gsc_root = proc_mkdir("bus/gsc", NULL); if (!proc_gsc_root) printk(KERN_WARNING "pcxl_dma_init: Unable to create gsc /proc dir entry\n"); @@ -394,87 +398,67 @@ pcxl_dma_init(void) __initcall(pcxl_dma_init); -static void *pcxl_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, 
unsigned long attrs) +void *arch_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { unsigned long vaddr; unsigned long paddr; int order; + if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl) + return NULL; + order = get_order(size); size = 1 << (order + PAGE_SHIFT); vaddr = pcxl_alloc_range(size); - paddr = __get_free_pages(flag | __GFP_ZERO, order); + paddr = __get_free_pages(gfp | __GFP_ZERO, order); flush_kernel_dcache_range(paddr, size); paddr = __pa(paddr); map_uncached_pages(vaddr, size, paddr); *dma_handle = (dma_addr_t) paddr; -#if 0 -/* This probably isn't needed to support EISA cards. -** ISA cards will certainly only support 24-bit DMA addressing. -** Not clear if we can, want, or need to support ISA. -*/ - if (!dev || *dev->coherent_dma_mask < 0xffffffff) - gfp |= GFP_DMA; -#endif return (void *)vaddr; } -static void *pcx_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) -{ - void *addr; - - if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) - return NULL; - - addr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size)); - if (addr) - *dma_handle = (dma_addr_t)virt_to_phys(addr); - - return addr; -} - -void *arch_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) -{ - - if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) - return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs); - else - return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs); -} - void arch_dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { int order = get_order(size); - if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) { - size = 1 << (order + PAGE_SHIFT); - unmap_uncached_pages((unsigned long)vaddr, size); - pcxl_free_range((unsigned long)vaddr, size); + WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 && + boot_cpu_data.cpu_type != pcxl); - vaddr = __va(dma_handle); - } - free_pages((unsigned long)vaddr, get_order(size)); -} + size = 1 << (order + PAGE_SHIFT); + unmap_uncached_pages((unsigned long)vaddr, size); + pcxl_free_range((unsigned long)vaddr, size); -void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, - size_t size, enum dma_data_direction dir) -{ - flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size); + free_pages((unsigned long)__va(dma_handle), order); } -void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, - size_t size, enum dma_data_direction dir) +void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, + enum dma_data_direction dir) { + /* + * fdc: The data cache line is written back to memory, if and only if + * it is dirty, and then invalidated from the data cache. 
+ */ flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size); } -void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) +void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, + enum dma_data_direction dir) { - flush_kernel_dcache_range((unsigned long)vaddr, size); + unsigned long addr = (unsigned long) phys_to_virt(paddr); + + switch (dir) { + case DMA_TO_DEVICE: + case DMA_BIDIRECTIONAL: + flush_kernel_dcache_range(addr, size); + return; + case DMA_FROM_DEVICE: + purge_kernel_dcache_range_asm(addr, addr + size); + return; + default: + BUG(); + } } diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index ae684ac6efb6..cf285b17a5ae 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c @@ -34,25 +34,14 @@ #define DBG_RES(x...) #endif -/* To be used as: mdelay(pci_post_reset_delay); - * - * post_reset is the time the kernel should stall to prevent anyone from - * accessing the PCI bus once #RESET is de-asserted. - * PCI spec somewhere says 1 second but with multi-PCI bus systems, - * this makes the boot time much longer than necessary. - * 20ms seems to work for all the HP PCI implementations to date. - * - * #define pci_post_reset_delay 50 - */ - -struct pci_port_ops *pci_port __read_mostly; -struct pci_bios_ops *pci_bios __read_mostly; +struct pci_port_ops *pci_port __ro_after_init; +struct pci_bios_ops *pci_bios __ro_after_init; -static int pci_hba_count __read_mostly; +static int pci_hba_count __ro_after_init; /* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data. */ #define PCI_HBA_MAX 32 -static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __read_mostly; +static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __ro_after_init; /******************************************************************** diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c index 28e07482b0f1..d477d0177c2f 100644 --- a/arch/parisc/kernel/pdc_chassis.c +++ b/arch/parisc/kernel/pdc_chassis.c @@ -1,22 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * interfaces to Chassis Codes via PDC (firmware) * * Copyright (C) 2002 Laurent Canet <canetl@esiee.fr> * Copyright (C) 2002-2006 Thibaut VARENE <varenet@parisc-linux.org> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * TODO: poll chassis warns, trigger (configurable) machine shutdown when * needed. * Find out how to get Chassis warnings out of PAT boxes? 
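In pci.c above, pointers that are assigned once while the machine boots (pci_port, pci_bios, the parisc_pci_hba[] table) move from __read_mostly to __ro_after_init, so the kernel can write-protect them once init has finished. A minimal sketch of the idiom, with a made-up variable and initcall that are not taken from the patch:

#include <linux/cache.h>
#include <linux/init.h>

static unsigned long example_limit __ro_after_init;

static int __init example_setup(void)
{
	/* .data..ro_after_init is still writable while __init code runs */
	example_limit = 32;
	return 0;
}
arch_initcall(example_setup);

/* once init completes the section is mapped read-only, so any later
 * store to example_limit would fault */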
@@ -32,6 +20,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> +#include <linux/panic_notifier.h> #include <linux/reboot.h> #include <linux/notifier.h> #include <linux/cache.h> @@ -42,6 +31,7 @@ #include <asm/processor.h> #include <asm/pdc.h> #include <asm/pdcpat.h> +#include <asm/led.h> #define PDC_CHASSIS_VER "0.05" @@ -51,7 +41,7 @@ static unsigned int pdc_chassis_enabled __read_mostly = 1; /** * pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time. - * @str configuration param: 0 to disable chassis log + * @str: configuration param: 0 to disable chassis log * @return 1 */ @@ -66,7 +56,6 @@ __setup("pdcchassis=", pdc_chassis_setup); /** * pdc_chassis_checkold() - Checks for old PDC_CHASSIS compatibility - * @pdc_chassis_old: 1 if old pdc chassis style * * Currently, only E class and A180 are known to work with this. * Inspired by Christoph Plattner @@ -91,6 +80,9 @@ static void __init pdc_chassis_checkold(void) /** * pdc_chassis_panic_event() - Called by the panic handler. + * @this: unused + * @event: unused + * @ptr: unused * * As soon as a panic occurs, we should inform the PDC. */ @@ -99,7 +91,7 @@ static int pdc_chassis_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); - return NOTIFY_DONE; + return NOTIFY_DONE; } @@ -110,7 +102,10 @@ static struct notifier_block pdc_chassis_panic_block = { /** - * parisc_reboot_event() - Called by the reboot handler. + * pdc_chassis_reboot_event() - Called by the reboot handler. + * @this: unused + * @event: unused + * @ptr: unused * * As soon as a reboot occurs, we should inform the PDC. */ @@ -119,7 +114,7 @@ static int pdc_chassis_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) { pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN); - return NOTIFY_DONE; + return NOTIFY_DONE; } @@ -159,7 +154,7 @@ void __init parisc_pdc_chassis_init(void) /** * pdc_chassis_send_status() - Sends a predefined message to the chassis, * and changes the front panel LEDs according to the new system state - * @retval: PDC call return value. + * @message: Type of message, one of PDC_CHASSIS_DIRECT_* values. * * Only machines with 64 bits PDC PAT and those reported in * pdc_chassis_checkold() are supported atm. 
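pdc_chassis_panic_event() and pdc_chassis_reboot_event() above are ordinary notifier callbacks; they only take effect once they are registered (in parisc_pdc_chassis_init(), not shown in full here) on the panic and reboot notifier chains. A sketch of that registration pattern — pdc_chassis_send_status() and PDC_CHASSIS_DIRECT_PANIC come from this file, the my_* names are invented:

#include <linux/notifier.h>
#include <linux/panic_notifier.h>

static int my_panic_event(struct notifier_block *this,
			  unsigned long event, void *ptr)
{
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
	return NOTIFY_DONE;
}

static struct notifier_block my_panic_block = {
	.notifier_call = my_panic_event,
};

static void __init my_chassis_init(void)
{
	/* invoked from panic() via the atomic panic notifier chain */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &my_panic_block);
}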
@@ -240,6 +235,11 @@ int pdc_chassis_send_status(int message) } else retval = -1; #endif /* CONFIG_64BIT */ } /* if (pdc_chassis_enabled) */ + + /* if system has LCD display, update current string */ + if (retval != -1 && IS_ENABLED(CONFIG_CHASSIS_LCD_LED)) + lcd_print(NULL); + #endif /* CONFIG_PDC_CHASSIS */ return retval; } diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c index c46bf29ae412..cf3bf8232374 100644 --- a/arch/parisc/kernel/pdc_cons.c +++ b/arch/parisc/kernel/pdc_cons.c @@ -1,281 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* - * PDC Console support - ie use firmware to dump text via boot console + * PDC early console support - use PDC firmware to dump text via boot console * - * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org> - * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net> - * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org> - * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org> - * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org> - * Copyright (C) 2000 Michael Ang <mang with subcarrier.org> - * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org> - * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org> - * Copyright (C) 2001 Helge Deller <deller at parisc-linux.org> - * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org> - * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> - * Copyright (C) 2010 Guy Martin <gmsoft at tuxicoman.be> - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -/* - * The PDC console is a simple console, which can be used for debugging - * boot related problems on HP PA-RISC machines. It is also useful when no - * other console works. - * - * This code uses the ROM (=PDC) based functions to read and write characters - * from and to PDC's boot path. + * Copyright (C) 2001-2022 Helge Deller <deller@gmx.de> */ -/* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems. - * On production kernels EARLY_BOOTUP_DEBUG should be undefined. 
*/ -#define EARLY_BOOTUP_DEBUG - - -#include <linux/kernel.h> #include <linux/console.h> -#include <linux/string.h> #include <linux/init.h> -#include <linux/major.h> -#include <linux/tty.h> +#include <linux/serial_core.h> +#include <linux/kgdb.h> #include <asm/page.h> /* for PAGE0 */ #include <asm/pdc.h> /* for iodc_call() proto and friends */ -static DEFINE_SPINLOCK(pdc_console_lock); -static struct console pdc_cons; - static void pdc_console_write(struct console *co, const char *s, unsigned count) { int i = 0; - unsigned long flags; - spin_lock_irqsave(&pdc_console_lock, flags); do { i += pdc_iodc_print(s + i, count - i); } while (i < count); - spin_unlock_irqrestore(&pdc_console_lock, flags); -} - -int pdc_console_poll_key(struct console *co) -{ - int c; - unsigned long flags; - - spin_lock_irqsave(&pdc_console_lock, flags); - c = pdc_iodc_getc(); - spin_unlock_irqrestore(&pdc_console_lock, flags); - - return c; -} - -static int pdc_console_setup(struct console *co, char *options) -{ - return 0; } -#if defined(CONFIG_PDC_CONSOLE) -#include <linux/vt_kern.h> -#include <linux/tty_flip.h> - -#define PDC_CONS_POLL_DELAY (30 * HZ / 1000) - -static void pdc_console_poll(struct timer_list *unused); -static DEFINE_TIMER(pdc_console_timer, pdc_console_poll); -static struct tty_port tty_port; - -static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp) -{ - tty_port_tty_set(&tty_port, tty); - mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY); - - return 0; -} - -static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp) +#ifdef CONFIG_KGDB +static int kgdb_pdc_read_char(void) { - if (tty->count == 1) { - del_timer_sync(&pdc_console_timer); - tty_port_tty_set(&tty_port, NULL); - } -} + int c = pdc_iodc_getc(); -static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) -{ - pdc_console_write(NULL, buf, count); - return count; + return (c <= 0) ? NO_POLL_CHAR : c; } -static int pdc_console_tty_write_room(struct tty_struct *tty) +static void kgdb_pdc_write_char(u8 chr) { - return 32768; /* no limit, no buffer used */ + /* no need to print char as it's shown on standard console */ + /* pdc_iodc_print(&chr, 1); */ } -static int pdc_console_tty_chars_in_buffer(struct tty_struct *tty) -{ - return 0; /* no buffer */ -} - -static const struct tty_operations pdc_console_tty_ops = { - .open = pdc_console_tty_open, - .close = pdc_console_tty_close, - .write = pdc_console_tty_write, - .write_room = pdc_console_tty_write_room, - .chars_in_buffer = pdc_console_tty_chars_in_buffer, +static struct kgdb_io kgdb_pdc_io_ops = { + .name = "kgdb_pdc", + .read_char = kgdb_pdc_read_char, + .write_char = kgdb_pdc_write_char, }; - -static void pdc_console_poll(struct timer_list *unused) -{ - int data, count = 0; - - while (1) { - data = pdc_console_poll_key(NULL); - if (data == -1) - break; - tty_insert_flip_char(&tty_port, data & 0xFF, TTY_NORMAL); - count ++; - } - - if (count) - tty_flip_buffer_push(&tty_port); - - if (pdc_cons.flags & CON_ENABLED) - mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY); -} - -static struct tty_driver *pdc_console_tty_driver; - -static int __init pdc_console_tty_driver_init(void) -{ - int err; - - /* Check if the console driver is still registered. - * It is unregistered if the pdc console was not selected as the - * primary console. 
*/ - - struct console *tmp; - - console_lock(); - for_each_console(tmp) - if (tmp == &pdc_cons) - break; - console_unlock(); - - if (!tmp) { - printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name); - return -ENODEV; - } - - printk(KERN_INFO "The PDC console driver is still registered, removing CON_BOOT flag\n"); - pdc_cons.flags &= ~CON_BOOT; - - pdc_console_tty_driver = alloc_tty_driver(1); - - if (!pdc_console_tty_driver) - return -ENOMEM; - - tty_port_init(&tty_port); - - pdc_console_tty_driver->driver_name = "pdc_cons"; - pdc_console_tty_driver->name = "ttyB"; - pdc_console_tty_driver->major = MUX_MAJOR; - pdc_console_tty_driver->minor_start = 0; - pdc_console_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM; - pdc_console_tty_driver->init_termios = tty_std_termios; - pdc_console_tty_driver->flags = TTY_DRIVER_REAL_RAW | - TTY_DRIVER_RESET_TERMIOS; - tty_set_operations(pdc_console_tty_driver, &pdc_console_tty_ops); - tty_port_link_device(&tty_port, pdc_console_tty_driver, 0); - - err = tty_register_driver(pdc_console_tty_driver); - if (err) { - printk(KERN_ERR "Unable to register the PDC console TTY driver\n"); - tty_port_destroy(&tty_port); - return err; - } - - return 0; -} -device_initcall(pdc_console_tty_driver_init); - -static struct tty_driver * pdc_console_device (struct console *c, int *index) -{ - *index = c->index; - return pdc_console_tty_driver; -} -#else -#define pdc_console_device NULL #endif -static struct console pdc_cons = { - .name = "ttyB", - .write = pdc_console_write, - .device = pdc_console_device, - .setup = pdc_console_setup, - .flags = CON_BOOT | CON_PRINTBUFFER, - .index = -1, -}; - -static int pdc_console_initialized; - -static void pdc_console_init_force(void) +static int __init pdc_earlycon_setup(struct earlycon_device *device, + const char *opt) { - if (pdc_console_initialized) - return; - ++pdc_console_initialized; - + struct console *earlycon_console; + /* If the console is duplex then copy the COUT parameters to CIN. */ if (PAGE0->mem_cons.cl_class == CL_DUPLEX) memcpy(&PAGE0->mem_kbd, &PAGE0->mem_cons, sizeof(PAGE0->mem_cons)); - /* register the pdc console */ - register_console(&pdc_cons); -} + earlycon_console = device->con; + earlycon_console->write = pdc_console_write; + device->port.iotype = UPIO_MEM32BE; -void __init pdc_console_init(void) -{ -#if defined(EARLY_BOOTUP_DEBUG) || defined(CONFIG_PDC_CONSOLE) - pdc_console_init_force(); -#endif -#ifdef EARLY_BOOTUP_DEBUG - printk(KERN_INFO "Initialized PDC Console for debugging.\n"); +#ifdef CONFIG_KGDB + kgdb_register_io_module(&kgdb_pdc_io_ops); #endif -} - -/* - * Used for emergencies. Currently only used if an HPMC occurs. If an - * HPMC occurs, it is possible that the current console may not be - * properly initialised after the PDC IO reset. This routine unregisters - * all of the current consoles, reinitializes the pdc console and - * registers it. 
- */ - -void pdc_console_restart(void) -{ - struct console *console; - - if (pdc_console_initialized) - return; - - /* If we've already seen the output, don't bother to print it again */ - if (console_drivers != NULL) - pdc_cons.flags &= ~CON_PRINTBUFFER; - - while ((console = console_drivers) != NULL) - unregister_console(console_drivers); - - /* force registering the pdc console */ - pdc_console_init_force(); + return 0; } + +EARLYCON_DECLARE(pdc, pdc_earlycon_setup); diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index 36434d4da381..b70b67adb855 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c @@ -16,7 +16,10 @@ #include <linux/memblock.h> #include <linux/seq_file.h> #include <linux/kthread.h> +#include <linux/proc_fs.h> #include <linux/initrd.h> +#include <linux/pgtable.h> +#include <linux/mm.h> #include <asm/pdc.h> #include <asm/pdcpat.h> @@ -60,6 +63,7 @@ static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss; #define PDT_ADDR_PERM_ERR (pdt_type != PDT_PDC ? 2UL : 0UL) #define PDT_ADDR_SINGLE_ERR 1UL +#ifdef CONFIG_PROC_FS /* report PDT entries via /proc/meminfo */ void arch_report_meminfo(struct seq_file *m) { @@ -71,6 +75,7 @@ void arch_report_meminfo(struct seq_file *m) seq_printf(m, "PDT_cur_entries: %7lu\n", pdt_status.pdt_entries); } +#endif static int get_info_pat_new(void) { @@ -230,6 +235,7 @@ void __init pdc_pdt_init(void) /* mark memory page bad */ memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE); + num_poisoned_pages_inc(addr >> PAGE_SHIFT); } } @@ -327,8 +333,7 @@ static int pdt_mainloop(void *unused) ((pde & PDT_ADDR_SINGLE_ERR) == 0)) memory_failure(pde >> PAGE_SHIFT, 0); else - soft_offline_page( - pfn_to_page(pde >> PAGE_SHIFT), 0); + soft_offline_page(pde >> PAGE_SHIFT, 0); #else pr_crit("PDT: memory error at 0x%lx ignored.\n" "Rebuild kernel with CONFIG_MEMORY_FAILURE=y " @@ -350,13 +355,9 @@ static int __init pdt_initcall(void) if (pdt_type == PDT_NONE) return -ENODEV; - kpdtd_task = kthread_create(pdt_mainloop, NULL, "kpdtd"); - if (IS_ERR(kpdtd_task)) - return PTR_ERR(kpdtd_task); - - wake_up_process(kpdtd_task); + kpdtd_task = kthread_run(pdt_mainloop, NULL, "kpdtd"); - return 0; + return PTR_ERR_OR_ZERO(kpdtd_task); } late_initcall(pdt_initcall); diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index 0813359049ae..5e10f98ce7b5 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Parisc performance counters * Copyright (C) 2001 Randolph Chung <tausq@debian.org> * * This code is derived, with permission, from HP/UX sources. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ /* @@ -70,7 +57,7 @@ struct rdr_tbl_ent { static int perf_processor_interface __read_mostly = UNKNOWN_INTF; static int perf_enabled __read_mostly; static DEFINE_SPINLOCK(perf_lock); -struct parisc_device *cpu_device __read_mostly; +static struct parisc_device *cpu_device __read_mostly; /* RDRs to write for PCX-W */ static const int perf_rdrs_W[] = @@ -301,7 +288,7 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - size_t image_size; + size_t image_size __maybe_unused; uint32_t image_type; uint32_t interface_type; uint32_t test; @@ -313,7 +300,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, else return -EFAULT; - if (!capable(CAP_SYS_ADMIN)) + if (!perfmon_capable()) return -EACCES; if (count != sizeof(uint32_t)) @@ -479,7 +466,6 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } static const struct file_operations perf_fops = { - .llseek = no_llseek, .read = perf_read, .write = perf_write, .unlocked_ioctl = perf_ioctl, @@ -489,9 +475,9 @@ static const struct file_operations perf_fops = { }; static struct miscdevice perf_dev = { - MISC_DYNAMIC_MINOR, - PA_PERF_DEV, - &perf_fops + .minor = MISC_DYNAMIC_MINOR, + .name = PA_PERF_DEV, + .fops = &perf_fops, }; /* @@ -805,7 +791,7 @@ static int perf_write_image(uint64_t *memaddr) return -1; } - runway = ioremap_nocache(cpu_device->hpa.start, 4096); + runway = ioremap(cpu_device->hpa.start, 4096); if (!runway) { pr_err("perf_write_image: ioremap failed!\n"); return -ENOMEM; diff --git a/arch/parisc/kernel/perf_asm.S b/arch/parisc/kernel/perf_asm.S index fa6ea99bb324..8fceabb1a832 100644 --- a/arch/parisc/kernel/perf_asm.S +++ b/arch/parisc/kernel/perf_asm.S @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* low-level asm for "intrigue" (PA8500-8700 CPU perf counters) * * Copyright (C) 2001 Randolph Chung <tausq at parisc-linux.org> * Copyright (C) 2001 Hewlett-Packard (Grant Grundler) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/assembly.h> diff --git a/arch/parisc/kernel/perf_event.c b/arch/parisc/kernel/perf_event.c new file mode 100644 index 000000000000..f90b83886ab4 --- /dev/null +++ b/arch/parisc/kernel/perf_event.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance event support for parisc + * + * Copyright (C) 2025 by Helge Deller <deller@gmx.de> + */ + +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <asm/unwind.h> + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + + struct unwind_frame_info info; + + unwind_frame_init_task(&info, current, NULL); + while (1) { + if (unwind_once(&info) < 0 || info.ip == 0) + break; + + if (!__kernel_text_address(info.ip) || + perf_callchain_store(entry, info.ip)) + return; + } +} diff --git a/arch/parisc/kernel/perf_images.h b/arch/parisc/kernel/perf_images.h index 7fef9644df47..7afa2fd550bf 100644 --- a/arch/parisc/kernel/perf_images.h +++ b/arch/parisc/kernel/perf_images.h @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Imagine for use with the Onyx (PCX-U) CPU interface * * Copyright (C) 2001 Randolph Chung <tausq at parisc-linux.org> * Copyright (C) 2001 Hewlett-Packard (Grant Grundler) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef PERF_IMAGES_H #define PERF_IMAGES_H @@ -25,7 +12,7 @@ #define PCXU_IMAGE_SIZE 584 -static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = { +static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __ro_after_init = { /* * CPI: * @@ -2093,7 +2080,7 @@ static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = }; #define PCXW_IMAGE_SIZE 576 -static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = { +static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] __ro_after_init = { /* * CPI: FROM CPI.IDF (Image 0) * diff --git a/arch/parisc/kernel/perf_regs.c b/arch/parisc/kernel/perf_regs.c new file mode 100644 index 000000000000..10a1a5f06a18 --- /dev/null +++ b/arch/parisc/kernel/perf_regs.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2025 by Helge Deller <deller@gmx.de> */ + +#include <linux/perf_event.h> +#include <linux/perf_regs.h> +#include <asm/ptrace.h> + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + switch (idx) { + case PERF_REG_PARISC_R0 ... PERF_REG_PARISC_R31: + return regs->gr[idx - PERF_REG_PARISC_R0]; + case PERF_REG_PARISC_SR0 ... PERF_REG_PARISC_SR7: + return regs->sr[idx - PERF_REG_PARISC_SR0]; + case PERF_REG_PARISC_IASQ0 ... 
PERF_REG_PARISC_IASQ1: + return regs->iasq[idx - PERF_REG_PARISC_IASQ0]; + case PERF_REG_PARISC_IAOQ0 ... PERF_REG_PARISC_IAOQ1: + return regs->iasq[idx - PERF_REG_PARISC_IAOQ0]; + case PERF_REG_PARISC_SAR: /* CR11 */ + return regs->sar; + case PERF_REG_PARISC_IIR: /* CR19 */ + return regs->iir; + case PERF_REG_PARISC_ISR: /* CR20 */ + return regs->isr; + case PERF_REG_PARISC_IOR: /* CR21 */ + return regs->ior; + case PERF_REG_PARISC_IPSW: /* CR22 */ + return regs->ipsw; + } + WARN_ON_ONCE((u32)idx >= PERF_REG_PARISC_MAX); + return 0; +} + +#define REG_RESERVED (~((1ULL << PERF_REG_PARISC_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + if (!IS_ENABLED(CONFIG_64BIT)) + return PERF_SAMPLE_REGS_ABI_32; + + if (test_tsk_thread_flag(task, TIF_32BIT)) + return PERF_SAMPLE_REGS_ABI_32; + + return PERF_SAMPLE_REGS_ABI_64; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index eb39e7e380d7..e64ab5d2a40d 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * PARISC Architecture-dependent parts of process handling * based on the work for i386 @@ -15,25 +16,7 @@ * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org> * Copyright (C) 2001-2014 Helge Deller <deller@gmx.de> * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#include <stdarg.h> - #include <linux/elf.h> #include <linux/errno.h> #include <linux/kernel.h> @@ -43,6 +26,7 @@ #include <linux/module.h> #include <linux/personality.h> #include <linux/ptrace.h> +#include <linux/reboot.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task.h> @@ -55,15 +39,16 @@ #include <linux/rcupdate.h> #include <linux/random.h> #include <linux/nmi.h> +#include <linux/sched/hotplug.h> #include <asm/io.h> #include <asm/asm-offsets.h> #include <asm/assembly.h> #include <asm/pdc.h> #include <asm/pdc_chassis.h> -#include <asm/pgalloc.h> #include <asm/unwind.h> #include <asm/sections.h> +#include <asm/cacheflush.h> #define COMMAND_GLOBAL F_EXTEND(0xfffe0030) #define CMD_RESET 5 /* reset any module */ @@ -112,18 +97,12 @@ void machine_restart(char *cmd) } -void (*chassis_power_off)(void); - /* * This routine is called from sys_reboot to actually turn off the * machine */ void machine_power_off(void) { - /* If there is a registered power off handler, call it. */ - if (chassis_power_off) - chassis_power_off(); - /* Put the soft power button back under hardware control. 
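
The perf_reg_value() helper added in the new perf_regs.c above maps a flat sample-register index onto fields of struct pt_regs using GNU case ranges, and perf_reg_validate() only accepts masks below PERF_REG_PARISC_MAX. A minimal, self-contained userspace analogue of that lookup pattern; the enum values and the register layout here are invented for illustration and are not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative register file; not the kernel's struct pt_regs. */
    struct demo_regs {
        uint64_t gr[32];   /* general registers */
        uint64_t sr[8];    /* space registers   */
        uint64_t sar;      /* shift amount reg  */
    };

    enum { DEMO_R0 = 0, DEMO_R31 = 31, DEMO_SR0 = 32, DEMO_SR7 = 39,
           DEMO_SAR = 40, DEMO_MAX };

    /* Same shape as perf_reg_value(): GNU case ranges pick the backing array. */
    static uint64_t demo_reg_value(const struct demo_regs *regs, int idx)
    {
        switch (idx) {
        case DEMO_R0 ... DEMO_R31:
            return regs->gr[idx - DEMO_R0];
        case DEMO_SR0 ... DEMO_SR7:
            return regs->sr[idx - DEMO_SR0];
        case DEMO_SAR:
            return regs->sar;
        }
        return 0;   /* unknown index: mirror the kernel's WARN-and-return-0 */
    }

    int main(void)
    {
        struct demo_regs r = { .gr = { [28] = 0xdeadbeef }, .sar = 7 };

        printf("r28 = %#llx, sar = %llu\n",
               (unsigned long long)demo_reg_value(&r, 28),
               (unsigned long long)demo_reg_value(&r, DEMO_SAR));
        return 0;
    }

A validate step in the same spirit would reject any mask with bits at or above DEMO_MAX, e.g. mask & ~((1ULL << DEMO_MAX) - 1).
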
* If the user had already pressed the power button, the * following call will immediately power off. */ @@ -132,19 +111,23 @@ void machine_power_off(void) pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN); /* ipmi_poweroff may have been installed. */ - if (pm_power_off) - pm_power_off(); + do_kernel_power_off(); /* It seems we have no way to power the system off via * software. The user has to press the button himself. */ - printk(KERN_EMERG "System shut down completed.\n" - "Please power this system off now."); + printk("Power off or press RETURN to reboot.\n"); /* prevent soft lockup/stalled CPU messages for endless loop. */ rcu_sysrq_start(); lockup_detector_soft_poweroff(); - for (;;); + while (1) { + /* reboot if user presses RETURN key */ + if (pdc_iodc_getc() == 13) { + printk("Rebooting...\n"); + machine_restart(NULL); + } + } } void (*pm_power_off)(void); @@ -162,29 +145,6 @@ void flush_thread(void) */ } -void release_thread(struct task_struct *dead_task) -{ -} - -/* - * Fill in the FPU structure for a core dump. - */ - -int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r) -{ - if (regs == NULL) - return 0; - - memcpy(r, regs->fr, sizeof *r); - return 1; -} - -int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) -{ - memcpy(r, tsk->thread.regs.fr, sizeof(*r)); - return 1; -} - /* * Idle thread support * @@ -192,30 +152,42 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) * QEMU idle the host too. */ -int running_on_qemu __read_mostly; +int running_on_qemu __ro_after_init; +EXPORT_SYMBOL(running_on_qemu); -void __cpuidle arch_cpu_idle_dead(void) +/* + * Called from the idle thread for the CPU which has been shutdown. + */ +void __noreturn arch_cpu_idle_dead(void) { - /* nop on real hardware, qemu will offline CPU. */ - asm volatile("or %%r31,%%r31,%%r31\n":::); +#ifdef CONFIG_HOTPLUG_CPU + idle_task_exit(); + + local_irq_disable(); + + /* Tell the core that this CPU is now safe to dispose of. */ + cpuhp_ap_report_dead(); + + /* Ensure that the cache lines are written out. */ + flush_cache_all_local(); + flush_tlb_all_local(NULL); + + /* Let PDC firmware put CPU into firmware idle loop. */ + __pdc_cpu_rendezvous(); + + pr_warn("PDC does not provide rendezvous function.\n"); +#endif + while (1); } void __cpuidle arch_cpu_idle(void) { - local_irq_enable(); - /* nop on real hardware, qemu will idle sleep. 
*/ asm volatile("or %%r10,%%r10,%%r10\n":::); } static int __init parisc_idle_init(void) { - const char *marker; - - /* check QEMU/SeaBIOS marker in PAGE0 */ - marker = (char *) &PAGE0->pad0; - running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0); - if (!running_on_qemu) cpu_idle_poll_ctrl(1); @@ -227,9 +199,11 @@ arch_initcall(parisc_idle_init); * Copy architecture-specific thread state */ int -copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long kthread_arg, struct task_struct *p) +copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { + u64 clone_flags = args->flags; + unsigned long usp = args->stack; + unsigned long tls = args->tls; struct pt_regs *cregs = &(p->thread.regs); void *stack = task_stack_page(p); @@ -239,27 +213,27 @@ copy_thread(unsigned long clone_flags, unsigned long usp, extern void * const ret_from_kernel_thread; extern void * const child_return; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(args->fn)) { /* kernel thread */ memset(cregs, 0, sizeof(struct pt_regs)); - if (!usp) /* idle thread */ + if (args->idle) /* idle thread */ return 0; /* Must exit via ret_from_kernel_thread in order * to call schedule_tail() */ - cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE; + cregs->ksp = (unsigned long) stack + FRAME_SIZE + PT_SZ_ALGN; cregs->kpc = (unsigned long) &ret_from_kernel_thread; /* * Copy function and argument to be called from * ret_from_kernel_thread. */ #ifdef CONFIG_64BIT - cregs->gr[27] = ((unsigned long *)usp)[3]; - cregs->gr[26] = ((unsigned long *)usp)[2]; + cregs->gr[27] = ((unsigned long *)args->fn)[3]; + cregs->gr[26] = ((unsigned long *)args->fn)[2]; #else - cregs->gr[26] = usp; + cregs->gr[26] = (unsigned long) args->fn; #endif - cregs->gr[25] = kthread_arg; + cregs->gr[25] = (unsigned long) args->fn_arg; } else { /* user thread */ /* usp must be word aligned. This also prevents users from @@ -270,27 +244,24 @@ copy_thread(unsigned long clone_flags, unsigned long usp, if (likely(usp)) cregs->gr[30] = usp; } - cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE; + cregs->ksp = (unsigned long) stack + FRAME_SIZE; cregs->kpc = (unsigned long) &child_return; - /* Setup thread TLS area from the 4th parameter in clone */ + /* Setup thread TLS area */ if (clone_flags & CLONE_SETTLS) - cregs->cr27 = cregs->gr[23]; + cregs->cr27 = tls; } return 0; } unsigned long -get_wchan(struct task_struct *p) +__get_wchan(struct task_struct *p) { struct unwind_frame_info info; unsigned long ip; int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - /* * These bracket the sleeping functions.. 
*/ @@ -299,44 +270,11 @@ get_wchan(struct task_struct *p) do { if (unwind_once(&info) < 0) return 0; + if (task_is_running(p)) + return 0; ip = info.ip; if (!in_sched_functions(ip)) return ip; } while (count++ < MAX_UNWIND_ENTRIES); return 0; } - -#ifdef CONFIG_64BIT -void *dereference_function_descriptor(void *ptr) -{ - Elf64_Fdesc *desc = ptr; - void *p; - - if (!probe_kernel_address(&desc->addr, p)) - ptr = p; - return ptr; -} - -void *dereference_kernel_function_descriptor(void *ptr) -{ - if (ptr < (void *)__start_opd || - ptr >= (void *)__end_opd) - return ptr; - - return dereference_function_descriptor(ptr); -} -#endif - -static inline unsigned long brk_rnd(void) -{ - return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT; -} - -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); - - if (ret < mm->brk) - return mm->brk; - return ret; -} diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 82bd0d0927ce..bf73562706b2 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Initial setup-routines for HP 9000 based hardware. * @@ -9,21 +10,6 @@ * Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net> * * Initial PA-RISC Version: 04-23-1999 by Helge Deller - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * */ #include <linux/delay.h> #include <linux/init.h> @@ -33,20 +19,22 @@ #include <linux/random.h> #include <linux/slab.h> #include <linux/cpu.h> +#include <asm/topology.h> #include <asm/param.h> #include <asm/cache.h> #include <asm/hardware.h> /* for register_parisc_driver() stuff */ #include <asm/processor.h> #include <asm/page.h> #include <asm/pdc.h> +#include <asm/smp.h> #include <asm/pdcpat.h> #include <asm/irq.h> /* for struct irq_region */ #include <asm/parisc-device.h> -struct system_cpuinfo_parisc boot_cpu_data __read_mostly; +struct system_cpuinfo_parisc boot_cpu_data __ro_after_init; EXPORT_SYMBOL(boot_cpu_data); #ifdef CONFIG_PA8X00 -int _parisc_requires_coherency __read_mostly; +int _parisc_requires_coherency __ro_after_init; EXPORT_SYMBOL(_parisc_requires_coherency); #endif @@ -71,7 +59,7 @@ DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data); */ /** - * init_cpu_profiler - enable/setup per cpu profiling hooks. + * init_percpu_prof - enable/setup per cpu profiling hooks. * @cpunum: The processor instance. * * FIXME: doesn't do much yet... 
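
The process.c hunk above converts get_wchan() to __get_wchan() and keeps the same idea: unwind the blocked task's frames one at a time and report the first instruction pointer that is not inside the scheduler. A rough, self-contained analogue of that loop; the "unwinder" here is a stub table rather than the parisc unwind API:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_UNWIND_ENTRIES 64   /* loop cap, mirroring the kernel's bound */

    /* Stub "unwinder": a pre-recorded call chain, innermost frame first. */
    static const unsigned long demo_chain[] = {
        0x1000 /* "schedule" */, 0x1100 /* "schedule_timeout" */, 0x2000 /* "do_wait" */
    };
    static int demo_pos;

    static long unwind_once_stub(unsigned long *ip)
    {
        if (demo_pos >= (int)(sizeof(demo_chain) / sizeof(demo_chain[0])))
            return -1;              /* ran out of frames */
        *ip = demo_chain[demo_pos++];
        return 0;
    }

    /* Pretend everything below 0x2000 is scheduler code. */
    static bool in_sched_functions_stub(unsigned long ip)
    {
        return ip < 0x2000;
    }

    /* Same shape as __get_wchan(): the first non-scheduler IP wins. */
    static unsigned long get_wchan_stub(void)
    {
        unsigned long ip;
        int count = 0;

        do {
            if (unwind_once_stub(&ip) < 0)
                return 0;
            if (!in_sched_functions_stub(ip))
                return ip;
        } while (count++ < MAX_UNWIND_ENTRIES);
        return 0;
    }

    int main(void)
    {
        printf("wchan = %#lx\n", get_wchan_stub());  /* prints 0x2000 */
        return 0;
    }

The new perf_callchain_kernel() in perf_event.c walks the same unwinder, only it stores every kernel-text IP instead of stopping at the first non-scheduler one.
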
@@ -177,7 +165,6 @@ static int __init processor_probe(struct parisc_device *dev) if (cpuid) memset(p, 0, sizeof(struct cpuinfo_parisc)); - p->loops_per_jiffy = loops_per_jiffy; p->dev = dev; /* Save IODC data in case we need it */ p->hpa = dev->hpa.start; /* save CPU hpa */ p->cpuid = cpuid; /* save CPU id */ @@ -226,7 +213,7 @@ static int __init processor_probe(struct parisc_device *dev) #ifdef CONFIG_SMP if (cpuid) { set_cpu_present(cpuid, true); - cpu_up(cpuid); + add_cpu(cpuid); } #endif @@ -242,6 +229,7 @@ static int __init processor_probe(struct parisc_device *dev) void __init collect_boot_cpu_data(void) { unsigned long cr16_seed; + char orig_prod_num[64], current_prod_num[64], serial_no[64]; memset(&boot_cpu_data, 0, sizeof(boot_cpu_data)); @@ -253,9 +241,9 @@ void __init collect_boot_cpu_data(void) /* get CPU-Model Information... */ #define p ((unsigned long *)&boot_cpu_data.pdc.model) if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) { - printk(KERN_INFO - "model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", - p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]); + printk(KERN_INFO + "model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9]); add_device_randomness(&boot_cpu_data.pdc.model, sizeof(boot_cpu_data.pdc.model)); @@ -284,10 +272,15 @@ void __init collect_boot_cpu_data(void) printk(KERN_INFO "capabilities 0x%lx\n", boot_cpu_data.pdc.capabilities); - if (pdc_model_sysmodel(boot_cpu_data.pdc.sys_model_name) == PDC_OK) - printk(KERN_INFO "model %s\n", + if (pdc_model_sysmodel(OS_ID_HPUX, boot_cpu_data.pdc.sys_model_name) == PDC_OK) + pr_info("HP-UX model name: %s\n", boot_cpu_data.pdc.sys_model_name); + serial_no[0] = 0; + if (pdc_model_sysmodel(OS_ID_MPEXL, serial_no) == PDC_OK && + serial_no[0]) + pr_info("MPE/iX model name: %s\n", serial_no); + dump_stack_set_arch_desc("%s", boot_cpu_data.pdc.sys_model_name); boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion; @@ -301,6 +294,15 @@ void __init collect_boot_cpu_data(void) _parisc_requires_coherency = (boot_cpu_data.cpu_type == mako) || (boot_cpu_data.cpu_type == mako2); #endif + + if (pdc_model_platform_info(orig_prod_num, current_prod_num, serial_no) == PDC_OK) { + printk(KERN_INFO "product %s, original product %s, S/N: %s\n", + current_prod_num[0] ? current_prod_num : "n/a", + orig_prod_num, serial_no); + add_device_randomness(orig_prod_num, strlen(orig_prod_num)); + add_device_randomness(current_prod_num, strlen(current_prod_num)); + add_device_randomness(serial_no, strlen(serial_no)); + } } @@ -322,7 +324,7 @@ void __init collect_boot_cpu_data(void) * * o Enable CPU profiling hooks. 
*/ -int __init init_per_cpu(int cpunum) +int init_per_cpu(int cpunum) { int ret; struct pdc_coproc_cfg coproc_cfg; @@ -330,8 +332,6 @@ int __init init_per_cpu(int cpunum) set_firmware_width(); ret = pdc_coproc_cfg(&coproc_cfg); - store_cpu_topology(cpunum); - if(ret >= 0 && coproc_cfg.ccr_functional) { mtctl(coproc_cfg.ccr_functional, 10); /* 10 == Coprocessor Control Reg */ @@ -367,6 +367,8 @@ int __init init_per_cpu(int cpunum) /* FUTURE: Enable Performance Monitor : ccr bit 0x20 */ init_percpu_prof(cpunum); + btlb_init_per_cpu(); + return ret; } @@ -377,10 +379,18 @@ int show_cpuinfo (struct seq_file *m, void *v) { unsigned long cpu; + char cpu_name[60], *p; + + /* strip PA path from CPU name to not confuse lscpu */ + strscpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name)); + p = strrchr(cpu_name, '['); + if (p) + *(--p) = 0; for_each_online_cpu(cpu) { - const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); #ifdef CONFIG_SMP + const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); + if (0 == cpuinfo->hpa) continue; #endif @@ -395,7 +405,7 @@ show_cpuinfo (struct seq_file *m, void *v) boot_cpu_data.cpu_hz / 1000000, boot_cpu_data.cpu_hz % 1000000 ); -#ifdef CONFIG_PARISC_CPU_TOPOLOGY +#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY seq_printf(m, "physical id\t: %d\n", topology_physical_package_id(cpu)); seq_printf(m, "siblings\t: %d\n", @@ -423,11 +433,9 @@ show_cpuinfo (struct seq_file *m, void *v) } seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities); - seq_printf(m, "model\t\t: %s\n" - "model name\t: %s\n", + seq_printf(m, "model\t\t: %s - %s\n", boot_cpu_data.pdc.sys_model_name, - cpuinfo->dev ? - cpuinfo->dev->name : "Unknown"); + cpu_name); seq_printf(m, "hversion\t: 0x%08x\n" "sversion\t: 0x%08x\n", @@ -438,8 +446,8 @@ show_cpuinfo (struct seq_file *m, void *v) show_cache_info(m); seq_printf(m, "bogomips\t: %lu.%02lu\n", - cpuinfo->loops_per_jiffy / (500000 / HZ), - (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100); + loops_per_jiffy / (500000 / HZ), + loops_per_jiffy / (5000 / HZ) % 100); seq_printf(m, "software id\t: %ld\n\n", boot_cpu_data.pdc.model.sw_id); @@ -465,5 +473,6 @@ static struct parisc_driver cpu_driver __refdata = { */ void __init processor_init(void) { + reset_cpu_topology(); register_parisc_driver(&cpu_driver); } diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2582df1c529b..8a17ab7e6e0b 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -15,7 +15,6 @@ #include <linux/elf.h> #include <linux/errno.h> #include <linux/ptrace.h> -#include <linux/tracehook.h> #include <linux/user.h> #include <linux/personality.h> #include <linux/regset.h> @@ -26,7 +25,6 @@ #include <linux/audit.h> #include <linux/uaccess.h> -#include <asm/pgtable.h> #include <asm/processor.h> #include <asm/asm-offsets.h> @@ -88,9 +86,9 @@ void user_enable_single_step(struct task_struct *task) ptrace_disable(task); /* Don't wake up the task, but let the parent know something happened. 
*/ - force_sig_fault(SIGTRAP, TRAP_TRACE, - (void __user *) (task_regs(task)->iaoq[0] & ~3), - task); + force_sig_fault_to_task(SIGTRAP, TRAP_TRACE, + (void __user *) (task_regs(task)->iaoq[0] & ~3), + task); /* notify_parent(task, SIGCHLD); */ return; } @@ -128,6 +126,12 @@ long arch_ptrace(struct task_struct *child, long request, unsigned long tmp; long ret = -EIO; + unsigned long user_regs_struct_size = sizeof(struct user_regs_struct); +#ifdef CONFIG_64BIT + if (is_compat_task()) + user_regs_struct_size /= 2; +#endif + switch (request) { /* Read the word at location addr in the USER area. For ptraced @@ -167,6 +171,9 @@ long arch_ptrace(struct task_struct *child, long request, if ((addr & (sizeof(unsigned long)-1)) || addr >= sizeof(struct pt_regs)) break; + if (addr == PT_IAOQ0 || addr == PT_IAOQ1) { + data |= PRIV_USER; /* ensure userspace privilege */ + } if ((addr >= PT_GR1 && addr <= PT_GR31) || addr == PT_IAOQ0 || addr == PT_IAOQ1 || (addr >= PT_FR0 && addr <= PT_FR31 + 4) || @@ -180,14 +187,14 @@ long arch_ptrace(struct task_struct *child, long request, return copy_regset_to_user(child, task_user_regset_view(current), REGSET_GENERAL, - 0, sizeof(struct user_regs_struct), + 0, user_regs_struct_size, datap); case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_GENERAL, - 0, sizeof(struct user_regs_struct), + 0, user_regs_struct_size, datap); case PTRACE_GETFPREGS: /* Get the child FPU state. */ @@ -228,16 +235,18 @@ long arch_ptrace(struct task_struct *child, long request, static compat_ulong_t translate_usr_offset(compat_ulong_t offset) { - if (offset < 0) - return sizeof(struct pt_regs); - else if (offset <= 32*4) /* gr[0..31] */ - return offset * 2 + 4; - else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */ - return offset + 32*4; - else if (offset < sizeof(struct pt_regs)/2 + 32*4) - return offset * 2 + 4 - 32*8; + compat_ulong_t pos; + + if (offset < 32*4) /* gr[0..31] */ + pos = offset * 2 + 4; + else if (offset < 32*4+32*8) /* fr[0] ... fr[31] */ + pos = (offset - 32*4) + PT_FR0; + else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... 
ipsw */ + pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4; else - return sizeof(struct pt_regs); + pos = sizeof(struct pt_regs); + + return pos; } long compat_arch_ptrace(struct task_struct *child, compat_long_t request, @@ -281,9 +290,12 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, addr = translate_usr_offset(addr); if (addr >= sizeof(struct pt_regs)) break; + if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) { + data |= PRIV_USER; /* ensure userspace privilege */ + } if (addr >= PT_FR0 && addr <= PT_FR31 + 4) { /* Special case, fp regs are 64 bits anyway */ - *(__u64 *) ((char *) task_regs(child) + addr) = data; + *(__u32 *) ((char *) task_regs(child) + addr) = data; ret = 0; } else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) || @@ -296,6 +308,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } } break; + case PTRACE_GETREGS: + case PTRACE_SETREGS: + case PTRACE_GETFPREGS: + case PTRACE_SETFPREGS: + return arch_ptrace(child, request, addr, data); default: ret = compat_ptrace_request(child, request, addr, data); @@ -308,19 +325,33 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, long do_syscall_trace_enter(struct pt_regs *regs) { - if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) { + if (test_thread_flag(TIF_SYSCALL_TRACE)) { + int rc = ptrace_report_syscall_entry(regs); + /* - * Tracing decided this syscall should not happen or the - * debugger stored an invalid system call number. Skip - * the system call and the system call restart handling. + * As tracesys_next does not set %r28 to -ENOSYS + * when %r20 is set to -1, initialize it here. */ - regs->gr[20] = -1UL; - goto out; + regs->gr[28] = -ENOSYS; + + if (rc) { + /* + * A nonzero return code from + * ptrace_report_syscall_entry() tells us + * to prevent the syscall execution. Skip + * the syscall call and the syscall restart handling. + * + * Note that the tracer may also just change + * regs->gr[20] to an invalid syscall number, + * that is handled by tracesys_next. + */ + regs->gr[20] = -1UL; + return -1; + } } /* Do the secure computing check after ptrace. 
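
The reworked translate_usr_offset() above converts a 32-bit debugger's register offset into a position inside the 64-bit struct pt_regs: general registers double in size (the 32-bit view lands in the low word, hence the +4), floating-point registers are 64-bit on both sides, and the region from sr[0] to ipsw doubles again. A small standalone sketch of that arithmetic, with placeholder PT_FR0/PT_SR0/size values instead of the real asm-offsets constants:

    #include <stdio.h>

    /*
     * Placeholder layout constants for illustration only; the kernel takes
     * PT_FR0, PT_SR0 and sizeof(struct pt_regs) from asm-offsets.
     */
    #define DEMO_PT_FR0        (32 * 8)               /* fr[] after 32 64-bit GRs  */
    #define DEMO_PT_SR0        (DEMO_PT_FR0 + 32 * 8) /* sr[] after 32 64-bit FRs  */
    #define DEMO_PT_REGS_SIZE  (DEMO_PT_SR0 + 24 * 8) /* sr[], iasq, iaoq, crs ... */

    /* Same mapping as translate_usr_offset(): compat offset -> 64-bit position. */
    static unsigned long demo_translate_usr_offset(unsigned long offset)
    {
        if (offset < 32 * 4)                       /* gr[0..31], 4 bytes each  */
            return offset * 2 + 4;                 /* low half of 64-bit slot  */
        if (offset < 32 * 4 + 32 * 8)              /* fr[0..31], 64-bit anyway */
            return (offset - 32 * 4) + DEMO_PT_FR0;
        if (offset < DEMO_PT_REGS_SIZE / 2 + 32 * 4)   /* sr[0] ... ipsw       */
            return (offset - 32 * 4 - 32 * 8) * 2 + DEMO_PT_SR0 + 4;
        return DEMO_PT_REGS_SIZE;                  /* out of range: rejected   */
    }

    int main(void)
    {
        /* gr[5], as seen by a 32-bit debugger, sits at compat offset 5*4. */
        printf("gr[5] -> %lu\n", demo_translate_usr_offset(5 * 4));
        /* fr[0] starts right after the 32 compat GR slots. */
        printf("fr[0] -> %lu\n", demo_translate_usr_offset(32 * 4));
        return 0;
    }

Callers treat a returned value >= sizeof(struct pt_regs) as "offset not accessible", which is why the last branch hands back the structure size rather than an error code.
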
*/ - if (secure_computing(NULL) == -1) + if (secure_computing() == -1) return -1; #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS @@ -340,7 +371,6 @@ long do_syscall_trace_enter(struct pt_regs *regs) regs->gr[24] & 0xffffffff, regs->gr[23] & 0xffffffff); -out: /* * Sign extend the syscall number to 64bit since it may have been * modified by a compat ptrace call @@ -361,7 +391,7 @@ void do_syscall_trace_exit(struct pt_regs *regs) #endif if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, stepping); + ptrace_report_syscall_exit(regs, stepping); } @@ -371,31 +401,11 @@ void do_syscall_trace_exit(struct pt_regs *regs) static int fpr_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) + struct membuf to) { struct pt_regs *regs = task_regs(target); - __u64 *k = kbuf; - __u64 __user *u = ubuf; - __u64 reg; - - pos /= sizeof(reg); - count /= sizeof(reg); - - if (kbuf) - for (; count > 0 && pos < ELF_NFPREG; --count) - *k++ = regs->fr[pos++]; - else - for (; count > 0 && pos < ELF_NFPREG; --count) - if (__put_user(regs->fr[pos++], u++)) - return -EFAULT; - kbuf = k; - ubuf = u; - pos *= sizeof(reg); - count *= sizeof(reg); - return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, - ELF_NFPREG * sizeof(reg), -1); + return membuf_write(&to, regs->fr, ELF_NFPREG * sizeof(__u64)); } static int fpr_set(struct task_struct *target, @@ -425,8 +435,9 @@ static int fpr_set(struct task_struct *target, ubuf = u; pos *= sizeof(reg); count *= sizeof(reg); - return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - ELF_NFPREG * sizeof(reg), -1); + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + ELF_NFPREG * sizeof(reg), -1); + return 0; } #define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long)) @@ -483,7 +494,8 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val) return; case RI(iaoq[0]): case RI(iaoq[1]): - regs->iaoq[num - RI(iaoq[0])] = val; + /* set 2 lowest bits to ensure userspace privilege: */ + regs->iaoq[num - RI(iaoq[0])] = val | PRIV_USER; return; case RI(sar): regs->sar = val; return; @@ -506,30 +518,14 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val) static int gpr_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) + struct membuf to) { struct pt_regs *regs = task_regs(target); - unsigned long *k = kbuf; - unsigned long __user *u = ubuf; - unsigned long reg; + unsigned int pos; - pos /= sizeof(reg); - count /= sizeof(reg); - - if (kbuf) - for (; count > 0 && pos < ELF_NGREG; --count) - *k++ = get_reg(regs, pos++); - else - for (; count > 0 && pos < ELF_NGREG; --count) - if (__put_user(get_reg(regs, pos++), u++)) - return -EFAULT; - kbuf = k; - ubuf = u; - pos *= sizeof(reg); - count *= sizeof(reg); - return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, - ELF_NGREG * sizeof(reg), -1); + for (pos = 0; pos < ELF_NGREG; pos++) + membuf_store(&to, get_reg(regs, pos)); + return 0; } static int gpr_set(struct task_struct *target, @@ -559,20 +555,21 @@ static int gpr_set(struct task_struct *target, ubuf = u; pos *= sizeof(reg); count *= sizeof(reg); - return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - ELF_NGREG * sizeof(reg), -1); + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + ELF_NGREG * sizeof(reg), -1); + return 0; } static const struct user_regset native_regsets[] = { [REGSET_GENERAL] = { - 
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG, + USER_REGSET_NOTE_TYPE(PRSTATUS), .n = ELF_NGREG, .size = sizeof(long), .align = sizeof(long), - .get = gpr_get, .set = gpr_set + .regset_get = gpr_get, .set = gpr_set }, [REGSET_FP] = { - .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, + USER_REGSET_NOTE_TYPE(PRFPREG), .n = ELF_NFPREG, .size = sizeof(__u64), .align = sizeof(__u64), - .get = fpr_get, .set = fpr_set + .regset_get = fpr_get, .set = fpr_set } }; @@ -582,35 +579,17 @@ static const struct user_regset_view user_parisc_native_view = { }; #ifdef CONFIG_64BIT -#include <linux/compat.h> - static int gpr32_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) + struct membuf to) { struct pt_regs *regs = task_regs(target); - compat_ulong_t *k = kbuf; - compat_ulong_t __user *u = ubuf; - compat_ulong_t reg; - - pos /= sizeof(reg); - count /= sizeof(reg); + unsigned int pos; - if (kbuf) - for (; count > 0 && pos < ELF_NGREG; --count) - *k++ = get_reg(regs, pos++); - else - for (; count > 0 && pos < ELF_NGREG; --count) - if (__put_user((compat_ulong_t) get_reg(regs, pos++), u++)) - return -EFAULT; + for (pos = 0; pos < ELF_NGREG; pos++) + membuf_store(&to, (compat_ulong_t)get_reg(regs, pos)); - kbuf = k; - ubuf = u; - pos *= sizeof(reg); - count *= sizeof(reg); - return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, - ELF_NGREG * sizeof(reg), -1); + return 0; } static int gpr32_set(struct task_struct *target, @@ -640,8 +619,9 @@ static int gpr32_set(struct task_struct *target, ubuf = u; pos *= sizeof(reg); count *= sizeof(reg); - return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - ELF_NGREG * sizeof(reg), -1); + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + ELF_NGREG * sizeof(reg), -1); + return 0; } /* @@ -649,14 +629,14 @@ static int gpr32_set(struct task_struct *target, */ static const struct user_regset compat_regsets[] = { [REGSET_GENERAL] = { - .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, + USER_REGSET_NOTE_TYPE(PRSTATUS), .n = ELF_NGREG, .size = sizeof(compat_long_t), .align = sizeof(compat_long_t), - .get = gpr32_get, .set = gpr32_set + .regset_get = gpr32_get, .set = gpr32_set }, [REGSET_FP] = { - .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, + USER_REGSET_NOTE_TYPE(PRFPREG), .n = ELF_NFPREG, .size = sizeof(__u64), .align = sizeof(__u64), - .get = fpr_get, .set = fpr_set + .regset_get = fpr_get, .set = fpr_set } }; @@ -776,3 +756,38 @@ const char *regs_query_register_name(unsigned int offset) return roff->name; return NULL; } + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return ((addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. 
+ */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr -= n; + + if (!regs_within_kernel_stack(regs, (unsigned long)addr)) + return 0; + + return *addr; +} diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S index 2b16d8d6598f..509d18b8e0e6 100644 --- a/arch/parisc/kernel/real2.S +++ b/arch/parisc/kernel/real2.S @@ -15,28 +15,15 @@ #include <linux/linkage.h> - - .section .bss - - .export pdc_result - .export pdc_result2 - .align 8 -pdc_result: - .block ASM_PDC_RESULT_SIZE -pdc_result2: - .block ASM_PDC_RESULT_SIZE - .export real_stack - .export real32_stack .export real64_stack - .align 64 + __PAGE_ALIGNED_BSS real_stack: -real32_stack: real64_stack: .block 8192 #define N_SAVED_REGS 9 - + .section .bss save_cr_space: .block REG_SZ * N_SAVED_REGS save_cr_end: @@ -248,9 +235,6 @@ ENTRY_CFI(real64_call_asm) /* save fn */ copy %arg2, %r31 - /* set up the new ap */ - ldo 64(%arg1), %r29 - /* load up the arg registers from the saved arg area */ /* 32-bit calling convention passes first 4 args in registers */ ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */ @@ -262,7 +246,9 @@ ENTRY_CFI(real64_call_asm) ldd 7*REG_SZ(%arg1), %r19 ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */ + /* set up real-mode stack and real-mode ap */ tophys_r1 %sp + ldo -16(%sp), %r29 /* Reference param save area */ b,l rfi_virt2real,%r2 nop diff --git a/arch/parisc/kernel/relocate_kernel.S b/arch/parisc/kernel/relocate_kernel.S new file mode 100644 index 000000000000..2561e52b8d9b --- /dev/null +++ b/arch/parisc/kernel/relocate_kernel.S @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> +#include <linux/kexec.h> + +#include <asm/assembly.h> +#include <asm/asm-offsets.h> +#include <asm/page.h> +#include <asm/setup.h> +#include <asm/psw.h> + +.level PA_ASM_LEVEL + +.macro kexec_param name +.align 8 +ENTRY(kexec\()_\name) +#ifdef CONFIG_64BIT + .dword 0 +#else + .word 0 +#endif + +ENTRY(kexec\()_\name\()_offset) + .word kexec\()_\name - relocate_new_kernel +.endm + +.text + +/* args: + * r26 - kimage->head + * r25 - start address of kernel + * r24 - physical address of relocate code + */ + +ENTRY_CFI(relocate_new_kernel) +0: copy %arg1, %rp + /* disable I and Q bit, so we are allowed to execute RFI */ + rsm PSW_SM_I, %r0 + nop + nop + nop + nop + nop + nop + nop + + rsm PSW_SM_Q, %r0 + nop + nop + nop + nop + nop + nop + nop + + /* + * After return-from-interrupt, we want to run without Code/Data + * translation enabled just like on a normal boot. + */ + + /* calculate new physical execution address */ + ldo 1f-0b(%arg2), %r1 + mtctl %r0, %cr17 /* IIASQ */ + mtctl %r0, %cr17 /* IIASQ */ + mtctl %r1, %cr18 /* IIAOQ */ + ldo 4(%r1),%r1 + mtctl %r1, %cr18 /* IIAOQ */ +#ifdef CONFIG_64BIT + depdi,z 1, PSW_W_BIT, 1, %r1 + mtctl %r1, %cr22 /* IPSW */ +#else + mtctl %r0, %cr22 /* IPSW */ +#endif + /* lets go... 
*/ + rfi +1: nop + nop + +.Lloop: + LDREG,ma REG_SZ(%arg0), %r3 + /* If crash kernel, no copy needed */ + cmpib,COND(=),n 0,%r3,boot + + bb,<,n %r3, 31 - IND_DONE_BIT, boot + bb,>=,n %r3, 31 - IND_INDIRECTION_BIT, .Lnotind + /* indirection, load and restart */ + movb %r3, %arg0, .Lloop + depi 0, 31, PAGE_SHIFT, %arg0 + +.Lnotind: + bb,>=,n %r3, 31 - IND_DESTINATION_BIT, .Lnotdest + b .Lloop + copy %r3, %r20 + +.Lnotdest: + bb,>= %r3, 31 - IND_SOURCE_BIT, .Lloop + depi 0, 31, PAGE_SHIFT, %r3 + copy %r3, %r21 + + /* copy page */ + copy %r0, %r18 + zdepi 1, 31 - PAGE_SHIFT, 1, %r18 + add %r20, %r18, %r17 + + depi 0, 31, PAGE_SHIFT, %r20 +.Lcopy: + copy %r20, %r12 + LDREG,ma REG_SZ(%r21), %r8 + LDREG,ma REG_SZ(%r21), %r9 + LDREG,ma REG_SZ(%r21), %r10 + LDREG,ma REG_SZ(%r21), %r11 + STREG,ma %r8, REG_SZ(%r20) + STREG,ma %r9, REG_SZ(%r20) + STREG,ma %r10, REG_SZ(%r20) + STREG,ma %r11, REG_SZ(%r20) + +#ifndef CONFIG_64BIT + LDREG,ma REG_SZ(%r21), %r8 + LDREG,ma REG_SZ(%r21), %r9 + LDREG,ma REG_SZ(%r21), %r10 + LDREG,ma REG_SZ(%r21), %r11 + STREG,ma %r8, REG_SZ(%r20) + STREG,ma %r9, REG_SZ(%r20) + STREG,ma %r10, REG_SZ(%r20) + STREG,ma %r11, REG_SZ(%r20) +#endif + + fdc %r0(%r12) + cmpb,COND(<<) %r20,%r17,.Lcopy + fic (%sr4, %r12) + b,n .Lloop + +boot: + mtctl %r0, %cr15 + + LDREG kexec_free_mem-0b(%arg2), %arg0 + LDREG kexec_cmdline-0b(%arg2), %arg1 + LDREG kexec_initrd_end-0b(%arg2), %arg3 + LDREG kexec_initrd_start-0b(%arg2), %arg2 + bv,n %r0(%rp) + +ENDPROC_CFI(relocate_new_kernel); + +ENTRY(relocate_new_kernel_size) + .word relocate_new_kernel_size - relocate_new_kernel + +kexec_param cmdline +kexec_param initrd_start +kexec_param initrd_end +kexec_param free_mem diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index f2cf86ac279b..ace483b6f19a 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Initial setup-routines for HP 9000 based hardware. * @@ -9,21 +10,6 @@ * Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net> * * Initial PA-RISC Version: 04-23-1999 by Helge Deller - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * */ #include <linux/kernel.h> @@ -40,11 +26,11 @@ #include <linux/sched/clock.h> #include <linux/start_kernel.h> +#include <asm/cacheflush.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/pdc.h> #include <asm/led.h> -#include <asm/machdep.h> /* for pa7300lc_init() proto */ #include <asm/pdc_chassis.h> #include <asm/io.h> #include <asm/setup.h> @@ -53,40 +39,48 @@ static char __initdata command_line[COMMAND_LINE_SIZE]; -/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */ -struct proc_dir_entry * proc_runway_root __read_mostly = NULL; -struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; -struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL; - -void __init setup_cmdline(char **cmdline_p) +static void __init setup_cmdline(char **cmdline_p) { extern unsigned int boot_args[]; + char *p; - /* Collect stuff passed in from the boot loader */ + *cmdline_p = command_line; /* boot_args[0] is free-mem start, boot_args[1] is ptr to command line */ - if (boot_args[0] < 64) { - /* called from hpux boot loader */ - boot_command_line[0] = '\0'; - } else { - strlcpy(boot_command_line, (char *)__va(boot_args[1]), - COMMAND_LINE_SIZE); + if (boot_args[0] < 64) + return; /* return if called from hpux boot loader */ + + /* Collect stuff passed in from the boot loader */ + strscpy(boot_command_line, (char *)__va(boot_args[1]), + COMMAND_LINE_SIZE); + + /* autodetect console type (if not done by palo yet) */ + p = boot_command_line; + if (!str_has_prefix(p, "console=") && !strstr(p, " console=")) { + strlcat(p, " console=", COMMAND_LINE_SIZE); + if (PAGE0->mem_cons.cl_class == CL_DUPLEX) + strlcat(p, "ttyS0", COMMAND_LINE_SIZE); + else + strlcat(p, "tty0", COMMAND_LINE_SIZE); + } + + /* default to use early console */ + if (!strstr(p, "earlycon")) + strlcat(p, " earlycon=pdc", COMMAND_LINE_SIZE); #ifdef CONFIG_BLK_DEV_INITRD - if (boot_args[2] != 0) /* did palo pass us a ramdisk? */ - { - initrd_start = (unsigned long)__va(boot_args[2]); - initrd_end = (unsigned long)__va(boot_args[3]); - } -#endif + /* did palo pass us a ramdisk? */ + if (boot_args[2] != 0) { + initrd_start = (unsigned long)__va(boot_args[2]); + initrd_end = (unsigned long)__va(boot_args[3]); } +#endif - strcpy(command_line, boot_command_line); - *cmdline_p = command_line; + strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE); } #ifdef CONFIG_PA11 -void __init dma_ops_init(void) +static void __init dma_ops_init(void) { switch (boot_cpu_data.cpu_type) { case pcx: @@ -98,21 +92,14 @@ void __init dma_ops_init(void) "the PA-RISC 1.1 or 2.0 architecture specification.\n"); case pcxl2: - pa7300lc_init(); - break; default: break; } } #endif -extern void collect_boot_cpu_data(void); - void __init setup_arch(char **cmdline_p) { -#ifdef CONFIG_64BIT - extern int parisc_narrow_firmware; -#endif unwind_init(); init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */ @@ -141,8 +128,6 @@ void __init setup_arch(char **cmdline_p) if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE) panic("KERNEL_INITIAL_ORDER too small!"); - pdc_console_init(); - #ifdef CONFIG_64BIT if(parisc_narrow_firmware) { printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n"); @@ -155,28 +140,16 @@ void __init setup_arch(char **cmdline_p) parisc_cache_init(); paging_init(); -#ifdef CONFIG_CHASSIS_LCD_LED - /* initialize the LCD/LED after boot_cpu_data is available ! 
*/ - led_init(); /* LCD/LED initialization */ -#endif - #ifdef CONFIG_PA11 dma_ops_init(); #endif -#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE) - conswitchp = &dummy_con; /* we use do_take_over_console() later ! */ -#endif - clear_sched_clock_stable(); } /* * Display CPU info for all CPUs. - * for parisc this is in processor.c */ -extern int show_cpuinfo (struct seq_file *m, void *v); - static void * c_start (struct seq_file *m, loff_t *pos) { @@ -207,48 +180,6 @@ const struct seq_operations cpuinfo_op = { .show = show_cpuinfo }; -static void __init parisc_proc_mkdir(void) -{ - /* - ** Can't call proc_mkdir() until after proc_root_init() has been - ** called by start_kernel(). In other words, this code can't - ** live in arch/.../setup.c because start_parisc() calls - ** start_kernel(). - */ - switch (boot_cpu_data.cpu_type) { - case pcxl: - case pcxl2: - if (NULL == proc_gsc_root) - { - proc_gsc_root = proc_mkdir("bus/gsc", NULL); - } - break; - case pcxt_: - case pcxu: - case pcxu_: - case pcxw: - case pcxw_: - case pcxw2: - if (NULL == proc_runway_root) - { - proc_runway_root = proc_mkdir("bus/runway", NULL); - } - break; - case mako: - case mako2: - if (NULL == proc_mckinley_root) - { - proc_mckinley_root = proc_mkdir("bus/mckinley", NULL); - } - break; - default: - /* FIXME: this was added to prevent the compiler - * complaining about missing pcx, pcxs and pcxt - * I'm assuming they have neither gsc nor runway */ - break; - } -} - static struct resource central_bus = { .name = "Central Bus", .start = F_EXTEND(0xfff80000), @@ -285,7 +216,7 @@ static int __init parisc_init_resources(void) result = request_resource(&iomem_resource, &local_broadcast); if (result < 0) { printk(KERN_ERR - "%s: failed to claim %saddress space!\n", + "%s: failed to claim %s address space!\n", __FILE__, local_broadcast.name); return result; } @@ -301,21 +232,10 @@ static int __init parisc_init_resources(void) return 0; } -extern void gsc_init(void); -extern void processor_init(void); -extern void ccio_init(void); -extern void hppb_init(void); -extern void dino_init(void); -extern void iosapic_init(void); -extern void lba_init(void); -extern void sba_init(void); -extern void eisa_init(void); - static int __init parisc_init(void) { u32 osid = (OS_ID_LINUX << 16); - parisc_proc_mkdir(); parisc_init_resources(); do_device_inventory(); /* probe for hardware */ @@ -342,60 +262,26 @@ static int __init parisc_init(void) boot_cpu_data.cpu_hz / 1000000, boot_cpu_data.cpu_hz % 1000000 ); - apply_alternatives_all(); - parisc_setup_cache_timing(); - - /* These are in a non-obvious order, will fix when we have an iotree */ -#if defined(CONFIG_IOSAPIC) - iosapic_init(); -#endif -#if defined(CONFIG_IOMMU_SBA) - sba_init(); -#endif -#if defined(CONFIG_PCI_LBA) - lba_init(); -#endif - - /* CCIO before any potential subdevices */ -#if defined(CONFIG_IOMMU_CCIO) - ccio_init(); -#endif - - /* - * Need to register Asp & Wax before the EISA adapters for the IRQ - * regions. EISA must come before PCI to be sure it gets IRQ region - * 0. - */ -#if defined(CONFIG_GSC_LASI) || defined(CONFIG_GSC_WAX) - gsc_init(); -#endif -#ifdef CONFIG_EISA - eisa_init(); -#endif - -#if defined(CONFIG_HPPB) - hppb_init(); -#endif - -#if defined(CONFIG_GSC_DINO) - dino_init(); -#endif - -#ifdef CONFIG_CHASSIS_LCD_LED - register_led_regions(); /* register LED port info in procfs */ +#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) + /* Don't serialize TLB flushes if we run on one CPU only. 
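
The setup_cmdline() rework shown earlier in this setup.c diff now appends a default console= (ttyS0 for a duplex PDC console, tty0 otherwise) and an earlycon=pdc option whenever the boot loader did not pass them. A self-contained sketch of that string handling, using plain libc calls instead of the kernel's strscpy/strlcat and a boolean in place of the PAGE0 duplex-console check:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define CMDLINE_SIZE 256

    /* Append default console= and earlycon options when none were given. */
    static void demo_fixup_cmdline(char *cmdline, size_t size, bool serial_console)
    {
        if (strncmp(cmdline, "console=", 8) != 0 && !strstr(cmdline, " console=")) {
            strncat(cmdline, " console=", size - strlen(cmdline) - 1);
            strncat(cmdline, serial_console ? "ttyS0" : "tty0",
                    size - strlen(cmdline) - 1);
        }
        if (!strstr(cmdline, "earlycon"))
            strncat(cmdline, " earlycon=pdc", size - strlen(cmdline) - 1);
    }

    int main(void)
    {
        char cmdline[CMDLINE_SIZE] = "root=/dev/sda1 ro";

        demo_fixup_cmdline(cmdline, sizeof(cmdline), true);
        printf("%s\n", cmdline);  /* root=/dev/sda1 ro console=ttyS0 earlycon=pdc */
        return 0;
    }
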
*/ + if (num_online_cpus() == 1) + pa_serialize_tlb_flushes = 0; #endif + apply_alternatives_all(); + parisc_setup_cache_timing(); return 0; } arch_initcall(parisc_init); void __init start_parisc(void) { - extern void early_trap_init(void); - int ret, cpunum; struct pdc_coproc_cfg coproc_cfg; + /* check QEMU/SeaBIOS marker in PAGE0 */ + running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0); + cpunum = smp_processor_id(); init_cpu_topology(); diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 848c1934680b..e8d27def6c52 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -1,16 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 /* - * linux/arch/parisc/kernel/signal.c: Architecture-specific signal - * handling support. + * PA-RISC architecture-specific signal handling support. * * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org> * Copyright (C) 2000 Linuxcare, Inc. + * Copyright (C) 2000-2022 Helge Deller <deller@gmx.de> + * Copyright (C) 2022 John David Anglin <dave.anglin@bell.net> * * Based on the ia64, i386, and alpha versions. - * - * Like the IA-64, we are a recent enough port (we are *starting* - * with glibc2.2) that we do not need to support the old non-realtime - * Linux signals. Therefore we don't. */ #include <linux/sched.h> @@ -22,7 +19,7 @@ #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> -#include <linux/tracehook.h> +#include <linux/resume_user_mode.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/compat.h> @@ -30,9 +27,9 @@ #include <asm/ucontext.h> #include <asm/rt_sigframe.h> #include <linux/uaccess.h> -#include <asm/pgalloc.h> #include <asm/cacheflush.h> #include <asm/asm-offsets.h> +#include <asm/vdso.h> #ifdef CONFIG_COMPAT #include "signal32.h" @@ -60,14 +57,6 @@ * Do a signal return - restore sigcontext. */ -/* Trampoline for calling rt_sigreturn() */ -#define INSN_LDI_R25_0 0x34190000 /* ldi 0,%r25 (in_syscall=0) */ -#define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */ -#define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */ -#define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */ -/* For debugging */ -#define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */ - static long restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { @@ -78,13 +67,13 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq)); err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq)); err |= __get_user(regs->sar, &sc->sc_sar); - DBG(2,"restore_sigcontext: iaoq is %#lx / %#lx\n", - regs->iaoq[0],regs->iaoq[1]); - DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]); + DBG(2, "%s: iaoq is %#lx / %#lx\n", + __func__, regs->iaoq[0], regs->iaoq[1]); + DBG(2, "%s: r28 is %ld\n", __func__, regs->gr[28]); return err; } -void +asmlinkage void sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) { struct rt_sigframe __user *frame; @@ -103,7 +92,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) /* Unwind the user stack to get the rt_sigframe structure. 
*/ frame = (struct rt_sigframe __user *) (usp - sigframe_size); - DBG(2,"sys_rt_sigreturn: frame is %p\n", frame); + DBG(2, "%s: frame is %p pid %d\n", __func__, frame, task_pid_nr(current)); regs->orig_r28 = 1; /* no restarts for sigreturn */ @@ -111,7 +100,6 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) compat_frame = (struct compat_rt_sigframe __user *)frame; if (is_compat_task()) { - DBG(2,"sys_rt_sigreturn: ELF32 process.\n"); if (get_compat_sigset(&set, &compat_frame->uc.uc_sigmask)) goto give_sigsegv; } else @@ -126,25 +114,25 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) /* Good thing we saved the old gr[30], eh? */ #ifdef CONFIG_64BIT if (is_compat_task()) { - DBG(1,"sys_rt_sigreturn: compat_frame->uc.uc_mcontext 0x%p\n", - &compat_frame->uc.uc_mcontext); + DBG(1, "%s: compat_frame->uc.uc_mcontext 0x%p\n", + __func__, &compat_frame->uc.uc_mcontext); // FIXME: Load upper half from register file if (restore_sigcontext32(&compat_frame->uc.uc_mcontext, &compat_frame->regs, regs)) goto give_sigsegv; - DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n", - usp, &compat_frame->uc.uc_stack); + DBG(1, "%s: usp %#08lx stack 0x%p\n", + __func__, usp, &compat_frame->uc.uc_stack); if (compat_restore_altstack(&compat_frame->uc.uc_stack)) goto give_sigsegv; } else #endif { - DBG(1,"sys_rt_sigreturn: frame->uc.uc_mcontext 0x%p\n", - &frame->uc.uc_mcontext); + DBG(1, "%s: frame->uc.uc_mcontext 0x%p\n", + __func__, &frame->uc.uc_mcontext); if (restore_sigcontext(&frame->uc.uc_mcontext, regs)) goto give_sigsegv; - DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n", - usp, &frame->uc.uc_stack); + DBG(1, "%s: usp %#08lx stack 0x%p\n", + __func__, usp, &frame->uc.uc_stack); if (restore_altstack(&frame->uc.uc_stack)) goto give_sigsegv; } @@ -156,15 +144,12 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) */ if (in_syscall) regs->gr[31] = regs->iaoq[0]; -#if DEBUG_SIG - DBG(1,"sys_rt_sigreturn: returning to %#lx, DUMPING REGS:\n", regs->iaoq[0]); - show_regs(regs); -#endif + return; give_sigsegv: - DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n"); - force_sig(SIGSEGV, current); + DBG(1, "%s: Sending SIGSEGV\n", __func__); + force_sig(SIGSEGV); return; } @@ -178,20 +163,20 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) /*FIXME: ELF32 vs. ELF64 has different frame_size, but since we don't use the parameter it doesn't matter */ - DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n", - (unsigned long)ka, sp, frame_size); + DBG(1, "%s: ka = %#lx, sp = %#lx, frame_size = %zu\n", + __func__, (unsigned long)ka, sp, frame_size); /* Align alternate stack and reserve 64 bytes for the signal handler's frame marker. */ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */ - DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp); + DBG(1, "%s: Returning sp = %#lx\n", __func__, (unsigned long)sp); return (void __user *) sp; /* Stacks grow up. Fun. 
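
The get_sigframe() context above picks the signal-frame address on a stack that grows upward: with SA_ONSTACK in effect it rounds the alternate-stack base with (sas_ss_sp + 0x7f) & ~0x3f, which yields a 64-byte-aligned address at least 64 bytes above the base (room for the frame marker). The rounding can be checked in isolation:

    #include <stdio.h>

    /*
     * get_sigframe()-style rounding: the parisc stack grows upward, so the
     * frame goes 64-byte aligned, at least 64 bytes above the given base.
     */
    static unsigned long demo_sigframe_base(unsigned long sp)
    {
        return (sp + 0x7f) & ~0x3fUL;
    }

    int main(void)
    {
        unsigned long sp = 0xf0001004;  /* arbitrary alternate-stack base */
        unsigned long frame = demo_sigframe_base(sp);

        printf("sp=%#lx -> frame=%#lx (aligned %s, gap %lu bytes)\n",
               sp, frame, (frame & 0x3f) ? "no" : "yes", frame - sp);
        return 0;
    }
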
*/ } static long -setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_syscall) +setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, long in_syscall) { unsigned long flags = 0; @@ -206,55 +191,65 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_sysc err |= __put_user(regs->gr[31]+4, &sc->sc_iaoq[1]); err |= __put_user(regs->sr[3], &sc->sc_iasq[0]); err |= __put_user(regs->sr[3], &sc->sc_iasq[1]); - DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (in syscall)\n", - regs->gr[31], regs->gr[31]+4); + DBG(1, "%s: iaoq %#lx / %#lx (in syscall)\n", + __func__, regs->gr[31], regs->gr[31]+4); } else { err |= __copy_to_user(sc->sc_iaoq, regs->iaoq, sizeof(regs->iaoq)); err |= __copy_to_user(sc->sc_iasq, regs->iasq, sizeof(regs->iasq)); - DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (not in syscall)\n", - regs->iaoq[0], regs->iaoq[1]); + DBG(1, "%s: iaoq %#lx / %#lx (not in syscall)\n", + __func__, regs->iaoq[0], regs->iaoq[1]); } err |= __put_user(flags, &sc->sc_flags); err |= __copy_to_user(sc->sc_gr, regs->gr, sizeof(regs->gr)); err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr)); err |= __put_user(regs->sar, &sc->sc_sar); - DBG(1,"setup_sigcontext: r28 is %ld\n", regs->gr[28]); + DBG(1, "%s: r28 is %ld\n", __func__, regs->gr[28]); return err; } static long setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, - int in_syscall) + long in_syscall) { struct rt_sigframe __user *frame; unsigned long rp, usp; unsigned long haddr, sigframe_size; - unsigned long start, end; + unsigned long start; int err = 0; #ifdef CONFIG_64BIT struct compat_rt_sigframe __user * compat_frame; #endif usp = (regs->gr[30] & ~(0x01UL)); - /*FIXME: frame_size parameter is unused, remove it. */ - frame = get_sigframe(&ksig->ka, usp, sizeof(*frame)); + sigframe_size = PARISC_RT_SIGFRAME_SIZE; +#ifdef CONFIG_64BIT + if (is_compat_task()) { + /* The gcc alloca implementation leaves garbage in the upper 32 bits of sp */ + usp = (compat_uint_t)usp; + sigframe_size = PARISC_RT_SIGFRAME_SIZE32; + } +#endif + frame = get_sigframe(&ksig->ka, usp, sigframe_size); - DBG(1,"SETUP_RT_FRAME: START\n"); - DBG(1,"setup_rt_frame: frame %p info %p\n", frame, ksig->info); + DBG(1, "%s: frame %p info %p\n", __func__, frame, &ksig->info); + start = (unsigned long) frame; + if (start >= TASK_SIZE_MAX - sigframe_size) + return -EFAULT; #ifdef CONFIG_64BIT compat_frame = (struct compat_rt_sigframe __user *)frame; if (is_compat_task()) { - DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info); + DBG(1, "%s: frame->info = 0x%p\n", __func__, &compat_frame->info); err |= copy_siginfo_to_user32(&compat_frame->info, &ksig->info); err |= __compat_save_altstack( &compat_frame->uc.uc_stack, regs->gr[30]); - DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc); - DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext); + DBG(1, "%s: frame->uc = 0x%p\n", __func__, &compat_frame->uc); + DBG(1, "%s: frame->uc.uc_mcontext = 0x%p\n", + __func__, &compat_frame->uc.uc_mcontext); err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext, &compat_frame->regs, regs, in_syscall); err |= put_compat_sigset(&compat_frame->uc.uc_sigmask, set, @@ -262,11 +257,12 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, } else #endif { - DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info); + DBG(1, "%s: frame->info = 0x%p\n", __func__, &frame->info); err |= copy_siginfo_to_user(&frame->info, &ksig->info); err 
|= __save_altstack(&frame->uc.uc_stack, regs->gr[30]); - DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc); - DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext); + DBG(1, "%s: frame->uc = 0x%p\n", __func__, &frame->uc); + DBG(1, "%s: frame->uc.uc_mcontext = 0x%p\n", + __func__, &frame->uc.uc_mcontext); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, in_syscall); /* FIXME: Should probably be converted as well for the compat case */ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); @@ -275,42 +271,15 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, if (err) return -EFAULT; - /* Set up to return from userspace. If provided, use a stub - already in userspace. The first words of tramp are used to - save the previous sigrestartblock trampoline that might be - on the stack. We start the sigreturn trampoline at - SIGRESTARTBLOCK_TRAMP+X. */ - err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0, - &frame->tramp[SIGRESTARTBLOCK_TRAMP+0]); - err |= __put_user(INSN_LDI_R20, - &frame->tramp[SIGRESTARTBLOCK_TRAMP+1]); - err |= __put_user(INSN_BLE_SR2_R0, - &frame->tramp[SIGRESTARTBLOCK_TRAMP+2]); - err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]); - -#if DEBUG_SIG - /* Assert that we're flushing in the correct space... */ - { - unsigned long sid; - asm ("mfsp %%sr3,%0" : "=r" (sid)); - DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n", - sid, frame->tramp); - } +#ifdef CONFIG_64BIT + if (!is_compat_task()) + rp = VDSO64_SYMBOL(current, sigtramp_rt); + else #endif + rp = VDSO32_SYMBOL(current, sigtramp_rt); - start = (unsigned long) &frame->tramp[0]; - end = (unsigned long) &frame->tramp[TRAMP_SIZE]; - flush_user_dcache_range_asm(start, end); - flush_user_icache_range_asm(start, end); - - /* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP - * TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP - * So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP - */ - rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP]; - - if (err) - return -EFAULT; + if (in_syscall) + rp += 4*4; /* skip 4 instructions and start at ldi 1,%r25 */ haddr = A(ksig->ka.sa.sa_handler); /* The sa_handler may be a pointer to a function descriptor */ @@ -341,23 +310,18 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, haddr = fdesc.addr; regs->gr[19] = fdesc.gp; - DBG(1,"setup_rt_frame: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n", - haddr, regs->gr[19], in_syscall); + DBG(1, "%s: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n", + __func__, haddr, regs->gr[19], in_syscall); } #endif /* The syscall return path will create IAOQ values from r31. */ - sigframe_size = PARISC_RT_SIGFRAME_SIZE; -#ifdef CONFIG_64BIT - if (is_compat_task()) - sigframe_size = PARISC_RT_SIGFRAME_SIZE32; -#endif if (in_syscall) { regs->gr[31] = haddr; #ifdef CONFIG_64BIT if (!test_thread_flag(TIF_32BIT)) - sigframe_size |= 1; + sigframe_size |= 1; /* XXX ???? 
*/ #endif } else { unsigned long psw = USER_PSW; @@ -379,11 +343,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, } regs->gr[0] = psw; - regs->iaoq[0] = haddr | 3; + regs->iaoq[0] = haddr | PRIV_USER; regs->iaoq[1] = regs->iaoq[0] + 4; } - regs->gr[2] = rp; /* userland return pointer */ + regs->gr[2] = rp; /* userland return pointer */ regs->gr[26] = ksig->sig; /* signal number */ #ifdef CONFIG_64BIT @@ -397,15 +361,15 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, regs->gr[24] = A(&frame->uc); /* ucontext pointer */ } - DBG(1,"setup_rt_frame: making sigreturn frame: %#lx + %#lx = %#lx\n", + DBG(1, "%s: making sigreturn frame: %#lx + %#lx = %#lx\n", __func__, regs->gr[30], sigframe_size, regs->gr[30] + sigframe_size); /* Raise the user stack pointer to make a proper call frame. */ regs->gr[30] = (A(frame) + sigframe_size); - DBG(1,"setup_rt_frame: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n", - current->comm, current->pid, frame, regs->gr[30], + DBG(1, "%s: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n", + __func__, current->comm, current->pid, frame, regs->gr[30], regs->iaoq[0], regs->iaoq[1], rp); return 0; @@ -416,13 +380,13 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, */ static void -handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall) +handle_signal(struct ksignal *ksig, struct pt_regs *regs, long in_syscall) { int ret; sigset_t *oldset = sigmask_to_save(); - DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n", - ksig->sig, ksig->ka, ksig->info, oldset, regs); + DBG(1, "%s: sig=%d, ka=%p, info=%p, oldset=%p, regs=%p\n", + __func__, ksig->sig, &ksig->ka, &ksig->info, oldset, regs); /* Set up the stack frame */ ret = setup_rt_frame(ksig, oldset, regs, in_syscall); @@ -430,8 +394,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall) signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP) || test_thread_flag(TIF_BLOCKSTEP)); - DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n", - regs->gr[28]); + DBG(1, "%s: Exit (success), regs->gr[28] = %ld\n", + __func__, regs->gr[28]); } /* @@ -459,7 +423,7 @@ static void check_syscallno_in_delay_branch(struct pt_regs *regs) regs->gr[31] -= 8; /* delayed branching */ /* Get assembler opcode of code in delay branch */ - uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4); + uaddr = (u32 __user *) ((regs->gr[31] & ~3) + 4); err = get_user(opcode, uaddr); if (err) return; @@ -489,22 +453,27 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) if (regs->orig_r28) return; regs->orig_r28 = 1; /* no more restarts */ + + DBG(1, "%s: orig_r28 = %ld pid %d r20 %ld\n", + __func__, regs->orig_r28, task_pid_nr(current), regs->gr[20]); + /* Check the return code */ switch (regs->gr[28]) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: - DBG(1,"ERESTARTNOHAND: returning -EINTR\n"); + DBG(1, "%s: ERESTARTNOHAND: returning -EINTR\n", __func__); regs->gr[28] = -EINTR; break; - case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { - DBG(1,"ERESTARTSYS: putting -EINTR\n"); + DBG(1, "%s: ERESTARTSYS: putting -EINTR pid %d\n", + __func__, task_pid_nr(current)); regs->gr[28] = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: + DBG(1, "%s: %ld\n", __func__, regs->gr[28]); check_syscallno_in_delay_branch(regs); break; } @@ -516,46 +485,52 @@ insert_restart_trampoline(struct pt_regs *regs) if (regs->orig_r28) return; 
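The ERESTARTSYS case above is the kernel side of the usual SA_RESTART contract: if the handler was installed without SA_RESTART the interrupted syscall fails with EINTR, otherwise the IAOQ is rewound so the call is re-issued. A minimal userspace sketch of that contract, as an illustrative test program that is not part of this patch:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void on_alarm(int sig) { (void)sig; }    /* empty handler, only interrupts */

    int main(void)
    {
        struct sigaction sa;
        int pipefd[2];
        char c;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_alarm;               /* note: no SA_RESTART */
        sigaction(SIGALRM, &sa, NULL);

        if (pipe(pipefd))
            return 1;
        alarm(1);
        /* Nothing is ever written, so read() blocks until SIGALRM hits it;
         * without SA_RESTART the kernel converts -ERESTARTSYS into -EINTR. */
        if (read(pipefd[0], &c, 1) < 0)
            printf("read failed: %s\n", strerror(errno));  /* "Interrupted system call" */

        /* With sa.sa_flags = SA_RESTART the same read() would instead be
         * transparently restarted via check_syscallno_in_delay_branch(). */
        return 0;
    }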
regs->orig_r28 = 1; /* no more restarts */ - switch(regs->gr[28]) { + + DBG(2, "%s: gr28 = %ld pid %d\n", + __func__, regs->gr[28], task_pid_nr(current)); + + switch (regs->gr[28]) { case -ERESTART_RESTARTBLOCK: { /* Restart the system call - no handlers present */ unsigned int *usp = (unsigned int *)regs->gr[30]; - unsigned long start = (unsigned long) &usp[2]; - unsigned long end = (unsigned long) &usp[5]; + unsigned long rp; long err = 0; - /* Setup a trampoline to restart the syscall - * with __NR_restart_syscall + /* check that we don't exceed the stack */ + if (A(&usp[0]) >= TASK_SIZE_MAX - 5 * sizeof(int)) + return; + + /* Call trampoline in vdso to restart the syscall + * with __NR_restart_syscall. + * Original return addresses are on stack like this: * * 0: <return address (orig r31)> * 4: <2nd half for 64-bit> - * 8: ldw 0(%sp), %r31 - * 12: be 0x100(%sr2, %r0) - * 16: ldi __NR_restart_syscall, %r20 */ #ifdef CONFIG_64BIT - err |= put_user(regs->gr[31] >> 32, &usp[0]); - err |= put_user(regs->gr[31] & 0xffffffff, &usp[1]); - err |= put_user(0x0fc010df, &usp[2]); -#else - err |= put_user(regs->gr[31], &usp[0]); - err |= put_user(0x0fc0109f, &usp[2]); + if (!is_compat_task()) { + err |= put_user(regs->gr[31] >> 32, &usp[0]); + err |= put_user(regs->gr[31] & 0xffffffff, &usp[1]); + rp = VDSO64_SYMBOL(current, restart_syscall); + } else #endif - err |= put_user(0xe0008200, &usp[3]); - err |= put_user(0x34140000, &usp[4]); - + { + err |= put_user(regs->gr[31], &usp[0]); + rp = VDSO32_SYMBOL(current, restart_syscall); + } WARN_ON(err); - /* flush data/instruction cache for new insns */ - flush_user_dcache_range_asm(start, end); - flush_user_icache_range_asm(start, end); - - regs->gr[31] = regs->gr[30] + 8; + regs->gr[31] = rp; + DBG(1, "%s: ERESTART_RESTARTBLOCK\n", __func__); return; } + case -EINTR: + /* ok, was handled before and should be returned. */ + break; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: + DBG(1, "%s: Type %ld\n", __func__, regs->gr[28]); check_syscallno_in_delay_branch(regs); return; default: @@ -564,51 +539,51 @@ insert_restart_trampoline(struct pt_regs *regs) } /* - * Note that 'init' is a special process: it doesn't get signals it doesn't - * want to handle. Thus you cannot kill init even with a SIGKILL even by - * mistake. - * * We need to be able to restore the syscall arguments (r21-r26) to * restart syscalls. Thus, the syscall path should save them in the * pt_regs structure (it's okay to do so since they are caller-save * registers). As noted below, the syscall number gets restored for * us due to the magic of delayed branching. */ -asmlinkage void -do_signal(struct pt_regs *regs, long in_syscall) +static void do_signal(struct pt_regs *regs, long in_syscall) { struct ksignal ksig; + int restart_syscall; + bool has_handler; + + has_handler = get_signal(&ksig); - DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n", - regs, regs->sr[7], in_syscall); + restart_syscall = 0; + if (in_syscall) + restart_syscall = 1; - if (get_signal(&ksig)) { - DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]); + if (has_handler) { /* Restart a system call if necessary. */ - if (in_syscall) + if (restart_syscall) syscall_restart(regs, &ksig.ka); handle_signal(&ksig, regs, in_syscall); + DBG(1, "%s: Handled signal pid %d\n", + __func__, task_pid_nr(current)); return; } - /* Did we come from a system call? */ - if (in_syscall) + /* Do we need to restart the system call? 
*/ + if (restart_syscall) insert_restart_trampoline(regs); - DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n", - regs->gr[28]); + DBG(1, "%s: Exit (not delivered), regs->gr[28] = %ld orig_r28 = %ld pid %d\n", + __func__, regs->gr[28], regs->orig_r28, task_pid_nr(current)); restore_saved_sigmask(); } -void do_notify_resume(struct pt_regs *regs, long in_syscall) +asmlinkage void do_notify_resume(struct pt_regs *regs, long in_syscall) { - if (test_thread_flag(TIF_SIGPENDING)) + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs, in_syscall); - if (test_thread_flag(TIF_NOTIFY_RESUME)) { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); - } + if (test_thread_flag(TIF_NOTIFY_RESUME)) + resume_user_mode_work(regs); } diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c index e8ef3eb69449..9a5ba57ad94d 100644 --- a/arch/parisc/kernel/signal32.c +++ b/arch/parisc/kernel/signal32.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* Signal support for 32-bit kernel builds * * Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org> @@ -5,21 +6,6 @@ * * Code was mostly borrowed from kernel/signal.c. * See kernel/signal.c for additional Copyrights. - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/compat.h> diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h index a271dc0976ce..c03eb1ed4c53 100644 --- a/arch/parisc/kernel/signal32.h +++ b/arch/parisc/kernel/signal32.h @@ -1,20 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org> * Copyright (C) 2003 Carlos O'Donell <carlos at parisc-linux.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _PARISC64_KERNEL_SIGNAL32_H #define _PARISC64_KERNEL_SIGNAL32_H @@ -49,21 +36,12 @@ struct compat_regfile { compat_int_t rf_sar; }; -#define COMPAT_SIGRETURN_TRAMP 4 -#define COMPAT_SIGRESTARTBLOCK_TRAMP 5 -#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \ - COMPAT_SIGRESTARTBLOCK_TRAMP) - struct compat_rt_sigframe { - /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c - Secondary to that it must protect the ERESTART_RESTARTBLOCK - trampoline we left on the stack (we were bad and didn't - change sp so we could run really fast.) */ - compat_uint_t tramp[COMPAT_TRAMP_SIZE]; - compat_siginfo_t info; - struct compat_ucontext uc; - /* Hidden location of truncated registers, *must* be last. */ - struct compat_regfile regs; + unsigned int tramp[2]; /* holds original return address */ + compat_siginfo_t info; + struct compat_ucontext uc; + /* Hidden location of truncated registers, *must* be last. */ + struct compat_regfile regs; }; /* diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 5e26dbede5fc..b2d12ab728b1 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* ** SMP Support ** @@ -11,10 +12,6 @@ ** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work. ** -grant (1/12/2001) ** -** This program is free software; you can redistribute it and/or modify -** it under the terms of the GNU General Public License as published by -** the Free Software Foundation; either version 2 of the License, or -** (at your option) any later version. */ #include <linux/types.h> #include <linux/spinlock.h> @@ -32,6 +29,8 @@ #include <linux/bitops.h> #include <linux/ftrace.h> #include <linux/cpu.h> +#include <linux/kgdb.h> +#include <linux/sched/hotplug.h> #include <linux/atomic.h> #include <asm/current.h> @@ -42,8 +41,6 @@ #include <asm/irq.h> /* for CPU_IRQ_REGION and friends */ #include <asm/mmu_context.h> #include <asm/page.h> -#include <asm/pgtable.h> -#include <asm/pgalloc.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/unistd.h> @@ -64,8 +61,6 @@ volatile struct task_struct *smp_init_current_idle_task; /* track which CPU is booting */ static volatile int cpu_now_booting; -static int parisc_max_cpus = 1; - static DEFINE_PER_CPU(spinlock_t, ipi_lock); enum ipi_message_type { @@ -74,7 +69,10 @@ enum ipi_message_type { IPI_CALL_FUNC, IPI_CPU_START, IPI_CPU_STOP, - IPI_CPU_TEST + IPI_CPU_TEST, +#ifdef CONFIG_KGDB + IPI_ENTER_KGDB, +#endif }; @@ -112,6 +110,7 @@ halt_processor(void) /* REVISIT : does PM *know* this CPU isn't available? 
*/ set_cpu_online(smp_processor_id(), false); local_irq_disable(); + __pdc_cpu_rendezvous(); for (;;) ; } @@ -155,6 +154,7 @@ ipi_interrupt(int irq, void *dev_id) case IPI_CALL_FUNC: smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu); + inc_irq_stat(irq_call_count); generic_smp_call_function_interrupt(); break; @@ -170,15 +170,23 @@ ipi_interrupt(int irq, void *dev_id) case IPI_CPU_TEST: smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu); break; - +#ifdef CONFIG_KGDB + case IPI_ENTER_KGDB: + smp_debug(100, KERN_DEBUG "CPU%d ENTER_KGDB\n", this_cpu); + kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); + break; +#endif default: printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n", this_cpu, which); return IRQ_NONE; } /* Switch */ - /* let in any pending interrupts */ - local_irq_enable(); - local_irq_disable(); + + /* before doing more, let in any pending interrupts */ + if (ops) { + local_irq_enable(); + local_irq_disable(); + } } /* while (ops) */ } return IRQ_HANDLED; @@ -219,19 +227,27 @@ static inline void send_IPI_allbutself(enum ipi_message_type op) { int i; - + + preempt_disable(); for_each_online_cpu(i) { if (i != smp_processor_id()) send_IPI_single(i, op); } + preempt_enable(); } +#ifdef CONFIG_KGDB +void kgdb_roundup_cpus(void) +{ + send_IPI_allbutself(IPI_ENTER_KGDB); +} +#endif inline void smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); } -void -smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); } +void +arch_smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); } void smp_send_all_nop(void) @@ -252,12 +268,9 @@ void arch_send_call_function_single_ipi(int cpu) /* * Called by secondaries to update state and initialize CPU registers. */ -static void __init +static void smp_cpu_init(int cpunum) { - extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ - extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */ - /* Set modes and Enable floating point coprocessor */ init_per_cpu(cpunum); @@ -284,7 +297,7 @@ smp_cpu_init(int cpunum) enter_lazy_tlb(&init_mm, current); init_IRQ(); /* make sure no IRQs are enabled or pending */ - start_cpu_itimer(); + parisc_clockevent_init(); } @@ -292,7 +305,7 @@ smp_cpu_init(int cpunum) * Slaves start using C here. Indirectly called from smp_slave_stext. * Do what start_kernel() and main() do for boot strap processor (aka monarch) */ -void __init smp_callin(unsigned long pdce_proc) +void smp_callin(unsigned long pdce_proc) { int slave_id = cpu_now_booting; @@ -302,7 +315,6 @@ void __init smp_callin(unsigned long pdce_proc) #endif smp_cpu_init(slave_id); - preempt_disable(); flush_cache_all_local(); /* start with known state */ flush_tlb_all_local(NULL); @@ -318,12 +330,27 @@ void __init smp_callin(unsigned long pdce_proc) /* * Bring one cpu online. */ -int smp_boot_one_cpu(int cpuid, struct task_struct *idle) +static int smp_boot_one_cpu(int cpuid, struct task_struct *idle) { const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); long timeout; - task_thread_info(idle)->cpu = cpuid; +#ifdef CONFIG_HOTPLUG_CPU + int i; + + /* reset irq statistics for this CPU */ + memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t)); + for (i = 0; i < NR_IRQS; i++) { + struct irq_desc *desc = irq_to_desc(i); + + if (desc && desc->kstat_irqs) + *per_cpu_ptr(desc->kstat_irqs, cpuid) = (struct irqstat) { }; + } +#endif + + /* wait until last booting CPU has started. 
*/ + while (cpu_now_booting) + ; /* Let _start know what logical CPU we're booting ** (offset into init_tasks[],cpu_data[]) @@ -360,7 +387,6 @@ int smp_boot_one_cpu(int cpuid, struct task_struct *idle) if(cpu_online(cpuid)) { /* Which implies Slave has started up */ cpu_now_booting = 0; - smp_init_current_idle_task = NULL; goto alive ; } udelay(100); @@ -378,13 +404,7 @@ alive: void __init smp_prepare_boot_cpu(void) { - int bootstrap_processor = per_cpu(cpu_data, 0).cpuid; - - /* Setup BSP mappings */ - printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor); - - set_cpu_online(bootstrap_processor, true); - set_cpu_present(bootstrap_processor, true); + pr_info("SMP: bootstrap CPU ID is 0\n"); } @@ -401,30 +421,87 @@ void __init smp_prepare_cpus(unsigned int max_cpus) spin_lock_init(&per_cpu(ipi_lock, cpu)); init_cpu_present(cpumask_of(0)); - - parisc_max_cpus = max_cpus; - if (!max_cpus) - printk(KERN_INFO "SMP mode deactivated.\n"); } -void smp_cpus_done(unsigned int cpu_max) +void __init smp_cpus_done(unsigned int cpu_max) { - return; } int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle)) - return -ENOSYS; + if (cpu_online(cpu)) + return 0; + + if (num_online_cpus() < nr_cpu_ids && + num_online_cpus() < setup_max_cpus && + smp_boot_one_cpu(cpu, tidle)) + return -EIO; + + return cpu_online(cpu) ? 0 : -EIO; +} + +/* + * __cpu_disable runs on the processor to be shutdown. + */ +int __cpu_disable(void) +{ +#ifdef CONFIG_HOTPLUG_CPU + unsigned int cpu = smp_processor_id(); + + remove_cpu_topology(cpu); + + /* + * Take this CPU offline. Once we clear this, we can't return, + * and we must not schedule until we're ready to give up the cpu. + */ + set_cpu_online(cpu, false); + + /* Find a new timesync master */ + if (cpu == time_keeper_id) { + time_keeper_id = cpumask_first(cpu_online_mask); + pr_info("CPU %d is now promoted to time-keeper master\n", time_keeper_id); + } + + disable_percpu_irq(IPI_IRQ); + + irq_migrate_all_off_this_cpu(); + + flush_cache_all_local(); + flush_tlb_all_local(NULL); + + /* disable all irqs, including timer irq */ + local_irq_disable(); + + /* wait for next timer irq ... */ + mdelay(1000/HZ+100); + + /* ... and then clear all pending external irqs */ + set_eiem(0); + mtctl(~0UL, CR_EIRR); + mfctl(CR_EIRR); + mtctl(0, CR_EIRR); +#endif + return 0; +} - return cpu_online(cpu) ? 0 : -ENOSYS; +/* + * called on the thread which is asking for a CPU to be shutdown - + * waits until shutdown has completed, or it is timed out. + */ +void __cpu_die(unsigned int cpu) +{ + pdc_cpu_rendezvous_lock(); } -#ifdef CONFIG_PROC_FS -int setup_profiling_timer(unsigned int multiplier) +void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { - return -EINVAL; + pr_info("CPU%u: is shutting down\n", cpu); + + /* set task's state to interruptible sleep */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 
8:2) * HZ); + + pdc_cpu_rendezvous_unlock(); } -#endif diff --git a/arch/parisc/kernel/stacktrace.c b/arch/parisc/kernel/stacktrace.c index ec5835e83a7a..023834ef582e 100644 --- a/arch/parisc/kernel/stacktrace.c +++ b/arch/parisc/kernel/stacktrace.c @@ -1,50 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Stack trace management functions * - * Copyright (C) 2009 Helge Deller <deller@gmx.de> + * Copyright (C) 2009-2021 Helge Deller <deller@gmx.de> * based on arch/x86/kernel/stacktrace.c by Ingo Molnar <mingo@redhat.com> * and parisc unwind functions by Randolph Chung <tausq@debian.org> * * TODO: Userspace stacktrace (CONFIG_USER_STACKTRACE_SUPPORT) */ -#include <linux/module.h> +#include <linux/kernel.h> #include <linux/stacktrace.h> #include <asm/unwind.h> -static void dump_trace(struct task_struct *task, struct stack_trace *trace) +static void notrace walk_stackframe(struct task_struct *task, + struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *cookie) { struct unwind_frame_info info; unwind_frame_init_task(&info, task, NULL); - - /* unwind stack and save entries in stack_trace struct */ - trace->nr_entries = 0; - while (trace->nr_entries < trace->max_entries) { + while (1) { if (unwind_once(&info) < 0 || info.ip == 0) break; if (__kernel_text_address(info.ip)) - trace->entries[trace->nr_entries++] = info.ip; + if (!fn(cookie, info.ip)) + break; } } - -/* - * Save stack-backtrace addresses into a stack_trace buffer. - */ -void save_stack_trace(struct stack_trace *trace) +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task, struct pt_regs *regs) { - dump_trace(current, trace); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; + walk_stackframe(task, regs, consume_entry, cookie); } -EXPORT_SYMBOL_GPL(save_stack_trace); -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task) { - dump_trace(tsk, trace); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; + walk_stackframe(task, NULL, consume_entry, cookie); + return 1; } -EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 376ea0d1b275..b2cdbb8a12b1 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * PARISC specific syscalls @@ -5,22 +6,7 @@ * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org> * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org> * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org> - * Copyright (C) 1999-2014 Helge Deller <deller@gmx.de> - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
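The stacktrace.c conversion above replaces the old struct stack_trace interface with ARCH_STACKWALK: arch_stack_walk() hands every unwound kernel text address to a consume callback and stops as soon as that callback returns false. A sketch of a caller honouring that contract is shown below; the buffer type and its size are invented for illustration, and in-tree code would normally go through stack_trace_save() rather than calling arch_stack_walk() directly.

    #include <linux/kernel.h>
    #include <linux/sched.h>
    #include <linux/stacktrace.h>

    struct trace_buf {
        unsigned long entries[16];
        unsigned int nr;
    };

    /* stack_trace_consume_fn: store one entry, return false once the buffer is full */
    static bool store_entry(void *cookie, unsigned long addr)
    {
        struct trace_buf *buf = cookie;

        buf->entries[buf->nr++] = addr;
        return buf->nr < ARRAY_SIZE(buf->entries);
    }

    static void dump_current_stack(void)
    {
        struct trace_buf buf = { .nr = 0 };

        /* task == current, regs == NULL: unwind from the current frame */
        arch_stack_walk(store_entry, &buf, current, NULL);
    }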
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Copyright (C) 1999-2020 Helge Deller <deller@gmx.de> */ #include <linux/uaccess.h> @@ -37,35 +23,51 @@ #include <linux/utsname.h> #include <linux/personality.h> #include <linux/random.h> +#include <linux/compat.h> +#include <linux/elf-randomize.h> -/* we construct an artificial offset for the mapping based on the physical - * address of the kernel mapping variable */ -#define GET_LAST_MMAP(filp) \ - (filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL) -#define SET_LAST_MMAP(filp, val) \ - { /* nothing */ } - -static int get_offset(unsigned int last_mmap) -{ - return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT; -} +/* + * Construct an artificial page offset for the mapping based on the physical + * address of the kernel file mapping variable. + */ +#define GET_FILP_PGOFF(filp) \ + (filp ? (((unsigned long) filp->f_mapping) >> 8) \ + & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL) -static unsigned long shared_align_offset(unsigned int last_mmap, +static unsigned long shared_align_offset(unsigned long filp_pgoff, unsigned long pgoff) { - return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT; + return (filp_pgoff + pgoff) << PAGE_SHIFT; } static inline unsigned long COLOR_ALIGN(unsigned long addr, - unsigned int last_mmap, unsigned long pgoff) + unsigned long filp_pgoff, unsigned long pgoff) { unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1); unsigned long off = (SHM_COLOUR-1) & - (shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT); - + shared_align_offset(filp_pgoff, pgoff); return base + off; } + +#define STACK_SIZE_DEFAULT (USER_WIDE_MODE \ + ? (1 << 30) /* 1 GB */ \ + : (CONFIG_STACK_MAX_DEFAULT_SIZE_MB*1024*1024)) + +unsigned long calc_max_stack_size(unsigned long stack_max) +{ +#ifdef CONFIG_COMPAT + if (!USER_WIDE_MODE && (stack_max == COMPAT_RLIM_INFINITY)) + stack_max = STACK_SIZE_DEFAULT; + else +#endif + if (stack_max == RLIM_INFINITY) + stack_max = STACK_SIZE_DEFAULT; + + return stack_max; +} + + /* * Top of mmap area (just below the process stack). */ @@ -75,108 +77,60 @@ static inline unsigned long COLOR_ALIGN(unsigned long addr, * indicating that "current" should be used instead of a passed-in * value from the exec bprm as done with arch_pick_mmap_layout(). */ -static unsigned long mmap_upper_limit(struct rlimit *rlim_stack) +unsigned long mmap_upper_limit(const struct rlimit *rlim_stack) { unsigned long stack_base; /* Limit stack size - see setup_arg_pages() in fs/exec.c */ stack_base = rlim_stack ? rlim_stack->rlim_max : rlimit_max(RLIMIT_STACK); - if (stack_base > STACK_SIZE_MAX) - stack_base = STACK_SIZE_MAX; + + stack_base = calc_max_stack_size(stack_base); /* Add space for stack randomization. 
*/ - stack_base += (STACK_RND_MASK << PAGE_SHIFT); + if (current->flags & PF_RANDOMIZE) + stack_base += (STACK_RND_MASK << PAGE_SHIFT); return PAGE_ALIGN(STACK_TOP - stack_base); } +enum mmap_allocation_direction {UP, DOWN}; -unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) +static unsigned long arch_get_unmapped_area_common(struct file *filp, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags, enum mmap_allocation_direction dir) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; - unsigned long task_size = TASK_SIZE; - int do_color_align, last_mmap; - struct vm_unmapped_area_info info; + unsigned long filp_pgoff; + int do_color_align; + struct vm_unmapped_area_info info = { + .length = len + }; - if (len > task_size) + if (unlikely(len > TASK_SIZE)) return -ENOMEM; do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; - last_mmap = GET_LAST_MMAP(filp); + filp_pgoff = GET_FILP_PGOFF(filp); if (flags & MAP_FIXED) { - if ((flags & MAP_SHARED) && last_mmap && - (addr - shared_align_offset(last_mmap, pgoff)) - & (SHM_COLOUR - 1)) + /* Even MAP_FIXED mappings must reside within TASK_SIZE */ + if (TASK_SIZE - len < addr) return -EINVAL; - goto found_addr; - } - - if (addr) { - if (do_color_align && last_mmap) - addr = COLOR_ALIGN(addr, last_mmap, pgoff); - else - addr = PAGE_ALIGN(addr); - - vma = find_vma_prev(mm, addr, &prev); - if (task_size - len >= addr && - (!vma || addr + len <= vm_start_gap(vma)) && - (!prev || addr >= vm_end_gap(prev))) - goto found_addr; - } - - info.flags = 0; - info.length = len; - info.low_limit = mm->mmap_legacy_base; - info.high_limit = mmap_upper_limit(NULL); - info.align_mask = last_mmap ? 
(PAGE_MASK & (SHM_COLOUR - 1)) : 0; - info.align_offset = shared_align_offset(last_mmap, pgoff); - addr = vm_unmapped_area(&info); - -found_addr: - if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK)) - SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT)); - - return addr; -} -unsigned long -arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - const unsigned long len, const unsigned long pgoff, - const unsigned long flags) -{ - struct vm_area_struct *vma, *prev; - struct mm_struct *mm = current->mm; - unsigned long addr = addr0; - int do_color_align, last_mmap; - struct vm_unmapped_area_info info; - - /* requested length too big for entire address space */ - if (len > TASK_SIZE) - return -ENOMEM; - - do_color_align = 0; - if (filp || (flags & MAP_SHARED)) - do_color_align = 1; - last_mmap = GET_LAST_MMAP(filp); - - if (flags & MAP_FIXED) { - if ((flags & MAP_SHARED) && last_mmap && - (addr - shared_align_offset(last_mmap, pgoff)) - & (SHM_COLOUR - 1)) + if ((flags & MAP_SHARED) && filp && + (addr - shared_align_offset(filp_pgoff, pgoff)) + & (SHM_COLOUR - 1)) return -EINVAL; - goto found_addr; + return addr; } - /* requesting a specific address */ if (addr) { - if (do_color_align && last_mmap) - addr = COLOR_ALIGN(addr, last_mmap, pgoff); + if (do_color_align) + addr = COLOR_ALIGN(addr, filp_pgoff, pgoff); else addr = PAGE_ALIGN(addr); @@ -184,87 +138,50 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, if (TASK_SIZE - len >= addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) - goto found_addr; + return addr; } - info.flags = VM_UNMAPPED_AREA_TOPDOWN; - info.length = len; - info.low_limit = PAGE_SIZE; - info.high_limit = mm->mmap_base; - info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; - info.align_offset = shared_align_offset(last_mmap, pgoff); - addr = vm_unmapped_area(&info); - if (!(addr & ~PAGE_MASK)) - goto found_addr; - VM_BUG_ON(addr != -ENOMEM); - - /* - * A failed mmap() very likely causes application failure, - * so fall back to the bottom-up function here. This scenario - * can happen with large stack limits and large mmap() - * allocations. - */ - return arch_get_unmapped_area(filp, addr0, len, pgoff, flags); - -found_addr: - if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK)) - SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT)); - - return addr; -} - -static int mmap_is_legacy(void) -{ - if (current->personality & ADDR_COMPAT_LAYOUT) - return 1; - - /* parisc stack always grows up - so a unlimited stack should - * not be an indicator to use the legacy memory layout. - * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) - * return 1; - */ - - return sysctl_legacy_va_layout; -} - -static unsigned long mmap_rnd(void) -{ - unsigned long rnd = 0; - - if (current->flags & PF_RANDOMIZE) - rnd = get_random_int() & MMAP_RND_MASK; - - return rnd << PAGE_SHIFT; -} + info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; + info.align_offset = shared_align_offset(filp_pgoff, pgoff); + + if (dir == DOWN) { + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.low_limit = PAGE_SIZE; + info.high_limit = mm->mmap_base; + addr = vm_unmapped_area(&info); + if (!(addr & ~PAGE_MASK)) + return addr; + VM_BUG_ON(addr != -ENOMEM); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
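The align_mask/align_offset pair filled in above is what keeps every shared mapping of a given file at addresses that are congruent modulo SHM_COLOUR, so aliases land in the same colour of the virtually indexed caches. The arithmetic, spelled out as a stand-alone sketch (assuming the 4 MB SHM_COLOUR and 4 KB pages normally used on parisc; the sample offsets are invented):

    #include <stdio.h>

    #define PAGE_SHIFT  12                      /* 4 KB pages (default config) */
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PAGE_MASK   (~(PAGE_SIZE - 1))
    #define SHM_COLOUR  0x00400000UL            /* 4 MB colour granule (assumed) */

    /* mirrors shared_align_offset(): byte offset of the mapping within a colour */
    static unsigned long shared_align_offset(unsigned long filp_pgoff,
                                             unsigned long pgoff)
    {
        return (filp_pgoff + pgoff) << PAGE_SHIFT;
    }

    int main(void)
    {
        unsigned long filp_pgoff = 0x123;       /* as hashed from filp->f_mapping */
        unsigned long pgoff = 7;                /* page offset passed to mmap() */
        unsigned long align_mask = PAGE_MASK & (SHM_COLOUR - 1);
        unsigned long align_offset = shared_align_offset(filp_pgoff, pgoff);

        /*
         * vm_unmapped_area() returns an addr with
         *     (addr & align_mask) == (align_offset & align_mask)
         * so all shared mappings of this file+offset share one cache colour.
         */
        printf("align_mask  = %#lx\n", align_mask);
        printf("colour slot = %#lx\n", align_offset & align_mask);
        return 0;
    }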
+ */ + } -unsigned long arch_mmap_rnd(void) -{ - return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT; + info.low_limit = mm->mmap_base; + info.high_limit = mmap_upper_limit(NULL); + return vm_unmapped_area(&info); } -static unsigned long mmap_legacy_base(void) +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags) { - return TASK_UNMAPPED_BASE + mmap_rnd(); + return arch_get_unmapped_area_common(filp, + addr, len, pgoff, flags, UP); } -/* - * This function, called very early during the creation of a new - * process VM image, sets up which VM layout function to use: - */ -void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) +unsigned long arch_get_unmapped_area_topdown(struct file *filp, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) { - mm->mmap_legacy_base = mmap_legacy_base(); - mm->mmap_base = mmap_upper_limit(rlim_stack); - - if (mmap_is_legacy()) { - mm->mmap_base = mm->mmap_legacy_base; - mm->get_unmapped_area = arch_get_unmapped_area; - } else { - mm->get_unmapped_area = arch_get_unmapped_area_topdown; - } + return arch_get_unmapped_area_common(filp, + addr, len, pgoff, flags, DOWN); } - asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) @@ -372,7 +289,7 @@ asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo, ((u64)lenhi << 32) | lenlo); } -long parisc_personality(unsigned long personality) +asmlinkage long parisc_personality(unsigned long personality) { long err; @@ -386,3 +303,103 @@ long parisc_personality(unsigned long personality) return err; } + +/* + * Up to kernel v5.9 we defined O_NONBLOCK as 000200004, + * since then O_NONBLOCK is defined as 000200000. + * + * The following wrapper functions mask out the old + * O_NDELAY bit from calls which use O_NONBLOCK. + * + * XXX: Remove those in year 2022 (or later)? + */ + +#define O_NONBLOCK_OLD 000200004 +#define O_NONBLOCK_MASK_OUT (O_NONBLOCK_OLD & ~O_NONBLOCK) + +static int FIX_O_NONBLOCK(int flags) +{ + if ((flags & O_NONBLOCK_MASK_OUT) && + !test_thread_flag(TIF_NONBLOCK_WARNING)) { + set_thread_flag(TIF_NONBLOCK_WARNING); + pr_warn("%s(%d) uses a deprecated O_NONBLOCK value." 
+ " Please recompile with newer glibc.\n", + current->comm, current->pid); + } + return flags & ~O_NONBLOCK_MASK_OUT; +} + +asmlinkage long parisc_timerfd_create(int clockid, int flags) +{ + flags = FIX_O_NONBLOCK(flags); + return sys_timerfd_create(clockid, flags); +} + +asmlinkage long parisc_signalfd4(int ufd, sigset_t __user *user_mask, + size_t sizemask, int flags) +{ + flags = FIX_O_NONBLOCK(flags); + return sys_signalfd4(ufd, user_mask, sizemask, flags); +} + +#ifdef CONFIG_COMPAT +asmlinkage long parisc_compat_signalfd4(int ufd, + compat_sigset_t __user *user_mask, + compat_size_t sizemask, int flags) +{ + flags = FIX_O_NONBLOCK(flags); + return compat_sys_signalfd4(ufd, user_mask, sizemask, flags); +} +#endif + +asmlinkage long parisc_eventfd2(unsigned int count, int flags) +{ + flags = FIX_O_NONBLOCK(flags); + return sys_eventfd2(count, flags); +} + +asmlinkage long parisc_userfaultfd(int flags) +{ + flags = FIX_O_NONBLOCK(flags); + return sys_userfaultfd(flags); +} + +asmlinkage long parisc_pipe2(int __user *fildes, int flags) +{ + flags = FIX_O_NONBLOCK(flags); + return sys_pipe2(fildes, flags); +} + +asmlinkage long parisc_inotify_init1(int flags) +{ + flags = FIX_O_NONBLOCK(flags); + return sys_inotify_init1(flags); +} + +/* + * madvise() wrapper + * + * Up to kernel v6.1 parisc has different values than all other + * platforms for the MADV_xxx flags listed below. + * To keep binary compatibility with existing userspace programs + * translate the former values to the new values. + * + * XXX: Remove this wrapper in year 2025 (or later) + */ + +asmlinkage notrace long parisc_madvise(unsigned long start, size_t len_in, int behavior) +{ + switch (behavior) { + case 65: behavior = MADV_MERGEABLE; break; + case 66: behavior = MADV_UNMERGEABLE; break; + case 67: behavior = MADV_HUGEPAGE; break; + case 68: behavior = MADV_NOHUGEPAGE; break; + case 69: behavior = MADV_DONTDUMP; break; + case 70: behavior = MADV_DODUMP; break; + case 71: behavior = MADV_WIPEONFORK; break; + case 72: behavior = MADV_KEEPONFORK; break; + case 73: behavior = MADV_COLLAPSE; break; + } + + return sys_madvise(start, len_in, behavior); +} diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index 2a12a547b447..826c8e51b585 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c @@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, current->comm, current->pid, r20); return -ENOSYS; } - -asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags, - compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd, - const char __user * pathname) -{ - return sys_fanotify_mark(fanotify_fd, flags, - ((__u64)mask1 << 32) | mask0, - dfd, pathname); -} diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 4f77bd9be66b..f58c4bccfbce 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -39,6 +39,7 @@ registers). #include <asm/assembly.h> #include <asm/processor.h> #include <asm/cache.h> +#include <asm/spinlock_types.h> #include <linux/linkage.h> @@ -48,7 +49,33 @@ registers). 
*/ #define KILL_INSN break 0,0 - .level LEVEL + .level PA_ASM_LEVEL + + .macro lws_pagefault_disable reg1,reg2 + mfctl %cr30, \reg2 + ldo TASK_PAGEFAULT_DISABLED(\reg2), \reg2 + ldw 0(%sr2,\reg2), \reg1 + ldo 1(\reg1), \reg1 + stw \reg1, 0(%sr2,\reg2) + .endm + + .macro lws_pagefault_enable reg1,reg2 + mfctl %cr30, \reg2 + ldo TASK_PAGEFAULT_DISABLED(\reg2), \reg2 + ldw 0(%sr2,\reg2), \reg1 + ldo -1(\reg1), \reg1 + stw \reg1, 0(%sr2,\reg2) + .endm + + /* raise exception if spinlock content is not zero or + * __ARCH_SPIN_LOCK_UNLOCKED_VAL */ + .macro spinlock_check spin_val,tmpreg +#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK + ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmpreg + andcm,= \spin_val, \tmpreg, %r0 + .word SPINLOCK_BREAK_INSN +#endif + .endm .text @@ -74,11 +101,11 @@ ENTRY(linux_gateway_page) /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */ /* Light-weight-syscall entry must always be located at 0xb0 */ /* WARNING: Keep this number updated with table size changes */ -#define __NR_lws_entries (3) +#define __NR_lws_entries (5) lws_entry: gate lws_start, %r0 /* increase privilege */ - depi 3, 31, 2, %r31 /* Ensure we return into user mode. */ + depi PRIV_USER, 31, 2, %r31 /* Ensure we return into user mode. */ /* Fill from 0xb8 to 0xe0 */ .rept 10 @@ -89,7 +116,7 @@ lws_entry: mechanism to work. DO NOT MOVE THIS CODE EVER! */ set_thread_pointer: gate .+8, %r0 /* increase privilege */ - depi 3, 31, 2, %r31 /* Ensure we return into user mode. */ + depi PRIV_USER, 31, 2, %r31 /* Ensure we return into user mode. */ be 0(%sr7,%r31) /* return to user space */ mtctl %r26, %cr27 /* move arg0 to the control register */ @@ -139,9 +166,9 @@ linux_gateway_entry: xor %r1,%r30,%r30 /* ye olde xor trick */ xor %r1,%r30,%r1 xor %r1,%r30,%r30 - - ldo THREAD_SZ_ALGN+FRAME_SIZE(%r30),%r30 /* set up kernel stack */ + LDREG TASK_STACK(%r30),%r30 /* set up kernel stack */ + ldo FRAME_SIZE(%r30),%r30 /* N.B.: It is critical that we don't set sr7 to 0 until r30 * contains a valid kernel stack pointer. It is also * critical that we don't start using the kernel stack @@ -152,7 +179,6 @@ linux_gateway_entry: ssm PSW_SM_I, %r0 /* enable interrupts */ STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */ mfctl %cr30,%r1 /* get task ptr in %r1 */ - LDREG TI_TASK(%r1),%r1 /* Save some registers for sigcontext and potential task switch (see entry.S for the details of which ones are @@ -207,7 +233,7 @@ linux_gateway_entry: /* Are we being ptraced? */ mfctl %cr30, %r1 - LDREG TI_FLAGS(%r1),%r1 + LDREG TASK_TI_FLAGS(%r1),%r1 ldi _TIF_SYSCALL_TRACE_MASK, %r19 and,COND(=) %r1, %r19, %r0 b,n .Ltracesys @@ -217,10 +243,10 @@ linux_gateway_entry: #ifdef CONFIG_64BIT ldil L%sys_call_table, %r1 - or,= %r2,%r2,%r2 - addil L%(sys_call_table64-sys_call_table), %r1 + or,ev %r2,%r2,%r2 + ldil L%sys_call_table64, %r1 ldo R%sys_call_table(%r1), %r19 - or,= %r2,%r2,%r2 + or,ev %r2,%r2,%r2 ldo R%sys_call_table64(%r1), %r19 #else load32 sys_call_table, %r19 @@ -272,8 +298,7 @@ tracesys: * C bit set, a non-straced syscall entry results in C and D clear * in the saved PSW. */ - ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ - LDREG TI_TASK(%r1), %r1 + mfctl %cr30,%r1 /* get task ptr */ ssm 0,%r2 STREG %r2,TASK_PT_PSW(%r1) /* Lower 8 bits only!! 
*/ mfsp %sr0,%r2 @@ -327,8 +352,7 @@ tracesys_next: */ copy %ret0,%r20 - ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ - LDREG TI_TASK(%r1), %r1 + mfctl %cr30,%r1 /* get task ptr */ LDREG TASK_PT_GR28(%r1), %r28 /* Restore return value */ LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ LDREG TASK_PT_GR25(%r1), %r25 @@ -355,10 +379,10 @@ tracesys_next: extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */ ldil L%sys_call_table, %r1 - or,= %r2,%r2,%r2 - addil L%(sys_call_table64-sys_call_table), %r1 + or,ev %r2,%r2,%r2 + ldil L%sys_call_table64, %r1 ldo R%sys_call_table(%r1), %r19 - or,= %r2,%r2,%r2 + or,ev %r2,%r2,%r2 ldo R%sys_call_table64(%r1), %r19 #else load32 sys_call_table, %r19 @@ -385,16 +409,14 @@ tracesys_next: makes a direct call to syscall_trace. */ tracesys_exit: - ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ - LDREG TI_TASK(%r1), %r1 + mfctl %cr30,%r1 /* get task ptr */ #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif ldo TASK_REGS(%r1),%r26 BL do_syscall_trace_exit,%r2 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ - ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ - LDREG TI_TASK(%r1), %r1 + mfctl %cr30,%r1 /* get task ptr */ LDREG TASK_PT_GR28(%r1), %r28 /* Restore return val. */ ldil L%syscall_exit,%r1 @@ -407,8 +429,7 @@ tracesys_exit: ldo R%tracesys_sigexit(%r2),%r2 tracesys_sigexit: - ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ - LDREG TI_TASK(%r1), %r1 + mfctl %cr30,%r1 /* get task ptr */ #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif @@ -478,7 +499,7 @@ lws_start: extrd,u %r1,PSW_W_BIT,1,%r1 /* sp must be aligned on 4, so deposit the W bit setting into * the bottom of sp temporarily */ - or,ev %r1,%r30,%r30 + or,od %r1,%r30,%r30 /* Clip LWS number to a 32-bit value for 32-bit processes */ depdi 0, 31, 32, %r20 @@ -496,8 +517,36 @@ lws_start: /* Jump to lws, lws table pointers already relocated */ be,n 0(%sr2,%r21) +lws_exit_noerror: + lws_pagefault_enable %r1,%r21 + ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21 + stw,ma %r21, 0(%sr2,%r20) + ssm PSW_SM_I, %r0 + b lws_exit + copy %r0, %r21 + +lws_wouldblock: + ssm PSW_SM_I, %r0 + ldo 2(%r0), %r28 + b lws_exit + ldo -EAGAIN(%r0), %r21 + +lws_pagefault: + lws_pagefault_enable %r1,%r21 + ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21 + stw,ma %r21, 0(%sr2,%r20) + ssm PSW_SM_I, %r0 + ldo 3(%r0),%r28 + b lws_exit + ldo -EAGAIN(%r0),%r21 + +lws_fault: + ldo 1(%r0),%r28 + b lws_exit + ldo -EFAULT(%r0),%r21 + lws_exit_nosys: - ldo -ENOSYS(%r0),%r21 /* set errno */ + ldo -ENOSYS(%r0),%r21 /* Fall through: Return to userspace */ lws_exit: @@ -524,27 +573,19 @@ lws_exit: %r28 - Return prev through this register. %r21 - Kernel error code - If debugging is DISabled: - - %r21 has the following meanings: - + %r21 returns the following error codes: EAGAIN - CAS is busy, ldcw failed, try again. EFAULT - Read or write failed. - If debugging is enabled: - - EDEADLOCK - CAS called recursively. - EAGAIN && r28 == 1 - CAS is busy. Lock contended. - EAGAIN && r28 == 2 - CAS is busy. ldcw failed. - EFAULT - Read or write failed. + If EAGAIN is returned, %r28 indicates the busy reason: + r28 == 1 - CAS is busy. lock contended. + r28 == 2 - CAS is busy. ldcw failed. + r28 == 3 - CAS is busy. page fault. 
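This error contract is what the userspace side of the light-weight syscall (typically glibc's atomic helpers on hppa) has to honour: EFAULT is a real failure, while EAGAIN only reports that the per-address lock was contended or a page fault was pending, so the operation should simply be retried. A hedged sketch of that retry policy follows; lws_cas32() is a made-up wrapper standing in for the actual gateway call (a ble into the gateway page with the LWS number in %r20).

    #include <errno.h>
    #include <sched.h>
    #include <stdint.h>

    /*
     * Hypothetical wrapper around the LWS compare-and-swap gateway entry.
     * Assumed to return 0 once the CAS has been performed, or a negative
     * errno (-EAGAIN, -EFAULT) as documented above.
     */
    extern long lws_cas32(volatile uint32_t *addr, uint32_t old, uint32_t new_val);

    static int atomic_cas32(volatile uint32_t *addr, uint32_t old, uint32_t new_val)
    {
        long err;

        for (;;) {
            err = lws_cas32(addr, old, new_val);
            if (err != -EAGAIN)
                break;              /* performed, or a real fault */
            sched_yield();          /* lock busy / page fault pending: try again */
        }
        return (int)err;            /* 0 once performed, -EFAULT on a bad address */
    }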
Scratch: r20, r28, r1 ****************************************************/ - /* Do not enable LWS debugging */ -#define ENABLE_LWS_DEBUG 0 - /* ELF64 Process entry path */ lws_compare_and_swap64: #ifdef CONFIG_64BIT @@ -557,61 +598,49 @@ lws_compare_and_swap64: b,n lws_exit_nosys #endif - /* ELF32 Process entry path */ + /* ELF32/ELF64 Process entry path */ lws_compare_and_swap32: #ifdef CONFIG_64BIT - /* Clip all the input registers */ + /* Wide mode user process? */ + bb,<,n %sp, 31, lws_compare_and_swap + + /* Clip all the input registers for 32-bit processes */ depdi 0, 31, 32, %r26 depdi 0, 31, 32, %r25 depdi 0, 31, 32, %r24 #endif lws_compare_and_swap: - /* Load start of lock table */ - ldil L%lws_lock_start, %r20 - ldo R%lws_lock_start(%r20), %r28 + /* Trigger memory reference interruptions without writing to memory */ +1: ldw 0(%r26), %r28 + proberi (%r26), PRIV_USER, %r28 + comb,=,n %r28, %r0, lws_fault /* backwards, likely not taken */ + nop +2: stbys,e %r0, 0(%r26) - /* Extract four bits from r26 and hash lock (Bits 4-7) */ - extru %r26, 27, 4, %r20 + /* Calculate 8-bit hash index from virtual address */ + extru_safe %r26, 27, 8, %r20 + + /* Load start of lock table */ + ldil L%lws_lock_start, %r28 + ldo R%lws_lock_start(%r28), %r28 - /* Find lock to use, the hash is either one of 0 to - 15, multiplied by 16 (keep it 16-byte aligned) + /* Find lock to use, the hash index is one of 0 to + 255, multiplied by 16 (keep it 16-byte aligned) and add to the lock table offset. */ shlw %r20, 4, %r20 add %r20, %r28, %r20 -# if ENABLE_LWS_DEBUG - /* - DEBUG, check for deadlock! - If the thread register values are the same - then we were the one that locked it last and - this is a recurisve call that will deadlock. - We *must* giveup this call and fail. - */ - ldw 4(%sr2,%r20), %r28 /* Load thread register */ - /* WARNING: If cr27 cycles to the same value we have problems */ - mfctl %cr27, %r21 /* Get current thread register */ - cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */ - b lws_exit /* Return error! */ - ldo -EDEADLOCK(%r0), %r21 -cas_lock: - cmpb,=,n %r0, %r28, cas_nocontend /* Is nobody using it? */ - ldo 1(%r0), %r28 /* 1st case */ - b lws_exit /* Contended... */ - ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ -cas_nocontend: -# endif -/* ENABLE_LWS_DEBUG */ - rsm PSW_SM_I, %r0 /* Disable interrupts */ - /* COW breaks can cause contention on UP systems */ - LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */ - cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */ -cas_wouldblock: - ldo 2(%r0), %r28 /* 2nd case */ - ssm PSW_SM_I, %r0 - b lws_exit /* Contended... 
*/ - ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ + + /* Try to acquire the lock */ + LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 + comclr,<> %r0, %r28, %r0 + b,n lws_wouldblock + + /* Disable page faults to prevent sleeping in critical region */ + lws_pagefault_disable %r21,%r28 /* prev = *addr; @@ -621,64 +650,35 @@ cas_wouldblock: */ /* NOTES: - This all works becuse intr_do_signal + This all works because intr_do_signal and schedule both check the return iasq and see that we are on the kernel page so this process is never scheduled off or is ever sent any signal of any sort, - thus it is wholly atomic from usrspaces + thus it is wholly atomic from usrspace's perspective */ -cas_action: -#if defined CONFIG_SMP && ENABLE_LWS_DEBUG - /* DEBUG */ - mfctl %cr27, %r1 - stw %r1, 4(%sr2,%r20) -#endif /* The load and store could fail */ -1: ldw 0(%r26), %r28 +3: ldw 0(%r26), %r28 sub,<> %r28, %r25, %r0 -2: stw %r24, 0(%r26) - /* Free lock */ - sync - stw %r20, 0(%sr2,%r20) -#if ENABLE_LWS_DEBUG - /* Clear thread register indicator */ - stw %r0, 4(%sr2,%r20) -#endif - /* Enable interrupts */ - ssm PSW_SM_I, %r0 - /* Return to userspace, set no error */ - b lws_exit - copy %r0, %r21 +4: stw %r24, 0(%r26) + b,n lws_exit_noerror -3: - /* Error occurred on load or store */ - /* Free lock */ - sync - stw %r20, 0(%sr2,%r20) -#if ENABLE_LWS_DEBUG - stw %r0, 4(%sr2,%r20) -#endif - ssm PSW_SM_I, %r0 - b lws_exit - ldo -EFAULT(%r0),%r21 /* set errno */ - nop - nop - nop - nop + /* A fault occurred on load or stbys,e store */ +5: b,n lws_fault + ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 5b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 5b-linux_gateway_page) - /* Two exception table entries, one for the load, - the other for the store. Either return -EFAULT. - Each of the entries must be relocated. */ - ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page) + /* A page fault occurred in critical region */ +6: b,n lws_pagefault + ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 6b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 6b-linux_gateway_page) /*************************************************** New CAS implementation which uses pointers and variable size information. The value pointed by old and new MUST NOT change - while performing CAS. The lock only protect the value at %r26. + while performing CAS. The lock only protects the value at %r26. %r26 - Address to examine %r25 - Pointer to the value to check (old) @@ -687,25 +687,32 @@ cas_action: %r28 - Return non-zero on failure %r21 - Kernel error code - %r21 has the following meanings: - + %r21 returns the following error codes: EAGAIN - CAS is busy, ldcw failed, try again. EFAULT - Read or write failed. + If EAGAIN is returned, %r28 indicates the busy reason: + r28 == 1 - CAS is busy. lock contended. + r28 == 2 - CAS is busy. ldcw failed. + r28 == 3 - CAS is busy. page fault. + Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only) ****************************************************/ - /* ELF32 Process entry path */ lws_compare_and_swap_2: #ifdef CONFIG_64BIT - /* Clip the input registers. We don't need to clip %r23 as we - only use it for word operations */ + /* Wide mode user process? */ + bb,<,n %sp, 31, cas2_begin + + /* Clip the input registers for 32-bit processes. 
We don't + need to clip %r23 as we only use it for word operations */ depdi 0, 31, 32, %r26 depdi 0, 31, 32, %r25 depdi 0, 31, 32, %r24 #endif +cas2_begin: /* Check the validity of the size pointer */ subi,>>= 3, %r23, %r0 b,n lws_exit_nosys @@ -716,71 +723,81 @@ lws_compare_and_swap_2: blr %r29, %r0 nop - /* 8bit load */ -4: ldb 0(%r25), %r25 + /* 8-bit load */ +1: ldb 0(%r25), %r25 b cas2_lock_start -5: ldb 0(%r24), %r24 +2: ldb 0(%r24), %r24 nop nop nop nop nop - /* 16bit load */ -6: ldh 0(%r25), %r25 + /* 16-bit load */ +3: ldh 0(%r25), %r25 b cas2_lock_start -7: ldh 0(%r24), %r24 +4: ldh 0(%r24), %r24 nop nop nop nop nop - /* 32bit load */ -8: ldw 0(%r25), %r25 + /* 32-bit load */ +5: ldw 0(%r25), %r25 b cas2_lock_start -9: ldw 0(%r24), %r24 +6: ldw 0(%r24), %r24 nop nop nop nop nop - /* 64bit load */ + /* 64-bit load */ #ifdef CONFIG_64BIT -10: ldd 0(%r25), %r25 -11: ldd 0(%r24), %r24 +7: ldd 0(%r25), %r25 +8: ldd 0(%r24), %r24 #else /* Load old value into r22/r23 - high/low */ -10: ldw 0(%r25), %r22 -11: ldw 4(%r25), %r23 +7: ldw 0(%r25), %r22 +8: ldw 4(%r25), %r23 /* Load new value into fr4 for atomic store later */ -12: flddx 0(%r24), %fr4 +9: flddx 0(%r24), %fr4 #endif cas2_lock_start: - /* Load start of lock table */ - ldil L%lws_lock_start, %r20 - ldo R%lws_lock_start(%r20), %r28 + /* Trigger memory reference interruptions without writing to memory */ + copy %r26, %r28 + depi_safe 0, 31, 2, %r28 +10: ldw 0(%r28), %r1 + proberi (%r28), PRIV_USER, %r1 + comb,=,n %r1, %r0, lws_fault /* backwards, likely not taken */ + nop +11: stbys,e %r0, 0(%r28) + + /* Calculate 8-bit hash index from virtual address */ + extru_safe %r26, 27, 8, %r20 - /* Extract four bits from r26 and hash lock (Bits 4-7) */ - extru %r26, 27, 4, %r20 + /* Load start of lock table */ + ldil L%lws_lock_start, %r28 + ldo R%lws_lock_start(%r28), %r28 - /* Find lock to use, the hash is either one of 0 to - 15, multiplied by 16 (keep it 16-byte aligned) + /* Find lock to use, the hash index is one of 0 to + 255, multiplied by 16 (keep it 16-byte aligned) and add to the lock table offset. */ shlw %r20, 4, %r20 add %r20, %r28, %r20 rsm PSW_SM_I, %r0 /* Disable interrupts */ - /* COW breaks can cause contention on UP systems */ - LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */ - cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */ -cas2_wouldblock: - ldo 2(%r0), %r28 /* 2nd case */ - ssm PSW_SM_I, %r0 - b lws_exit /* Contended... 
*/ - ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ + + /* Try to acquire the lock */ + LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 + comclr,<> %r0, %r28, %r0 + b,n lws_wouldblock + + /* Disable page faults to prevent sleeping in critical region */ + lws_pagefault_disable %r21,%r28 /* prev = *addr; @@ -790,117 +807,501 @@ cas2_wouldblock: */ /* NOTES: - This all works becuse intr_do_signal + This all works because intr_do_signal and schedule both check the return iasq and see that we are on the kernel page so this process is never scheduled off or is ever sent any signal of any sort, - thus it is wholly atomic from usrspaces + thus it is wholly atomic from usrspace's perspective */ -cas2_action: + /* Jump to the correct function */ blr %r29, %r0 /* Set %r28 as non-zero for now */ ldo 1(%r0),%r28 - /* 8bit CAS */ -13: ldb 0(%r26), %r29 + /* 8-bit CAS */ +12: ldb 0(%r26), %r29 sub,= %r29, %r25, %r0 - b,n cas2_end -14: stb %r24, 0(%r26) - b cas2_end + b,n lws_exit_noerror +13: stb %r24, 0(%r26) + b lws_exit_noerror copy %r0, %r28 nop nop - /* 16bit CAS */ -15: ldh 0(%r26), %r29 + /* 16-bit CAS */ +14: ldh 0(%r26), %r29 sub,= %r29, %r25, %r0 - b,n cas2_end -16: sth %r24, 0(%r26) - b cas2_end + b,n lws_exit_noerror +15: sth %r24, 0(%r26) + b lws_exit_noerror copy %r0, %r28 nop nop - /* 32bit CAS */ -17: ldw 0(%r26), %r29 + /* 32-bit CAS */ +16: ldw 0(%r26), %r29 sub,= %r29, %r25, %r0 - b,n cas2_end -18: stw %r24, 0(%r26) - b cas2_end + b,n lws_exit_noerror +17: stw %r24, 0(%r26) + b lws_exit_noerror copy %r0, %r28 nop nop - /* 64bit CAS */ + /* 64-bit CAS */ #ifdef CONFIG_64BIT -19: ldd 0(%r26), %r29 +18: ldd 0(%r26), %r29 sub,*= %r29, %r25, %r0 - b,n cas2_end -20: std %r24, 0(%r26) + b,n lws_exit_noerror +19: std %r24, 0(%r26) copy %r0, %r28 #else /* Compare first word */ -19: ldw 0(%r26), %r29 +18: ldw 0(%r26), %r29 sub,= %r29, %r22, %r0 - b,n cas2_end + b,n lws_exit_noerror /* Compare second word */ -20: ldw 4(%r26), %r29 +19: ldw 4(%r26), %r29 sub,= %r29, %r23, %r0 - b,n cas2_end + b,n lws_exit_noerror /* Perform the store */ -21: fstdx %fr4, 0(%r26) +20: fstdx %fr4, 0(%r26) copy %r0, %r28 #endif + b lws_exit_noerror + copy %r0, %r28 -cas2_end: - /* Free lock */ - sync - stw %r20, 0(%sr2,%r20) - /* Enable interrupts */ - ssm PSW_SM_I, %r0 - /* Return to userspace, set no error */ - b lws_exit - copy %r0, %r21 + /* A fault occurred on load or stbys,e store */ +30: b,n lws_fault + ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page) +#ifndef CONFIG_64BIT + ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page) +#endif -22: - /* Error occurred on load or store */ - /* Free lock */ - sync - stw %r20, 0(%sr2,%r20) - ssm PSW_SM_I, %r0 + ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page) + + /* A page fault occurred in critical region */ +31: b,n lws_pagefault + ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page) + 
ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page) +#ifndef CONFIG_64BIT + ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page) +#endif + + + /*************************************************** + LWS atomic exchange. + + %r26 - Exchange address + %r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit) + %r24 - Address of new value + %r23 - Address of old value + %r28 - Return non-zero on failure + %r21 - Kernel error code + + %r21 returns the following error codes: + EAGAIN - CAS is busy, ldcw failed, try again. + EFAULT - Read or write failed. + + If EAGAIN is returned, %r28 indicates the busy reason: + r28 == 1 - CAS is busy. lock contended. + r28 == 2 - CAS is busy. ldcw failed. + r28 == 3 - CAS is busy. page fault. + + Scratch: r20, r1 + + ****************************************************/ + +lws_atomic_xchg: +#ifdef CONFIG_64BIT + /* Wide mode user process? */ + bb,<,n %sp, 31, atomic_xchg_begin + + /* Clip the input registers for 32-bit processes. We don't + need to clip %r23 as we only use it for word operations */ + depdi 0, 31, 32, %r26 + depdi 0, 31, 32, %r25 + depdi 0, 31, 32, %r24 + depdi 0, 31, 32, %r23 +#endif + +atomic_xchg_begin: + /* Check the validity of the size pointer */ + subi,>>= 3, %r25, %r0 + b,n lws_exit_nosys + + /* Jump to the functions which will load the old and new values into + registers depending on the their size */ + shlw %r25, 2, %r1 + blr %r1, %r0 + nop + + /* Perform exception checks */ + + /* 8-bit exchange */ +1: ldb 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop + copy %r23, %r20 + depi_safe 0, 31, 2, %r20 + b atomic_xchg_start +2: stbys,e %r0, 0(%r20) + + /* 16-bit exchange */ +3: ldh 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop + copy %r23, %r20 + depi_safe 0, 31, 2, %r20 + b atomic_xchg_start +4: stbys,e %r0, 0(%r20) + + /* 32-bit exchange */ +5: ldw 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop + b atomic_xchg_start +6: stbys,e %r0, 0(%r23) + nop + nop + + /* 64-bit exchange */ +#ifdef CONFIG_64BIT +7: ldd 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop +8: stdby,e %r0, 0(%r23) +#else +7: ldw 0(%r24), %r20 +8: ldw 4(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop + copy %r23, %r20 + depi_safe 0, 31, 2, %r20 +9: stbys,e %r0, 0(%r20) +10: stbys,e %r0, 4(%r20) +#endif + +atomic_xchg_start: + /* Trigger memory reference interruptions without writing to memory */ + copy %r26, %r28 + depi_safe 0, 31, 2, %r28 +11: ldw 0(%r28), %r1 +12: stbys,e %r0, 0(%r28) + + /* Calculate 8-bit hash index from virtual address */ + extru_safe %r26, 27, 8, %r20 + + /* Load start of lock table */ + ldil L%lws_lock_start, %r28 + ldo R%lws_lock_start(%r28), %r28 + + /* Find lock to use, the hash index is one of 0 to + 255, 
multiplied by 16 (keep it 16-byte aligned) + and add to the lock table offset. */ + shlw %r20, 4, %r20 + add %r20, %r28, %r20 + + rsm PSW_SM_I, %r0 /* Disable interrupts */ + + /* Try to acquire the lock */ + LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 + comclr,<> %r0, %r28, %r0 + b,n lws_wouldblock + + /* Disable page faults to prevent sleeping in critical region */ + lws_pagefault_disable %r21,%r28 + + /* NOTES: + This all works because intr_do_signal + and schedule both check the return iasq + and see that we are on the kernel page + so this process is never scheduled off + or is ever sent any signal of any sort, + thus it is wholly atomic from userspace's + perspective + */ + + /* Jump to the correct function */ + blr %r1, %r0 + /* Set %r28 as non-zero for now */ ldo 1(%r0),%r28 - b lws_exit - ldo -EFAULT(%r0),%r21 /* set errno */ - nop - nop - nop - - /* Exception table entries, for the load and store, return EFAULT. - Each of the entries must be relocated. */ - ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page) + + /* 8-bit exchange */ +14: ldb 0(%r26), %r1 +15: stb %r1, 0(%r23) +15: ldb 0(%r24), %r1 +17: stb %r1, 0(%r26) + b lws_exit_noerror + copy %r0, %r28 + nop + nop + + /* 16-bit exchange */ +18: ldh 0(%r26), %r1 +19: sth %r1, 0(%r23) +20: ldh 0(%r24), %r1 +21: sth %r1, 0(%r26) + b lws_exit_noerror + copy %r0, %r28 + nop + nop + + /* 32-bit exchange */ +22: ldw 0(%r26), %r1 +23: stw %r1, 0(%r23) +24: ldw 0(%r24), %r1 +25: stw %r1, 0(%r26) + b lws_exit_noerror + copy %r0, %r28 + nop + nop + + /* 64-bit exchange */ +#ifdef CONFIG_64BIT +26: ldd 0(%r26), %r1 +27: std %r1, 0(%r23) +28: ldd 0(%r24), %r1 +29: std %r1, 0(%r26) +#else +26: flddx 0(%r26), %fr4 +27: fstdx %fr4, 0(%r23) +28: flddx 0(%r24), %fr4 +29: fstdx %fr4, 0(%r26) +#endif + b lws_exit_noerror + copy %r0, %r28 + + /* A fault occurred on load or stbys,e store */ +30: b,n lws_fault + ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page) + 
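Userspace is expected to wrap these entry points in a retry loop: the calling convention documented above only distinguishes -EFAULT (give up) from -EAGAIN, which covers all three busy reasons reported in %r28. A minimal sketch of such a wrapper, assuming a hypothetical lws_call() helper that issues the gateway branch for the given lws_table slot and hands back the error code the kernel leaves in %r21:

	#include <errno.h>

	/* Hypothetical helper: performs the light-weight-syscall branch for
	 * the given lws_table slot and returns 0, -EAGAIN or -EFAULT. */
	extern long lws_call(int func, void *addr, unsigned int size,
			     void *new_val, void *old_val);

	static long user_atomic_xchg(void *addr, unsigned int size,
				     void *new_val, void *old_val)
	{
		long err;

		do {
			/* -EAGAIN covers lock contention, a failed ldcw and a
			 * page fault in the critical region: just try again. */
			err = lws_call(3 /* atomic_xchg slot in lws_table */,
				       addr, size, new_val, old_val);
		} while (err == -EAGAIN);

		return err;	/* 0 on success, -EFAULT on a bad address */
	}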
ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page) #ifndef CONFIG_64BIT - ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page) - ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page) #endif + ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 30b-linux_gateway_page) + + /* A page fault occurred in critical region */ +31: b,n lws_pagefault + ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(22b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(23b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(24b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(25b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(26b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(27b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(28b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(29b-linux_gateway_page, 31b-linux_gateway_page) + + /*************************************************** + LWS atomic store. + + %r26 - Address to store + %r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit) + %r24 - Address of value to store + %r28 - Return non-zero on failure + %r21 - Kernel error code + + %r21 returns the following error codes: + EAGAIN - CAS is busy, ldcw failed, try again. + EFAULT - Read or write failed. + + If EAGAIN is returned, %r28 indicates the busy reason: + r28 == 1 - CAS is busy. lock contended. + r28 == 2 - CAS is busy. ldcw failed. + r28 == 3 - CAS is busy. page fault. + + Scratch: r20, r1 + + ****************************************************/ + +lws_atomic_store: +#ifdef CONFIG_64BIT + /* Wide mode user process? */ + bb,<,n %sp, 31, atomic_store_begin + + /* Clip the input registers for 32-bit processes. 
We don't + need to clip %r23 as we only use it for word operations */ + depdi 0, 31, 32, %r26 + depdi 0, 31, 32, %r25 + depdi 0, 31, 32, %r24 +#endif + +atomic_store_begin: + /* Check the validity of the size pointer */ + subi,>>= 3, %r25, %r0 + b,n lws_exit_nosys + + shlw %r25, 1, %r1 + blr %r1, %r0 + nop + + /* Perform exception checks */ + + /* 8-bit store */ +1: ldb 0(%r24), %r20 + b,n atomic_store_start + nop + nop + + /* 16-bit store */ +2: ldh 0(%r24), %r20 + b,n atomic_store_start + nop + nop + + /* 32-bit store */ +3: ldw 0(%r24), %r20 + b,n atomic_store_start + nop + nop + + /* 64-bit store */ +#ifdef CONFIG_64BIT +4: ldd 0(%r24), %r20 +#else +4: ldw 0(%r24), %r20 +5: ldw 4(%r24), %r20 +#endif + +atomic_store_start: + /* Trigger memory reference interruptions without writing to memory */ + copy %r26, %r28 + depi_safe 0, 31, 2, %r28 +6: ldw 0(%r28), %r1 +7: stbys,e %r0, 0(%r28) + + /* Calculate 8-bit hash index from virtual address */ + extru_safe %r26, 27, 8, %r20 + + /* Load start of lock table */ + ldil L%lws_lock_start, %r28 + ldo R%lws_lock_start(%r28), %r28 + + /* Find lock to use, the hash index is one of 0 to + 255, multiplied by 16 (keep it 16-byte aligned) + and add to the lock table offset. */ + shlw %r20, 4, %r20 + add %r20, %r28, %r20 + + rsm PSW_SM_I, %r0 /* Disable interrupts */ + + /* Try to acquire the lock */ + LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 + comclr,<> %r0, %r28, %r0 + b,n lws_wouldblock + + /* Disable page faults to prevent sleeping in critical region */ + lws_pagefault_disable %r21,%r28 + + /* NOTES: + This all works because intr_do_signal + and schedule both check the return iasq + and see that we are on the kernel page + so this process is never scheduled off + or is ever sent any signal of any sort, + thus it is wholly atomic from userspace's + perspective + */ + + /* Jump to the correct function */ + blr %r1, %r0 + /* Set %r28 as non-zero for now */ + ldo 1(%r0),%r28 + + /* 8-bit store */ +9: ldb 0(%r24), %r1 +10: stb %r1, 0(%r26) + b lws_exit_noerror + copy %r0, %r28 + + /* 16-bit store */ +11: ldh 0(%r24), %r1 +12: sth %r1, 0(%r26) + b lws_exit_noerror + copy %r0, %r28 + + /* 32-bit store */ +13: ldw 0(%r24), %r1 +14: stw %r1, 0(%r26) + b lws_exit_noerror + copy %r0, %r28 + + /* 64-bit store */ +#ifdef CONFIG_64BIT +15: ldd 0(%r24), %r1 +16: std %r1, 0(%r26) +#else +15: flddx 0(%r24), %fr4 +16: fstdx %fr4, 0(%r26) +#endif + b lws_exit_noerror + copy %r0, %r28 + + /* A fault occurred on load or stbys,e store */ +30: b,n lws_fault + ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page) +#ifndef CONFIG_64BIT + ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page) +#endif + + ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page) + + /* A page fault occurred in critical region */ +31: b,n lws_pagefault + ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page) + 
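The compare-and-swap, exchange and store paths all serialize on the same 256-entry lock array: the user address is hashed, scaled by the 16-byte slot size and added to lws_lock_start. In plain C the selection looks roughly like this (illustrative only; the geometry matches the lws_lock_start definition at the end of this file):

	#include <stdint.h>

	#define LWS_LOCK_COUNT	256	/* .rept 256 in lws_lock_start */
	#define LWS_LOCK_STRIDE	16	/* four .word slots per lock, 16-byte aligned */

	/* extru_safe %r26, 27, 8, %r20 extracts address bits 4..11, i.e. the
	 * 8-bit hash below; shlw %r20, 4 plus the add then index the table. */
	static volatile uint32_t *lws_lock_for(uint8_t *lws_lock_start,
					       uintptr_t uaddr)
	{
		unsigned int hash = (uaddr >> 4) & (LWS_LOCK_COUNT - 1);

		return (volatile uint32_t *)(lws_lock_start + hash * LWS_LOCK_STRIDE);
	}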
ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page) + /* Make sure nothing else is placed on this page */ .align PAGE_SIZE END(linux_gateway_page) @@ -919,28 +1320,32 @@ ENTRY(end_linux_gateway_page) ENTRY(lws_table) LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */ LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */ - LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */ + LWS_ENTRY(compare_and_swap_2) /* 2 - Atomic 64bit CAS */ + LWS_ENTRY(atomic_xchg) /* 3 - Atomic Exchange */ + LWS_ENTRY(atomic_store) /* 4 - Atomic Store */ END(lws_table) /* End of lws table */ -#define __SYSCALL(nr, entry, nargs) ASM_ULONG_INSN entry - .align 8 -ENTRY(sys_call_table) - .export sys_call_table,data #ifdef CONFIG_64BIT -#include <asm/syscall_table_c32.h> /* Compat syscalls */ +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #else -#include <asm/syscall_table_32.h> /* 32-bit native syscalls */ +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) #endif +#define __SYSCALL(nr, entry) ASM_ULONG_INSN entry + .align 8 +ENTRY(sys_call_table) + .export sys_call_table,data +#include <asm/syscall_table_32.h> /* 32-bit syscalls */ END(sys_call_table) #ifdef CONFIG_64BIT +#undef __SYSCALL_WITH_COMPAT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) .align 8 ENTRY(sys_call_table64) -#include <asm/syscall_table_64.h> /* 64-bit native syscalls */ +#include <asm/syscall_table_64.h> /* 64-bit syscalls */ END(sys_call_table64) #endif -#undef __SYSCALL /* All light-weight-syscall atomic operations @@ -954,9 +1359,9 @@ END(sys_call_table64) .align L1_CACHE_BYTES ENTRY(lws_lock_start) /* lws locks */ - .rept 16 + .rept 256 /* Keep locks aligned at 16-bytes */ - .word 1 + .word __ARCH_SPIN_LOCK_UNLOCKED_VAL .word 0 .word 0 .word 0 @@ -965,5 +1370,3 @@ END(lws_lock_start) .previous .end - - diff --git a/arch/parisc/kernel/syscalls/Makefile b/arch/parisc/kernel/syscalls/Makefile index c22a21c39f30..8440c16dfb22 100644 --- a/arch/parisc/kernel/syscalls/Makefile +++ b/arch/parisc/kernel/syscalls/Makefile @@ -2,54 +2,32 @@ kapi := arch/$(SRCARCH)/include/generated/asm uapi := arch/$(SRCARCH)/include/generated/uapi/asm -_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ - $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') +$(shell mkdir -p $(uapi) $(kapi)) -syscall := $(srctree)/$(src)/syscall.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +syscall := $(src)/syscall.tbl +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis common,$* $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis common,$* $< $@ -syshdr_abis_unistd_32 := common,32 -$(uapi)/unistd_32.h: $(syscall) $(syshdr) +$(uapi)/unistd_%.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_abis_unistd_64 := common,64 -$(uapi)/unistd_64.h: $(syscall) $(syshdr) - $(call if_changed,syshdr) - 
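With the __SYSCALL_WITH_COMPAT() definitions above, the single generated syscall_table_32.h provides the native table on 32-bit kernels and the compat table (used for 32-bit userspace) on 64-bit kernels, while sys_call_table64 redefines the macro so the native entries are picked again. A rough standalone C transcription of that trick; the shape of the generated line is assumed from scripts/syscalltbl.sh, and the real tables are arrays of ASM_ULONG_INSN words rather than function pointers:

	#include <stdio.h>

	static long sys_execve(void)        { return 0; }
	static long compat_sys_execve(void) { return 1; }

	typedef long (*syscall_fn)(void);

	#define __SYSCALL(nr, entry)	[nr] = entry,
	#ifdef CONFIG_64BIT
	#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)
	#else
	#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
	#endif

	static const syscall_fn sys_call_table[16] = {
		/* assumed shape of the generated syscall_table_32.h line for
		 * "11 common execve sys_execve compat_sys_execve": */
		__SYSCALL_WITH_COMPAT(11, sys_execve, compat_sys_execve)
	};

	int main(void)
	{
		/* prints "compat" when built with -DCONFIG_64BIT, "native" otherwise */
		printf("%s\n", sys_call_table[11]() ? "compat" : "native");
		return 0;
	}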
-systbl_abis_syscall_table_32 := common,32 -$(kapi)/syscall_table_32.h: $(syscall) $(systbl) - $(call if_changed,systbl) - -systbl_abis_syscall_table_64 := common,64 -$(kapi)/syscall_table_64.h: $(syscall) $(systbl) - $(call if_changed,systbl) - -systbl_abis_syscall_table_c32 := common,32 -systbl_abi_syscall_table_c32 := c32 -$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) +$(kapi)/syscall_table_%.h: $(syscall) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_32.h unistd_64.h kapisyshdr-y += syscall_table_32.h \ - syscall_table_64.h \ - syscall_table_c32.h + syscall_table_64.h -targets += $(uapisyshdr-y) $(kapisyshdr-y) +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) PHONY += all -all: $(addprefix $(uapi)/,$(uapisyshdr-y)) -all: $(addprefix $(kapi)/,$(kapisyshdr-y)) +all: $(uapisyshdr-y) $(kapisyshdr-y) @: diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 9bbd2f9f56c8..39bdacaa530b 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -20,7 +20,8 @@ 10 common unlink sys_unlink 11 common execve sys_execve compat_sys_execve 12 common chdir sys_chdir -13 common time sys_time compat_sys_time +13 32 time sys_time32 +13 64 time sys_time 14 common mknod sys_mknod 15 common chmod sys_chmod 16 common lchown sys_lchown @@ -28,16 +29,18 @@ 18 common stat sys_newstat compat_sys_newstat 19 common lseek sys_lseek compat_sys_lseek 20 common getpid sys_getpid -21 common mount sys_mount compat_sys_mount +21 common mount sys_mount 22 common bind sys_bind 23 common setuid sys_setuid 24 common getuid sys_getuid -25 common stime sys_stime compat_sys_stime +25 32 stime sys_stime32 +25 64 stime sys_stime 26 common ptrace sys_ptrace compat_sys_ptrace 27 common alarm sys_alarm 28 common fstat sys_newfstat compat_sys_newfstat 29 common pause sys_pause -30 common utime sys_utime compat_sys_utime +30 32 utime sys_utime32 +30 64 utime sys_utime 31 common connect sys_connect 32 common listen sys_listen 33 common access sys_access @@ -105,7 +108,7 @@ 95 common fchown sys_fchown 96 common getpriority sys_getpriority 97 common setpriority sys_setpriority -98 common recv sys_recv +98 common recv sys_recv compat_sys_recv 99 common statfs sys_statfs compat_sys_statfs 100 common fstatfs sys_fstatfs compat_sys_fstatfs 101 common stat64 sys_stat64 @@ -128,12 +131,13 @@ 116 common sysinfo sys_sysinfo compat_sys_sysinfo 117 common shutdown sys_shutdown 118 common fsync sys_fsync -119 common madvise sys_madvise +119 common madvise parisc_madvise 120 common clone sys_clone_wrapper 121 common setdomainname sys_setdomainname 122 common sendfile sys_sendfile compat_sys_sendfile -123 common recvfrom sys_recvfrom -124 common adjtimex sys_adjtimex compat_sys_adjtimex +123 common recvfrom sys_recvfrom compat_sys_recvfrom +124 32 adjtimex sys_adjtimex_time32 +124 64 adjtimex sys_adjtimex 125 common mprotect sys_mprotect 126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask # 127 was create_module @@ -143,7 +147,7 @@ 131 common quotactl sys_quotactl 132 common getpgid sys_getpgid 133 common fchdir sys_fchdir -134 common bdflush sys_bdflush +134 common bdflush sys_ni_syscall 135 common sysfs sys_sysfs 136 32 personality parisc_personality 136 64 personality sys_personality @@ -155,11 +159,11 @@ 142 common _newselect sys_select compat_sys_select 143 common flock sys_flock 144 common 
msync sys_msync -145 common readv sys_readv compat_sys_readv -146 common writev sys_writev compat_sys_writev +145 common readv sys_readv +146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl compat_sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall @@ -171,8 +175,10 @@ 158 common sched_yield sys_sched_yield 159 common sched_get_priority_max sys_sched_get_priority_max 160 common sched_get_priority_min sys_sched_get_priority_min -161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval -162 common nanosleep sys_nanosleep compat_sys_nanosleep +161 32 sched_rr_get_interval sys_sched_rr_get_interval_time32 +161 64 sched_rr_get_interval sys_sched_rr_get_interval +162 32 nanosleep sys_nanosleep_time32 +162 64 nanosleep sys_nanosleep 163 common mremap sys_mremap 164 common setresuid sys_setresuid 165 common getresuid sys_getresuid @@ -187,12 +193,13 @@ 174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction 175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask 176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending -177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait +177 32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 +177 64 rt_sigtimedwait sys_rt_sigtimedwait 178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 180 common chown sys_chown -181 common setsockopt sys_setsockopt compat_sys_setsockopt -182 common getsockopt sys_getsockopt compat_sys_getsockopt +181 common setsockopt sys_setsockopt sys_setsockopt +182 common getsockopt sys_getsockopt sys_getsockopt 183 common sendmsg sys_sendmsg compat_sys_sendmsg 184 common recvmsg sys_recvmsg compat_sys_recvmsg 185 common semop sys_semop @@ -223,29 +230,34 @@ 207 64 readahead sys_readahead 208 common tkill sys_tkill 209 common sendfile64 sys_sendfile64 compat_sys_sendfile64 -210 common futex sys_futex compat_sys_futex +210 32 futex sys_futex_time32 +210 64 futex sys_futex 211 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity 212 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity # 213 was set_thread_area # 214 was get_thread_area 215 common io_setup sys_io_setup compat_sys_io_setup 216 common io_destroy sys_io_destroy -217 common io_getevents sys_io_getevents compat_sys_io_getevents +217 32 io_getevents sys_io_getevents_time32 +217 64 io_getevents sys_io_getevents 218 common io_submit sys_io_submit compat_sys_io_submit 219 common io_cancel sys_io_cancel # 220 was alloc_hugepages # 221 was free_hugepages 222 common exit_group sys_exit_group -223 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +223 common lookup_dcookie sys_ni_syscall 224 common epoll_create sys_epoll_create 225 common epoll_ctl sys_epoll_ctl 226 common epoll_wait sys_epoll_wait 227 common remap_file_pages sys_remap_file_pages -228 common semtimedop sys_semtimedop compat_sys_semtimedop +228 32 semtimedop sys_semtimedop_time32 +228 64 semtimedop sys_semtimedop 229 common mq_open sys_mq_open compat_sys_mq_open 230 common mq_unlink sys_mq_unlink -231 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend -232 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive +231 32 mq_timedsend sys_mq_timedsend_time32 +231 64 mq_timedsend 
sys_mq_timedsend +232 32 mq_timedreceive sys_mq_timedreceive_time32 +232 64 mq_timedreceive sys_mq_timedreceive 233 common mq_notify sys_mq_notify compat_sys_mq_notify 234 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr 235 common waitid sys_waitid compat_sys_waitid @@ -265,18 +277,24 @@ 248 common lremovexattr sys_lremovexattr 249 common fremovexattr sys_fremovexattr 250 common timer_create sys_timer_create compat_sys_timer_create -251 common timer_settime sys_timer_settime compat_sys_timer_settime -252 common timer_gettime sys_timer_gettime compat_sys_timer_gettime +251 32 timer_settime sys_timer_settime32 +251 64 timer_settime sys_timer_settime +252 32 timer_gettime sys_timer_gettime32 +252 64 timer_gettime sys_timer_gettime 253 common timer_getoverrun sys_timer_getoverrun 254 common timer_delete sys_timer_delete -255 common clock_settime sys_clock_settime compat_sys_clock_settime -256 common clock_gettime sys_clock_gettime compat_sys_clock_gettime -257 common clock_getres sys_clock_getres compat_sys_clock_getres -258 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep +255 32 clock_settime sys_clock_settime32 +255 64 clock_settime sys_clock_settime +256 32 clock_gettime sys_clock_gettime32 +256 64 clock_gettime sys_clock_gettime +257 32 clock_getres sys_clock_getres_time32 +257 64 clock_getres sys_clock_getres +258 32 clock_nanosleep sys_clock_nanosleep_time32 +258 64 clock_nanosleep sys_clock_nanosleep 259 common tgkill sys_tgkill -260 common mbind sys_mbind compat_sys_mbind -261 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -262 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +260 common mbind sys_mbind +261 common get_mempolicy sys_get_mempolicy +262 common set_mempolicy sys_set_mempolicy # 263 was vserver 264 common add_key sys_add_key 265 common request_key sys_request_key @@ -287,13 +305,16 @@ 270 common inotify_add_watch sys_inotify_add_watch 271 common inotify_rm_watch sys_inotify_rm_watch 272 common migrate_pages sys_migrate_pages -273 common pselect6 sys_pselect6 compat_sys_pselect6 -274 common ppoll sys_ppoll compat_sys_ppoll +273 32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 +273 64 pselect6 sys_pselect6 +274 32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 +274 64 ppoll sys_ppoll 275 common openat sys_openat compat_sys_openat 276 common mkdirat sys_mkdirat 277 common mknodat sys_mknodat 278 common fchownat sys_fchownat -279 common futimesat sys_futimesat compat_sys_futimesat +279 32 futimesat sys_futimesat_time32 +279 64 futimesat sys_futimesat 280 common fstatat64 sys_fstatat64 281 common unlinkat sys_unlinkat 282 common renameat sys_renameat @@ -309,50 +330,56 @@ 292 32 sync_file_range parisc_sync_file_range 292 64 sync_file_range sys_sync_file_range 293 common tee sys_tee -294 common vmsplice sys_vmsplice compat_sys_vmsplice -295 common move_pages sys_move_pages compat_sys_move_pages +294 common vmsplice sys_vmsplice +295 common move_pages sys_move_pages 296 common getcpu sys_getcpu 297 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 298 common statfs64 sys_statfs64 compat_sys_statfs64 299 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 300 common kexec_load sys_kexec_load compat_sys_kexec_load -301 common utimensat sys_utimensat compat_sys_utimensat +301 32 utimensat sys_utimensat_time32 +301 64 utimensat sys_utimensat 302 common signalfd sys_signalfd compat_sys_signalfd # 303 was timerfd 304 common eventfd sys_eventfd 305 32 fallocate parisc_fallocate 305 64 fallocate 
sys_fallocate -306 common timerfd_create sys_timerfd_create -307 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime -308 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime -309 common signalfd4 sys_signalfd4 compat_sys_signalfd4 -310 common eventfd2 sys_eventfd2 +306 common timerfd_create parisc_timerfd_create +307 32 timerfd_settime sys_timerfd_settime32 +307 64 timerfd_settime sys_timerfd_settime +308 32 timerfd_gettime sys_timerfd_gettime32 +308 64 timerfd_gettime sys_timerfd_gettime +309 common signalfd4 parisc_signalfd4 parisc_compat_signalfd4 +310 common eventfd2 parisc_eventfd2 311 common epoll_create1 sys_epoll_create1 312 common dup3 sys_dup3 -313 common pipe2 sys_pipe2 -314 common inotify_init1 sys_inotify_init1 +313 common pipe2 parisc_pipe2 +314 common inotify_init1 parisc_inotify_init1 315 common preadv sys_preadv compat_sys_preadv 316 common pwritev sys_pwritev compat_sys_pwritev 317 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 318 common perf_event_open sys_perf_event_open -319 common recvmmsg sys_recvmmsg compat_sys_recvmmsg +319 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 +319 64 recvmmsg sys_recvmmsg 320 common accept4 sys_accept4 321 common prlimit64 sys_prlimit64 322 common fanotify_init sys_fanotify_init -323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark -324 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime +323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark +324 32 clock_adjtime sys_clock_adjtime32 +324 64 clock_adjtime sys_clock_adjtime 325 common name_to_handle_at sys_name_to_handle_at 326 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at 327 common syncfs sys_syncfs 328 common setns sys_setns 329 common sendmmsg sys_sendmmsg compat_sys_sendmmsg -330 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv -331 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +330 common process_vm_readv sys_process_vm_readv +331 common process_vm_writev sys_process_vm_writev 332 common kcmp sys_kcmp 333 common finit_module sys_finit_module 334 common sched_setattr sys_sched_setattr 335 common sched_getattr sys_sched_getattr -336 common utimes sys_utimes compat_sys_utimes +336 32 utimes sys_utimes_time32 +336 64 utimes sys_utimes 337 common renameat2 sys_renameat2 338 common seccomp sys_seccomp 339 common getrandom sys_getrandom @@ -360,10 +387,85 @@ 341 common bpf sys_bpf 342 common execveat sys_execveat compat_sys_execveat 343 common membarrier sys_membarrier -344 common userfaultfd sys_userfaultfd +344 common userfaultfd parisc_userfaultfd 345 common mlock2 sys_mlock2 346 common copy_file_range sys_copy_file_range 347 common preadv2 sys_preadv2 compat_sys_preadv2 348 common pwritev2 sys_pwritev2 compat_sys_pwritev2 349 common statx sys_statx -350 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents +350 32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents +350 64 io_pgetevents sys_io_pgetevents +351 common pkey_mprotect sys_pkey_mprotect +352 common pkey_alloc sys_pkey_alloc +353 common pkey_free sys_pkey_free +354 common rseq sys_rseq +355 common kexec_file_load sys_kexec_file_load sys_kexec_file_load +356 common cacheflush sys_cacheflush +# up to 402 is unassigned and reserved for arch specific syscalls +403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime +404 32 clock_settime64 sys_clock_settime sys_clock_settime +405 32 clock_adjtime64 
sys_clock_adjtime sys_clock_adjtime +406 32 clock_getres_time64 sys_clock_getres sys_clock_getres +407 32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep +408 32 timer_gettime64 sys_timer_gettime sys_timer_gettime +409 32 timer_settime64 sys_timer_settime sys_timer_settime +410 32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime +411 32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime +412 32 utimensat_time64 sys_utimensat sys_utimensat +413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 +414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 +416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 +417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 +418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend +419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive +420 32 semtimedop_time64 sys_semtimedop sys_semtimedop +421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 +422 32 futex_time64 sys_futex sys_futex +423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval +424 common pidfd_send_signal sys_pidfd_send_signal +425 common io_uring_setup sys_io_uring_setup +426 common io_uring_enter sys_io_uring_enter +427 common io_uring_register sys_io_uring_register +428 common open_tree sys_open_tree +429 common move_mount sys_move_mount +430 common fsopen sys_fsopen +431 common fsconfig sys_fsconfig +432 common fsmount sys_fsmount +433 common fspick sys_fspick +434 common pidfd_open sys_pidfd_open +435 common clone3 sys_clone3_wrapper +436 common close_range sys_close_range +437 common openat2 sys_openat2 +438 common pidfd_getfd sys_pidfd_getfd +439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise +441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr +443 common quotactl_fd sys_quotactl_fd +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal +463 common setxattrat sys_setxattrat +464 common getxattrat sys_getxattrat +465 common listxattrat sys_listxattrat +466 common removexattrat sys_removexattrat +467 common open_tree_attr sys_open_tree_attr +468 common file_getattr sys_file_getattr +469 common file_setattr sys_file_setattr +470 common listns sys_listns diff --git a/arch/parisc/kernel/syscalls/syscallhdr.sh b/arch/parisc/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index 50242b747d7c..000000000000 --- a/arch/parisc/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_PARISC_`basename "$out" 
| sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry compat ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */" "${fileguard}" -) > "$out" diff --git a/arch/parisc/kernel/syscalls/syscalltbl.sh b/arch/parisc/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 45b5bae26240..000000000000 --- a/arch/parisc/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry compat ; do - if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then - emit $((nxt+offset)) $((nr+offset)) $compat - else - emit $((nxt+offset)) $((nr+offset)) $entry - fi - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index a1e772f909cb..c17e2249115f 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -1,167 +1,119 @@ // SPDX-License-Identifier: GPL-2.0 /* - * linux/arch/parisc/kernel/time.c + * Common time service routines for parisc machines. 
+ * based on arch/loongarch/kernel/time.c * - * Copyright (C) 1991, 1992, 1995 Linus Torvalds - * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King - * Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org) - * - * 1994-07-02 Alan Modra - * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime - * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 - * "A Kernel Model for Precision Timekeeping" by Dave Mills + * Copyright (C) 2024 Helge Deller <deller@gmx.de> */ -#include <linux/errno.h> -#include <linux/module.h> -#include <linux/rtc.h> -#include <linux/sched.h> -#include <linux/sched/clock.h> -#include <linux/sched_clock.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/time.h> +#include <linux/clockchips.h> +#include <linux/delay.h> +#include <linux/export.h> #include <linux/init.h> -#include <linux/smp.h> -#include <linux/profile.h> -#include <linux/clocksource.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/sched_clock.h> +#include <linux/spinlock.h> +#include <linux/rtc.h> #include <linux/platform_device.h> -#include <linux/ftrace.h> +#include <asm/processor.h> -#include <linux/uaccess.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/param.h> -#include <asm/pdc.h> -#include <asm/led.h> +static u64 cr16_clock_freq; +static unsigned long clocktick; -#include <linux/timex.h> +int time_keeper_id; /* CPU used for timekeeping */ -static unsigned long clocktick __read_mostly; /* timer cycles per tick */ +static DEFINE_PER_CPU(struct clock_event_device, parisc_clockevent_device); -/* - * We keep time on PA-RISC Linux by using the Interval Timer which is - * a pair of registers; one is read-only and one is write-only; both - * accessed through CR16. The read-only register is 32 or 64 bits wide, - * and increments by 1 every CPU clock tick. The architecture only - * guarantees us a rate between 0.5 and 2, but all implementations use a - * rate of 1. The write-only register is 32-bits wide. When the lowest - * 32 bits of the read-only register compare equal to the write-only - * register, it raises a maskable external interrupt. Each processor has - * an Interval Timer of its own and they are not synchronised. - * - * We want to generate an interrupt every 1/HZ seconds. So we program - * CR16 to interrupt every @clocktick cycles. The it_value in cpu_data - * is programmed with the intended time of the next tick. We can be - * held off for an arbitrarily long period of time by interrupts being - * disabled, so we may miss one or more ticks. - */ -irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) +static void parisc_event_handler(struct clock_event_device *dev) { - unsigned long now; - unsigned long next_tick; - unsigned long ticks_elapsed = 0; - unsigned int cpu = smp_processor_id(); - struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); - - /* gcc can optimize for "read-only" case with a local clocktick */ - unsigned long cpt = clocktick; - - profile_tick(CPU_PROFILING); - - /* Initialize next_tick to the old expected tick time. */ - next_tick = cpuinfo->it_value; - - /* Calculate how many ticks have elapsed. */ - now = mfctl(16); - do { - ++ticks_elapsed; - next_tick += cpt; - } while (next_tick - now > cpt); - - /* Store (in CR16 cycles) up to when we are accounting right now. */ - cpuinfo->it_value = next_tick; - - /* Go do system house keeping. 
*/ - if (cpu == 0) - xtime_update(ticks_elapsed); - - update_process_times(user_mode(get_irq_regs())); - - /* Skip clockticks on purpose if we know we would miss those. - * The new CR16 must be "later" than current CR16 otherwise - * itimer would not fire until CR16 wrapped - e.g 4 seconds - * later on a 1Ghz processor. We'll account for the missed - * ticks on the next timer interrupt. - * We want IT to fire modulo clocktick even if we miss/skip some. - * But those interrupts don't in fact get delivered that regularly. - * - * "next_tick - now" will always give the difference regardless - * if one or the other wrapped. If "now" is "bigger" we'll end up - * with a very large unsigned number. - */ - now = mfctl(16); - while (next_tick - now > cpt) - next_tick += cpt; - - /* Program the IT when to deliver the next interrupt. - * Only bottom 32-bits of next_tick are writable in CR16! - * Timer interrupt will be delivered at least a few hundred cycles - * after the IT fires, so if we are too close (<= 8000 cycles) to the - * next cycle, simply skip it. - */ - if (next_tick - now <= 8000) - next_tick += cpt; - mtctl(next_tick, 16); - - return IRQ_HANDLED; } +static int parisc_timer_next_event(unsigned long delta, struct clock_event_device *evt) +{ + unsigned long new_cr16; + + new_cr16 = mfctl(16) + delta; + mtctl(new_cr16, 16); -unsigned long profile_pc(struct pt_regs *regs) + return 0; +} + +irqreturn_t timer_interrupt(int irq, void *data) { - unsigned long pc = instruction_pointer(regs); + struct clock_event_device *cd; + int cpu = smp_processor_id(); - if (regs->gr[0] & PSW_N) - pc -= 4; + cd = &per_cpu(parisc_clockevent_device, cpu); -#ifdef CONFIG_SMP - if (in_lock_functions(pc)) - pc = regs->gr[2]; -#endif + if (clockevent_state_periodic(cd)) + parisc_timer_next_event(clocktick, cd); - return pc; + if (clockevent_state_periodic(cd) || clockevent_state_oneshot(cd)) + cd->event_handler(cd); + + return IRQ_HANDLED; } -EXPORT_SYMBOL(profile_pc); +static int parisc_set_state_oneshot(struct clock_event_device *evt) +{ + parisc_timer_next_event(clocktick, evt); -/* clock source code */ + return 0; +} -static u64 notrace read_cr16(struct clocksource *cs) +static int parisc_set_state_periodic(struct clock_event_device *evt) { - return get_cycles(); + parisc_timer_next_event(clocktick, evt); + + return 0; } -static struct clocksource clocksource_cr16 = { - .name = "cr16", - .rating = 300, - .read = read_cr16, - .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; +static int parisc_set_state_shutdown(struct clock_event_device *evt) +{ + return 0; +} -void __init start_cpu_itimer(void) +void parisc_clockevent_init(void) { unsigned int cpu = smp_processor_id(); - unsigned long next_tick = mfctl(16) + clocktick; + unsigned long min_delta = 0x600; /* XXX */ + unsigned long max_delta = (1UL << (BITS_PER_LONG - 1)); + struct clock_event_device *cd; + + cd = &per_cpu(parisc_clockevent_device, cpu); + + cd->name = "cr16_clockevent"; + cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_PERCPU; + + cd->irq = TIMER_IRQ; + cd->rating = 320; + cd->cpumask = cpumask_of(cpu); + cd->set_state_oneshot = parisc_set_state_oneshot; + cd->set_state_oneshot_stopped = parisc_set_state_shutdown; + cd->set_state_periodic = parisc_set_state_periodic; + cd->set_state_shutdown = parisc_set_state_shutdown; + cd->set_next_event = parisc_timer_next_event; + cd->event_handler = parisc_event_handler; + + clockevents_config_and_register(cd, cr16_clock_freq, min_delta, 
max_delta); +} - mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */ +unsigned long notrace profile_pc(struct pt_regs *regs) +{ + unsigned long pc = instruction_pointer(regs); + + if (regs->gr[0] & PSW_N) + pc -= 4; - per_cpu(cpu_data, cpu).it_value = next_tick; +#ifdef CONFIG_SMP + if (in_lock_functions(pc)) + pc = regs->gr[2]; +#endif + + return pc; } +EXPORT_SYMBOL(profile_pc); #if IS_ENABLED(CONFIG_RTC_DRV_GENERIC) static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm) @@ -180,9 +132,16 @@ static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm) static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm) { time64_t secs = rtc_tm_to_time64(tm); - - if (pdc_tod_set(secs, 0) < 0) + int ret; + + /* hppa has Y2K38 problem: pdc_tod_set() takes an u32 value! */ + ret = pdc_tod_set(secs, 0); + if (ret != 0) { + pr_warn("pdc_tod_set(%lld) returned error %d\n", secs, ret); + if (ret == PDC_INVALID_ARG) + return -EINVAL; return -EOPNOTSUPP; + } return 0; } @@ -218,12 +177,27 @@ void read_persistent_clock64(struct timespec64 *ts) } } - static u64 notrace read_cr16_sched_clock(void) { return get_cycles(); } +static u64 notrace read_cr16(struct clocksource *cs) +{ + return get_cycles(); +} + +static struct clocksource clocksource_cr16 = { + .name = "cr16", + .rating = 300, + .read = read_cr16, + .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), + .flags = CLOCK_SOURCE_IS_CONTINUOUS | + CLOCK_SOURCE_VALID_FOR_HRES | + CLOCK_SOURCE_MUST_VERIFY | + CLOCK_SOURCE_VERIFY_PERCPU, +}; + /* * timer interrupt and sched_clock() initialization @@ -231,52 +205,14 @@ static u64 notrace read_cr16_sched_clock(void) void __init time_init(void) { - unsigned long cr16_hz; - - clocktick = (100 * PAGE0->mem_10msec) / HZ; - start_cpu_itimer(); /* get CPU 0 started */ - - cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */ + cr16_clock_freq = 100 * PAGE0->mem_10msec; /* Hz */ + clocktick = cr16_clock_freq / HZ; /* register as sched_clock source */ - sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz); -} - -static int __init init_cr16_clocksource(void) -{ - /* - * The cr16 interval timers are not syncronized across CPUs on - * different sockets, so mark them unstable and lower rating on - * multi-socket SMP systems. 
- */ - if (num_online_cpus() > 1 && !running_on_qemu) { - int cpu; - unsigned long cpu0_loc; - cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; - - for_each_online_cpu(cpu) { - if (cpu == 0) - continue; - if ((cpu0_loc != 0) && - (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)) - continue; - - clocksource_cr16.name = "cr16_unstable"; - clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; - clocksource_cr16.rating = 0; - break; - } - } + sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_clock_freq); - /* XXX: We may want to mark sched_clock stable here if cr16 clocks are - * in sync: - * (clocksource_cr16.flags == CLOCK_SOURCE_IS_CONTINUOUS) */ + parisc_clockevent_init(); /* register at clocksource framework */ - clocksource_register_hz(&clocksource_cr16, - 100 * PAGE0->mem_10msec); - - return 0; + clocksource_register_hz(&clocksource_cr16, cr16_clock_freq); } - -device_initcall(init_cr16_clocksource); diff --git a/arch/parisc/kernel/toc.c b/arch/parisc/kernel/toc.c new file mode 100644 index 000000000000..e4b48d07afbd --- /dev/null +++ b/arch/parisc/kernel/toc.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/kernel.h> +#include <linux/kgdb.h> +#include <linux/printk.h> +#include <linux/sched/debug.h> +#include <linux/delay.h> +#include <linux/reboot.h> + +#include <asm/pdc.h> +#include <asm/pdc_chassis.h> +#include <asm/ldcw.h> +#include <asm/processor.h> + +static unsigned int __aligned(16) toc_lock = 1; +DEFINE_PER_CPU_PAGE_ALIGNED(char [16384], toc_stack) __visible; + +static void toc20_to_pt_regs(struct pt_regs *regs, struct pdc_toc_pim_20 *toc) +{ + int i; + + regs->gr[0] = (unsigned long)toc->cr[22]; + + for (i = 1; i < 32; i++) + regs->gr[i] = (unsigned long)toc->gr[i]; + + for (i = 0; i < 8; i++) + regs->sr[i] = (unsigned long)toc->sr[i]; + + regs->iasq[0] = (unsigned long)toc->cr[17]; + regs->iasq[1] = (unsigned long)toc->iasq_back; + regs->iaoq[0] = (unsigned long)toc->cr[18]; + regs->iaoq[1] = (unsigned long)toc->iaoq_back; + + regs->sar = (unsigned long)toc->cr[11]; + regs->iir = (unsigned long)toc->cr[19]; + regs->isr = (unsigned long)toc->cr[20]; + regs->ior = (unsigned long)toc->cr[21]; +} + +static void toc11_to_pt_regs(struct pt_regs *regs, struct pdc_toc_pim_11 *toc) +{ + int i; + + regs->gr[0] = toc->cr[22]; + + for (i = 1; i < 32; i++) + regs->gr[i] = toc->gr[i]; + + for (i = 0; i < 8; i++) + regs->sr[i] = toc->sr[i]; + + regs->iasq[0] = toc->cr[17]; + regs->iasq[1] = toc->iasq_back; + regs->iaoq[0] = toc->cr[18]; + regs->iaoq[1] = toc->iaoq_back; + + regs->sar = toc->cr[11]; + regs->iir = toc->cr[19]; + regs->isr = toc->cr[20]; + regs->ior = toc->cr[21]; +} + +void notrace __noreturn __cold toc_intr(struct pt_regs *regs) +{ + struct pdc_toc_pim_20 pim_data20; + struct pdc_toc_pim_11 pim_data11; + + /* verify we wrote regs to the correct stack */ + BUG_ON(regs != (struct pt_regs *)&per_cpu(toc_stack, raw_smp_processor_id())); + + if (boot_cpu_data.cpu_type >= pcxu) { + if (pdc_pim_toc20(&pim_data20)) + panic("Failed to get PIM data"); + toc20_to_pt_regs(regs, &pim_data20); + } else { + if (pdc_pim_toc11(&pim_data11)) + panic("Failed to get PIM data"); + toc11_to_pt_regs(regs, &pim_data11); + } + +#ifdef CONFIG_KGDB + nmi_enter(); + + if (atomic_read(&kgdb_active) != -1) + kgdb_nmicallback(raw_smp_processor_id(), regs); + kgdb_handle_exception(9, SIGTRAP, 0, regs); +#endif + + /* serialize output, otherwise all CPUs write backtrace at once */ + while (__ldcw(&toc_lock) == 0) + ; /* wait */ + show_regs(regs); + toc_lock = 1; /* release lock for 
next CPU */ + + if (raw_smp_processor_id() != 0) + while (1) ; /* all but monarch CPU will wait endless. */ + + /* give other CPUs time to show their backtrace */ + mdelay(2000); + + machine_restart("TOC"); + + /* should never reach this */ + panic("TOC"); +} + +static __init int setup_toc(void) +{ + unsigned int csum = 0; + unsigned long toc_code = (unsigned long)dereference_function_descriptor(toc_handler); + int i; + + PAGE0->vec_toc = __pa(toc_code) & 0xffffffff; +#ifdef CONFIG_64BIT + PAGE0->vec_toc_hi = __pa(toc_code) >> 32; +#endif + PAGE0->vec_toclen = toc_handler_size; + + for (i = 0; i < toc_handler_size/4; i++) + csum += ((u32 *)toc_code)[i]; + toc_handler_csum = -csum; + pr_info("TOC handler registered\n"); + return 0; +} +early_initcall(setup_toc); diff --git a/arch/parisc/kernel/toc_asm.S b/arch/parisc/kernel/toc_asm.S new file mode 100644 index 000000000000..570f5cef526e --- /dev/null +++ b/arch/parisc/kernel/toc_asm.S @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* TOC (Transfer of Control) handler. */ + + .level 1.1 + +#include <asm/assembly.h> +#include <linux/threads.h> +#include <linux/linkage.h> + + .text + .import toc_intr,code + .import toc_stack,data + .align 16 +ENTRY_CFI(toc_handler) + load32 PA(toc_stack),%sp + +#ifdef CONFIG_SMP + /* get per-cpu toc_stack address. */ + mfctl %cr30, %r1 + tophys %r1,%r2 /* task_struct */ + LDREG TASK_TI_CPU(%r2),%r4 /* cpu */ + load32 PA(__per_cpu_offset),%r1 + LDREGX %r4(%r1),%r4 + add %r4,%sp,%sp +#endif + + /* + * setup pt_regs on stack and save the + * floating point registers. PIM_TOC doesn't + * save fp registers, so we're doing it here. + */ + copy %sp,%arg0 + ldo PT_SZ_ALGN(%sp), %sp + + /* clear pt_regs */ + copy %arg0,%r1 +0: cmpb,<<,n %r1,%sp,0b + stw,ma %r0,4(%r1) + + ldo PT_FR0(%arg0),%r25 + save_fp %r25 + + /* go virtual */ + load32 PA(swapper_pg_dir),%r4 + mtctl %r4,%cr24 + mtctl %r4,%cr25 + + /* Clear sr4-sr7 */ + mtsp %r0, %sr4 + mtsp %r0, %sr5 + mtsp %r0, %sr6 + mtsp %r0, %sr7 + + tovirt_r1 %sp + tovirt_r1 %arg0 + virt_map + + loadgp + +#ifdef CONFIG_64BIT + ldo -16(%sp),%r29 +#endif + load32 toc_intr,%r1 + be 0(%sr7,%r1) + nop +ENDPROC_CFI(toc_handler) + + /* + * keep this checksum here, as it is part of the toc_handler + * spanned by toc_handler_size (all words in toc_handler are + * added in PDC and the sum must equal to zero. + */ +SYM_DATA(toc_handler_csum, .long 0) +SYM_DATA(toc_handler_size, .long . 
- toc_handler) diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c index 0a10e4ddc528..b9d845e314f8 100644 --- a/arch/parisc/kernel/topology.c +++ b/arch/parisc/kernel/topology.c @@ -13,56 +13,21 @@ #include <linux/percpu.h> #include <linux/sched.h> #include <linux/sched/topology.h> +#include <linux/cpu.h> #include <asm/topology.h> +#include <asm/sections.h> - /* - * cpu topology table - */ -struct cputopo_parisc cpu_topology[NR_CPUS] __read_mostly; -EXPORT_SYMBOL_GPL(cpu_topology); - -const struct cpumask *cpu_coregroup_mask(int cpu) -{ - return &cpu_topology[cpu].core_sibling; -} - -static void update_siblings_masks(unsigned int cpuid) -{ - struct cputopo_parisc *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; - int cpu; - - /* update core and thread sibling masks */ - for_each_possible_cpu(cpu) { - cpu_topo = &cpu_topology[cpu]; - - if (cpuid_topo->socket_id != cpu_topo->socket_id) - continue; - - cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); - if (cpu != cpuid) - cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); - - if (cpuid_topo->core_id != cpu_topo->core_id) - continue; - - cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); - if (cpu != cpuid) - cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); - } - smp_wmb(); -} - -static int dualcores_found __initdata; +static DEFINE_PER_CPU(struct cpu, cpu_devices); /* * store_cpu_topology is called at boot when only one cpu is running * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, * which prevents simultaneous write access to cpu_topology array */ -void __init store_cpu_topology(unsigned int cpuid) +void store_cpu_topology(unsigned int cpuid) { - struct cputopo_parisc *cpuid_topo = &cpu_topology[cpuid]; + struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; struct cpuinfo_parisc *p; int max_socket = -1; unsigned long cpu; @@ -71,6 +36,12 @@ void __init store_cpu_topology(unsigned int cpuid) if (cpuid_topo->core_id != -1) return; +#ifdef CONFIG_HOTPLUG_CPU + per_cpu(cpu_devices, cpuid).hotpluggable = 1; +#endif + if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid)) + pr_warn("Failed to register CPU%d device", cpuid); + /* create cpu topology mapping */ cpuid_topo->thread_id = -1; cpuid_topo->core_id = 0; @@ -86,57 +57,31 @@ void __init store_cpu_topology(unsigned int cpuid) cpuid_topo->core_id = cpu_topology[cpu].core_id; if (p->cpu_loc) { cpuid_topo->core_id++; - cpuid_topo->socket_id = cpu_topology[cpu].socket_id; - dualcores_found = 1; + cpuid_topo->package_id = cpu_topology[cpu].package_id; continue; } } - if (cpuid_topo->socket_id == -1) - max_socket = max(max_socket, cpu_topology[cpu].socket_id); + if (cpuid_topo->package_id == -1) + max_socket = max(max_socket, cpu_topology[cpu].package_id); } - if (cpuid_topo->socket_id == -1) - cpuid_topo->socket_id = max_socket + 1; + if (cpuid_topo->package_id == -1) + cpuid_topo->package_id = max_socket + 1; update_siblings_masks(cpuid); - pr_info("CPU%u: thread %d, cpu %d, socket %d\n", - cpuid, cpu_topology[cpuid].thread_id, + pr_info("CPU%u: cpu core %d of socket %d\n", + cpuid, cpu_topology[cpuid].core_id, - cpu_topology[cpuid].socket_id); + cpu_topology[cpuid].package_id); } -static struct sched_domain_topology_level parisc_mc_topology[] = { -#ifdef CONFIG_SCHED_MC - { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, -#endif - - { cpu_cpu_mask, SD_INIT_NAME(DIE) }, - { NULL, }, -}; - /* * init_cpu_topology is called at boot when only one cpu is running * which prevent simultaneous write access to cpu_topology array */ void __init 
init_cpu_topology(void) { - unsigned int cpu; - - /* init core mask and capacity */ - for_each_possible_cpu(cpu) { - struct cputopo_parisc *cpu_topo = &(cpu_topology[cpu]); - - cpu_topo->thread_id = -1; - cpu_topo->core_id = -1; - cpu_topo->socket_id = -1; - cpumask_clear(&cpu_topo->core_sibling); - cpumask_clear(&cpu_topo->thread_sibling); - } - smp_wmb(); - - /* Set scheduler topology descriptor */ - if (dualcores_found) - set_sched_topology(parisc_mc_topology); + reset_cpu_topology(); } diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 472a818e8c17..4c7c5df80bd0 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -29,12 +29,15 @@ #include <linux/bug.h> #include <linux/ratelimit.h> #include <linux/uaccess.h> +#include <linux/kdebug.h> +#include <linux/kfence.h> +#include <linux/perf_event.h> #include <asm/assembly.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/traps.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/atomic.h> #include <asm/smp.h> #include <asm/pdc.h> @@ -42,11 +45,19 @@ #include <asm/unwind.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> +#include <linux/kgdb.h> +#include <linux/kprobes.h> + +#include "unaligned.h" + +#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK) +#include <asm/spinlock.h> +#endif #include "../math-emu/math-emu.h" /* for handle_fpe() */ static void parisc_show_stack(struct task_struct *task, - struct pt_regs *regs); + struct pt_regs *regs, const char *loglvl); static int printbinary(char *buf, unsigned long x, int nbits) { @@ -72,7 +83,7 @@ static int printbinary(char *buf, unsigned long x, int nbits) lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \ (r)[(x)+2], (r)[(x)+3]) -static void print_gr(char *level, struct pt_regs *regs) +static void print_gr(const char *level, struct pt_regs *regs) { int i; char buf[64]; @@ -86,7 +97,7 @@ static void print_gr(char *level, struct pt_regs *regs) PRINTREGS(level, regs->gr, "r", RFMT, i); } -static void print_fr(char *level, struct pt_regs *regs) +static void print_fr(const char *level, struct pt_regs *regs) { int i; char buf[64]; @@ -116,7 +127,7 @@ static void print_fr(char *level, struct pt_regs *regs) void show_regs(struct pt_regs *regs) { int i, user; - char *level; + const char *level; unsigned long cr30, cr31; user = user_mode(regs); @@ -140,7 +151,7 @@ void show_regs(struct pt_regs *regs) printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n", level, regs->iir, regs->isr, regs->ior); printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n", - level, current_thread_info()->cpu, cr30, cr31); + level, task_cpu(current), cr30, cr31); printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28); if (user) { @@ -152,7 +163,7 @@ void show_regs(struct pt_regs *regs) printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]); printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]); - parisc_show_stack(current, regs); + parisc_show_stack(current, regs, KERN_DEFAULT); } } @@ -167,37 +178,37 @@ static DEFINE_RATELIMIT_STATE(_hppa_rs, } -static void do_show_stack(struct unwind_frame_info *info) +static void do_show_stack(struct unwind_frame_info *info, const char *loglvl) { int i = 1; - printk(KERN_CRIT "Backtrace:\n"); + printk("%sBacktrace:\n", loglvl); while (i <= MAX_UNWIND_ENTRIES) { if (unwind_once(info) < 0 || info->ip == 0) break; if (__kernel_text_address(info->ip)) { - printk(KERN_CRIT " [<" RFMT ">] %pS\n", - info->ip, (void *) info->ip); + printk("%s [<" RFMT ">] %pS\n", + loglvl, info->ip, (void *) info->ip); i++; } } - 
printk(KERN_CRIT "\n"); + printk("%s\n", loglvl); } static void parisc_show_stack(struct task_struct *task, - struct pt_regs *regs) + struct pt_regs *regs, const char *loglvl) { struct unwind_frame_info info; unwind_frame_init_task(&info, task, regs); - do_show_stack(&info); + do_show_stack(&info, loglvl); } -void show_stack(struct task_struct *t, unsigned long *sp) +void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl) { - parisc_show_stack(t, NULL); + parisc_show_stack(t, NULL, loglvl); } int is_valid_bugaddr(unsigned long iaoq) @@ -218,7 +229,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) return; } - oops_in_progress = 1; + bust_spinlocks(1); oops_enter(); @@ -235,13 +246,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) /* unlock the pdc lock if necessary */ pdc_emergency_unlock(); - /* maybe the kernel hasn't booted very far yet and hasn't been able - * to initialize the serial or STI console. In that case we should - * re-enable the pdc console, so that the user will be able to - * identify the problem. */ - if (!console_drivers) - pdc_console_restart(); - if (err) printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n", current->comm, task_pid_nr(current), str, err); @@ -265,7 +269,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) panic("Fatal exception"); oops_exit(); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } /* gdb uses break 4,8 */ @@ -273,7 +277,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) static void handle_gdb_break(struct pt_regs *regs, int wot) { force_sig_fault(SIGTRAP, wot, - (void __user *) (regs->iaoq[0] & ~3), current); + (void __user *) (regs->iaoq[0] & ~3)); } static void handle_break(struct pt_regs *regs) @@ -293,6 +297,31 @@ static void handle_break(struct pt_regs *regs) (tt == BUG_TRAP_TYPE_NONE) ? 
9 : 0); } +#ifdef CONFIG_KPROBES + if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) { + parisc_kprobe_break_handler(regs); + return; + } + if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) { + parisc_kprobe_ss_handler(regs); + return; + } +#endif + +#ifdef CONFIG_KGDB + if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN || + iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) { + kgdb_handle_exception(9, SIGTRAP, 0, regs); + return; + } +#endif + +#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK + if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) { + die_if_kernel("Spinlock was trashed", regs, 1); + } +#endif + if (unlikely(iir != GDB_BREAK_INSN)) parisc_printk_ratelimited(0, regs, KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", @@ -309,10 +338,7 @@ static void default_trap(int code, struct pt_regs *regs) show_regs(regs); } -void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap; - - -void transfer_pim_to_trap_frame(struct pt_regs *regs) +static void transfer_pim_to_trap_frame(struct pt_regs *regs) { register int i; extern unsigned int hpmc_pim_data[]; @@ -396,7 +422,8 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o { static DEFINE_SPINLOCK(terminate_lock); - oops_in_progress = 1; + (void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP); + bust_spinlocks(1); set_eiem(0); local_irq_disable(); @@ -405,10 +432,6 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o /* unlock the pdc lock if necessary */ pdc_emergency_unlock(); - /* restart pdc console if necessary */ - if (!console_drivers) - pdc_console_restart(); - /* Not all paths will gutter the processor... */ switch(code){ @@ -417,7 +440,6 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o break; default: - /* Fall through */ break; } @@ -426,7 +448,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o /* show_stack(NULL, (unsigned long *)regs->gr[30]); */ struct unwind_frame_info info; unwind_frame_init(&info, current, regs); - do_show_stack(&info); + do_show_stack(&info, KERN_CRIT); } printk("\n"); @@ -446,7 +468,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o * panic notifiers, and we should call panic * directly from the location that we wish. * e.g. We should not call panic from - * parisc_terminate, but rather the oter way around. + * parisc_terminate, but rather the other way around. * This hack works, prints the panic message twice, * and it enables reboot timers! 
*/ @@ -459,9 +481,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) unsigned long fault_space = 0; int si_code; - if (code == 1) - pdc_console_restart(); /* switch back to pdc if HPMC */ - else + if (!irqs_disabled_flags(regs->gr[0])) local_irq_enable(); /* Security check: @@ -487,7 +507,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) if (((unsigned long)regs->iaoq[0] & 3) && ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { /* Kill the user process later */ - regs->iaoq[0] = 0 | 3; + regs->iaoq[0] = 0 | PRIV_USER; regs->iaoq[1] = regs->iaoq[0] + 4; regs->iasq[0] = regs->iasq[1] = regs->sr[7]; regs->gr[0] &= ~PSW_B; @@ -518,6 +538,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs) case 3: /* Recovery counter trap */ regs->gr[0] &= ~PSW_R; + +#ifdef CONFIG_KGDB + if (kgdb_single_step) { + kgdb_handle_exception(0, SIGTRAP, 0, regs); + return; + } +#endif + if (user_space(regs)) handle_gdb_break(regs, TRAP_TRACE); /* else this must be the start of a syscall - just let it run */ @@ -529,7 +557,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) flush_cache_all(); flush_tlb_all(); - cpu_lpmc(5, regs); + default_trap(code, regs); return; case PARISC_ITLB_TRAP: @@ -578,13 +606,13 @@ void notrace handle_interruption(int code, struct pt_regs *regs) si_code = ILL_PRVREG; give_sigill: force_sig_fault(SIGILL, si_code, - (void __user *) regs->iaoq[0], current); + (void __user *) regs->iaoq[0]); return; case 12: /* Overflow Trap, let the userland signal handler do the cleanup */ force_sig_fault(SIGFPE, FPE_INTOVF, - (void __user *) regs->iaoq[0], current); + (void __user *) regs->iaoq[0]); return; case 13: @@ -596,7 +624,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) * to by si_addr. */ force_sig_fault(SIGFPE, FPE_CONDTRAP, - (void __user *) regs->iaoq[0], current); + (void __user *) regs->iaoq[0]); return; } /* The kernel doesn't want to handle condition codes */ @@ -606,17 +634,18 @@ void notrace handle_interruption(int code, struct pt_regs *regs) /* Assist Exception Trap, i.e. floating point exception. */ die_if_kernel("Floating point exception", regs, 0); /* quiet */ __inc_irq_stat(irq_fpassist_count); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); handle_fpe(regs); return; case 15: /* Data TLB miss fault/Data page fault */ - /* Fall through */ + fallthrough; case 16: /* Non-access instruction TLB miss fault */ /* The instruction TLB entry needed for the target address of the FIC is absent, and hardware can't find it, so we get to cleanup */ - /* Fall through */ + fallthrough; case 17: /* Non-access data TLB miss fault/Non-access data page fault */ /* FIXME: @@ -629,6 +658,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs) by hand. 
Technically we need to emulate: fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */ + if (code == 17 && handle_nadtlb_fault(regs)) + return; fault_address = regs->ior; fault_space = regs->isr; break; @@ -640,7 +671,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) handle_unaligned(regs); return; } - /* Fall Through */ + fallthrough; case 26: /* PCXL: Data memory access rights trap */ fault_address = regs->ior; @@ -650,7 +681,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) case 19: /* Data memory break trap */ regs->gr[0] |= PSW_X; /* So we can single-step over the trap */ - /* fall thru */ + fallthrough; case 21: /* Page reference trap */ handle_gdb_break(regs, TRAP_HWBKPT); @@ -684,7 +715,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) if (user_mode(regs)) { struct vm_area_struct *vma; - down_read(¤t->mm->mmap_sem); + mmap_read_lock(current->mm); vma = find_vma(current->mm,regs->iaoq[0]); if (vma && (regs->iaoq[0] >= vma->vm_start) && (vma->vm_flags & VM_EXEC)) { @@ -692,12 +723,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs) fault_address = regs->iaoq[0]; fault_space = regs->iasq[0]; - up_read(¤t->mm->mmap_sem); + mmap_read_unlock(current->mm); break; /* call do_page_fault() */ } - up_read(¤t->mm->mmap_sem); + mmap_read_unlock(current->mm); } - /* Fall Through */ + /* CPU could not fetch instruction, so clear stale IIR value. */ + regs->iir = 0xbaadf00d; + fallthrough; case 27: /* Data memory protection ID trap */ if (code == 27 && !user_mode(regs) && @@ -708,7 +741,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) force_sig_fault(SIGSEGV, SEGV_MAPERR, (code == 7)? ((void __user *) regs->iaoq[0]) : - ((void __user *) regs->ior), current); + ((void __user *) regs->ior)); return; case 28: @@ -723,7 +756,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) task_pid_nr(current), current->comm); /* SIGBUS, for lack of a better one. */ force_sig_fault(SIGBUS, BUS_OBJERR, - (void __user *)regs->ior, current); + (void __user *)regs->ior); return; } pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); @@ -739,7 +772,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) code, fault_space, task_pid_nr(current), current->comm); force_sig_fault(SIGSEGV, SEGV_MAPERR, - (void __user *)regs->ior, current); + (void __user *)regs->ior); return; } } @@ -750,11 +783,15 @@ void notrace handle_interruption(int code, struct pt_regs *regs) * unless pagefault_disable() was called before. */ - if (fault_space == 0 && !faulthandler_disabled()) + if (faulthandler_disabled() || fault_space == 0) { /* Clean up and return if in exception table. */ if (fixup_exception(regs)) return; + /* Clean up and return if handled by kfence. 
*/ + if (kfence_handle_page_fault(fault_address, + parisc_acctyp(code, regs->iir) == VM_WRITE, regs)) + return; pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); parisc_terminate("Kernel Fault", regs, code, fault_address); } @@ -764,16 +801,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs) } -void __init initialize_ivt(const void *iva) +static void __init initialize_ivt(const void *iva) { - extern u32 os_hpmc_size; extern const u32 os_hpmc[]; int i; u32 check = 0; u32 *ivap; - u32 *hpmcp; - u32 length, instr; + u32 instr; if (strcmp((const char *)iva, "cows can fly")) panic("IVT invalid"); @@ -804,18 +839,12 @@ void __init initialize_ivt(const void *iva) /* Setup IVA and compute checksum for HPMC handler */ ivap[6] = (u32)__pa(os_hpmc); - length = os_hpmc_size; - ivap[7] = length; - - hpmcp = (u32 *)os_hpmc; - - for (i=0; i<length/4; i++) - check += *hpmcp++; for (i=0; i<8; i++) check += ivap[i]; ivap[5] = -check; + pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]); } @@ -832,7 +861,3 @@ void __init early_trap_init(void) initialize_ivt(&fault_vector_20); } - -void __init trap_init(void) -{ -} diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 932bfc0b7cd8..fb64d9ce0b17 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -1,35 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Unaligned memory access handler * * Copyright (C) 2001 Randolph Chung <tausq@debian.org> + * Copyright (C) 2022 Helge Deller <deller@gmx.de> * Significantly tweaked by LaMont Jones <lamont@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * */ -#include <linux/jiffies.h> -#include <linux/kernel.h> -#include <linux/module.h> #include <linux/sched/signal.h> -#include <linux/sched/debug.h> #include <linux/signal.h> #include <linux/ratelimit.h> #include <linux/uaccess.h> +#include <linux/sysctl.h> +#include <linux/unaligned.h> +#include <linux/perf_event.h> #include <asm/hardirq.h> #include <asm/traps.h> +#include "unaligned.h" /* #define DEBUG_UNALIGNED 1 */ @@ -39,18 +26,7 @@ #define DPRINTF(fmt, args...) 
#endif -#ifdef CONFIG_64BIT -#define RFMT "%016lx" -#else -#define RFMT "%08lx" -#endif - -#define FIXUP_BRANCH(lbl) \ - "\tldil L%%" #lbl ", %%r1\n" \ - "\tldo R%%" #lbl "(%%r1), %%r1\n" \ - "\tbv,n %%r0(%%r1)\n" -/* If you use FIXUP_BRANCH, then you must list this clobber */ -#define FIXUP_BRANCH_CLOBBER "r1" +#define RFMT "0x%08lx" /* 1111 1100 0000 0000 0001 0011 1100 0000 */ #define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6) @@ -121,44 +97,38 @@ #define R1(i) (((i)>>21)&0x1f) #define R2(i) (((i)>>16)&0x1f) #define R3(i) ((i)&0x1f) -#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1)) +#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1)) #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0)) #define IM5_2(i) IM((i)>>16,5) #define IM5_3(i) IM((i),5) #define IM14(i) IM((i),14) #define ERR_NOTHANDLED -1 -#define ERR_PAGEFAULT -2 int unaligned_enabled __read_mostly = 1; +int no_unaligned_warning __read_mostly; static int emulate_ldh(struct pt_regs *regs, int toreg) { unsigned long saddr = regs->ior; - unsigned long val = 0; - int ret; + unsigned long val = 0, temp1; + ASM_EXCEPTIONTABLE_VAR(ret); DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n", regs->isr, regs->ior, toreg); __asm__ __volatile__ ( " mtsp %4, %%sr1\n" -"1: ldbs 0(%%sr1,%3), %%r20\n" +"1: ldbs 0(%%sr1,%3), %2\n" "2: ldbs 1(%%sr1,%3), %0\n" -" depw %%r20, 23, 24, %0\n" -" copy %%r0, %1\n" +" depw %2, 23, 24, %0\n" "3: \n" -" .section .fixup,\"ax\"\n" -"4: ldi -2, %1\n" - FIXUP_BRANCH(3b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b, 4b) - ASM_EXCEPTIONTABLE_ENTRY(2b, 4b) - : "=r" (val), "=r" (ret) - : "0" (val), "r" (saddr), "r" (regs->isr) - : "r20", FIXUP_BRANCH_CLOBBER ); - - DPRINTF("val = 0x" RFMT "\n", val); + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1") + : "+r" (val), "+r" (ret), "=&r" (temp1) + : "r" (saddr), "r" (regs->isr) ); + + DPRINTF("val = " RFMT "\n", val); if (toreg) regs->gr[toreg] = val; @@ -169,34 +139,28 @@ static int emulate_ldh(struct pt_regs *regs, int toreg) static int emulate_ldw(struct pt_regs *regs, int toreg, int flop) { unsigned long saddr = regs->ior; - unsigned long val = 0; - int ret; + unsigned long val = 0, temp1, temp2; + ASM_EXCEPTIONTABLE_VAR(ret); DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n", regs->isr, regs->ior, toreg); __asm__ __volatile__ ( -" zdep %3,28,2,%%r19\n" /* r19=(ofs&3)*8 */ -" mtsp %4, %%sr1\n" -" depw %%r0,31,2,%3\n" -"1: ldw 0(%%sr1,%3),%0\n" -"2: ldw 4(%%sr1,%3),%%r20\n" -" subi 32,%%r19,%%r19\n" -" mtctl %%r19,11\n" -" vshd %0,%%r20,%0\n" -" copy %%r0, %1\n" +" zdep %4,28,2,%2\n" /* r19=(ofs&3)*8 */ +" mtsp %5, %%sr1\n" +" depw %%r0,31,2,%4\n" +"1: ldw 0(%%sr1,%4),%0\n" +"2: ldw 4(%%sr1,%4),%3\n" +" subi 32,%2,%2\n" +" mtctl %2,11\n" +" vshd %0,%3,%0\n" "3: \n" -" .section .fixup,\"ax\"\n" -"4: ldi -2, %1\n" - FIXUP_BRANCH(3b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b, 4b) - ASM_EXCEPTIONTABLE_ENTRY(2b, 4b) - : "=r" (val), "=r" (ret) - : "0" (val), "r" (saddr), "r" (regs->isr) - : "r19", "r20", FIXUP_BRANCH_CLOBBER ); - - DPRINTF("val = 0x" RFMT "\n", val); + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1") + : "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2) + : "r" (saddr), "r" (regs->isr) ); + + DPRINTF("val = " RFMT "\n", val); if (flop) ((__u32*)(regs->fr))[toreg] = val; @@ -208,65 +172,49 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop) static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) { unsigned long saddr = 
regs->ior; + unsigned long shift, temp1; __u64 val = 0; - int ret; + ASM_EXCEPTIONTABLE_VAR(ret); DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n", regs->isr, regs->ior, toreg); -#ifdef CONFIG_PA20 -#ifndef CONFIG_64BIT - if (!flop) - return -1; -#endif + if (!IS_ENABLED(CONFIG_64BIT) && !flop) + return ERR_NOTHANDLED; + +#ifdef CONFIG_64BIT __asm__ __volatile__ ( -" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */ -" mtsp %4, %%sr1\n" -" depd %%r0,63,3,%3\n" -"1: ldd 0(%%sr1,%3),%0\n" -"2: ldd 8(%%sr1,%3),%%r20\n" -" subi 64,%%r19,%%r19\n" -" mtsar %%r19\n" -" shrpd %0,%%r20,%%sar,%0\n" -" copy %%r0, %1\n" +" depd,z %2,60,3,%3\n" /* shift=(ofs&7)*8 */ +" mtsp %5, %%sr1\n" +" depd %%r0,63,3,%2\n" +"1: ldd 0(%%sr1,%2),%0\n" +"2: ldd 8(%%sr1,%2),%4\n" +" subi 64,%3,%3\n" +" mtsar %3\n" +" shrpd %0,%4,%%sar,%0\n" "3: \n" -" .section .fixup,\"ax\"\n" -"4: ldi -2, %1\n" - FIXUP_BRANCH(3b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,4b) - ASM_EXCEPTIONTABLE_ENTRY(2b,4b) - : "=r" (val), "=r" (ret) - : "0" (val), "r" (saddr), "r" (regs->isr) - : "r19", "r20", FIXUP_BRANCH_CLOBBER ); + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1") + : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1) + : "r" (regs->isr) ); #else - { - unsigned long valh=0,vall=0; __asm__ __volatile__ ( -" zdep %5,29,2,%%r19\n" /* r19=(ofs&3)*8 */ -" mtsp %6, %%sr1\n" -" dep %%r0,31,2,%5\n" -"1: ldw 0(%%sr1,%5),%0\n" -"2: ldw 4(%%sr1,%5),%1\n" -"3: ldw 8(%%sr1,%5),%%r20\n" -" subi 32,%%r19,%%r19\n" -" mtsar %%r19\n" -" vshd %0,%1,%0\n" -" vshd %1,%%r20,%1\n" -" copy %%r0, %2\n" +" zdep %2,29,2,%3\n" /* shift=(ofs&3)*8 */ +" mtsp %5, %%sr1\n" +" dep %%r0,31,2,%2\n" +"1: ldw 0(%%sr1,%2),%0\n" +"2: ldw 4(%%sr1,%2),%R0\n" +"3: ldw 8(%%sr1,%2),%4\n" +" subi 32,%3,%3\n" +" mtsar %3\n" +" vshd %0,%R0,%0\n" +" vshd %R0,%4,%R0\n" "4: \n" -" .section .fixup,\"ax\"\n" -"5: ldi -2, %2\n" - FIXUP_BRANCH(4b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,5b) - ASM_EXCEPTIONTABLE_ENTRY(2b,5b) - ASM_EXCEPTIONTABLE_ENTRY(3b,5b) - : "=r" (valh), "=r" (vall), "=r" (ret) - : "0" (valh), "1" (vall), "r" (saddr), "r" (regs->isr) - : "r19", "r20", FIXUP_BRANCH_CLOBBER ); - val=((__u64)valh<<32)|(__u64)vall; - } + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b, "%1") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b, "%1") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1") + : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1) + : "r" (regs->isr) ); #endif DPRINTF("val = 0x%llx\n", val); @@ -281,31 +229,25 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) static int emulate_sth(struct pt_regs *regs, int frreg) { - unsigned long val = regs->gr[frreg]; - int ret; + unsigned long val = regs->gr[frreg], temp1; + ASM_EXCEPTIONTABLE_VAR(ret); if (!frreg) val = 0; - DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg, + DPRINTF("store r%d (" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg, val, regs->isr, regs->ior); __asm__ __volatile__ ( -" mtsp %3, %%sr1\n" -" extrw,u %1, 23, 8, %%r19\n" -"1: stb %1, 1(%%sr1, %2)\n" -"2: stb %%r19, 0(%%sr1, %2)\n" -" copy %%r0, %0\n" +" mtsp %4, %%sr1\n" +" extrw,u %2, 23, 8, %1\n" +"1: stb %1, 0(%%sr1, %3)\n" +"2: stb %2, 1(%%sr1, %3)\n" "3: \n" -" .section .fixup,\"ax\"\n" -"4: ldi -2, %0\n" - FIXUP_BRANCH(3b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,4b) - ASM_EXCEPTIONTABLE_ENTRY(2b,4b) - : "=r" (ret) - : "r" (val), "r" (regs->ior), "r" (regs->isr) - : "r19", FIXUP_BRANCH_CLOBBER ); + 
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0") + : "+r" (ret), "=&r" (temp1) + : "r" (val), "r" (regs->ior), "r" (regs->isr) ); return ret; } @@ -313,7 +255,7 @@ static int emulate_sth(struct pt_regs *regs, int frreg) static int emulate_stw(struct pt_regs *regs, int frreg, int flop) { unsigned long val; - int ret; + ASM_EXCEPTIONTABLE_VAR(ret); if (flop) val = ((__u32*)(regs->fr))[frreg]; @@ -322,7 +264,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop) else val = 0; - DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg, + DPRINTF("store r%d (" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg, val, regs->isr, regs->ior); @@ -342,24 +284,19 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop) " or %%r1, %%r21, %%r21\n" " stw %%r20,0(%%sr1,%2)\n" " stw %%r21,4(%%sr1,%2)\n" -" copy %%r0, %0\n" "3: \n" -" .section .fixup,\"ax\"\n" -"4: ldi -2, %0\n" - FIXUP_BRANCH(3b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,4b) - ASM_EXCEPTIONTABLE_ENTRY(2b,4b) - : "=r" (ret) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0") + : "+r" (ret) : "r" (val), "r" (regs->ior), "r" (regs->isr) - : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); + : "r19", "r20", "r21", "r22", "r1" ); - return 0; + return ret; } static int emulate_std(struct pt_regs *regs, int frreg, int flop) { __u64 val; - int ret; + ASM_EXCEPTIONTABLE_VAR(ret); if (flop) val = regs->fr[frreg]; @@ -371,11 +308,10 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg, val, regs->isr, regs->ior); -#ifdef CONFIG_PA20 -#ifndef CONFIG_64BIT - if (!flop) - return -1; -#endif + if (!IS_ENABLED(CONFIG_64BIT) && !flop) + return ERR_NOTHANDLED; + +#ifdef CONFIG_64BIT __asm__ __volatile__ ( " mtsp %3, %%sr1\n" " depd,z %2, 60, 3, %%r19\n" @@ -392,54 +328,43 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) " or %%r1, %%r21, %%r21\n" "3: std %%r20,0(%%sr1,%2)\n" "4: std %%r21,8(%%sr1,%2)\n" -" copy %%r0, %0\n" "5: \n" -" .section .fixup,\"ax\"\n" -"6: ldi -2, %0\n" - FIXUP_BRANCH(5b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,6b) - ASM_EXCEPTIONTABLE_ENTRY(2b,6b) - ASM_EXCEPTIONTABLE_ENTRY(3b,6b) - ASM_EXCEPTIONTABLE_ENTRY(4b,6b) - : "=r" (ret) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b, "%0") + : "+r" (ret) : "r" (val), "r" (regs->ior), "r" (regs->isr) - : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); + : "r19", "r20", "r21", "r22", "r1" ); #else { - unsigned long valh=(val>>32),vall=(val&0xffffffffl); __asm__ __volatile__ ( -" mtsp %4, %%sr1\n" -" zdep %2, 29, 2, %%r19\n" +" mtsp %3, %%sr1\n" +" zdep %R1, 29, 2, %%r19\n" " dep %%r0, 31, 2, %2\n" " mtsar %%r19\n" " zvdepi -2, 32, %%r19\n" -"1: ldw 0(%%sr1,%3),%%r20\n" -"2: ldw 8(%%sr1,%3),%%r21\n" -" vshd %1, %2, %%r1\n" +"1: ldw 0(%%sr1,%2),%%r20\n" +"2: ldw 8(%%sr1,%2),%%r21\n" +" vshd %1, %R1, %%r1\n" " vshd %%r0, %1, %1\n" -" vshd %2, %%r0, %2\n" +" vshd %R1, %%r0, %R1\n" " and %%r20, %%r19, %%r20\n" " andcm %%r21, %%r19, %%r21\n" " or %1, %%r20, %1\n" -" or %2, %%r21, %2\n" -"3: stw %1,0(%%sr1,%1)\n" -"4: stw %%r1,4(%%sr1,%3)\n" -"5: stw %2,8(%%sr1,%3)\n" -" copy %%r0, %0\n" +" or %R1, %%r21, %R1\n" +"3: stw %1,0(%%sr1,%2)\n" +"4: stw %%r1,4(%%sr1,%2)\n" +"5: stw 
%R1,8(%%sr1,%2)\n" "6: \n" -" .section .fixup,\"ax\"\n" -"7: ldi -2, %0\n" - FIXUP_BRANCH(6b) -" .previous\n" - ASM_EXCEPTIONTABLE_ENTRY(1b,7b) - ASM_EXCEPTIONTABLE_ENTRY(2b,7b) - ASM_EXCEPTIONTABLE_ENTRY(3b,7b) - ASM_EXCEPTIONTABLE_ENTRY(4b,7b) - ASM_EXCEPTIONTABLE_ENTRY(5b,7b) - : "=r" (ret) - : "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr) - : "r19", "r20", "r21", "r1", FIXUP_BRANCH_CLOBBER ); + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b, "%0") + : "+r" (ret) + : "r" (val), "r" (regs->ior), "r" (regs->isr) + : "r19", "r20", "r21", "r1" ); } #endif @@ -452,9 +377,9 @@ void handle_unaligned(struct pt_regs *regs) unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0; int modify = 0; int ret = ERR_NOTHANDLED; - register int flop=0; /* true if this is a flop */ __inc_irq_stat(irq_unaligned_count); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->ior); /* log a message with pacing */ if (user_mode(regs)) { @@ -464,10 +389,10 @@ void handle_unaligned(struct pt_regs *regs) if (!(current->thread.flags & PARISC_UAC_NOPRINT) && __ratelimit(&ratelimit)) { - char buf[256]; - sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n", - current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]); - printk(KERN_WARNING "%s", buf); + printk(KERN_WARNING "%s(%d): unaligned access to " RFMT + " at ip " RFMT " (iir " RFMT ")\n", + current->comm, task_pid_nr(current), regs->ior, + regs->iaoq[0], regs->iir); #ifdef DEBUG_UNALIGNED show_regs(regs); #endif @@ -475,6 +400,14 @@ void handle_unaligned(struct pt_regs *regs) if (!unaligned_enabled) goto force_sigbus; + } else { + static DEFINE_RATELIMIT_STATE(kernel_ratelimit, 5 * HZ, 5); + if (!(current->thread.flags & PARISC_UAC_NOPRINT) && + !no_unaligned_warning && + __ratelimit(&kernel_ratelimit)) + pr_warn("Kernel: unaligned access to " RFMT " in %pS " + "(iir " RFMT ")\n", + regs->ior, (void *)regs->iaoq[0], regs->iir); } /* handle modification - OK, it's ugly, see the instruction manual */ @@ -549,7 +482,7 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_LDWA_I: case OPCODE_LDW_S: case OPCODE_LDWA_S: - ret = emulate_ldw(regs, R3(regs->iir),0); + ret = emulate_ldw(regs, R3(regs->iir), 0); break; case OPCODE_STH: @@ -558,20 +491,20 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_STW: case OPCODE_STWA: - ret = emulate_stw(regs, R2(regs->iir),0); + ret = emulate_stw(regs, R2(regs->iir), 0); break; -#ifdef CONFIG_PA20 +#ifdef CONFIG_64BIT case OPCODE_LDD_I: case OPCODE_LDDA_I: case OPCODE_LDD_S: case OPCODE_LDDA_S: - ret = emulate_ldd(regs, R3(regs->iir),0); + ret = emulate_ldd(regs, R3(regs->iir), 0); break; case OPCODE_STD: case OPCODE_STDA: - ret = emulate_std(regs, R2(regs->iir),0); + ret = emulate_std(regs, R2(regs->iir), 0); break; #endif @@ -579,28 +512,24 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_FLDWS: case OPCODE_FLDWXR: case OPCODE_FLDWSR: - flop=1; - ret = emulate_ldw(regs,FR3(regs->iir),1); + ret = emulate_ldw(regs, FR3(regs->iir), 1); break; case OPCODE_FLDDX: case OPCODE_FLDDS: - flop=1; - ret = emulate_ldd(regs,R3(regs->iir),1); + ret = emulate_ldd(regs, R3(regs->iir), 1); break; case OPCODE_FSTWX: case OPCODE_FSTWS: case OPCODE_FSTWXR: case OPCODE_FSTWSR: - flop=1; - ret = emulate_stw(regs,FR3(regs->iir),1); + ret = emulate_stw(regs, FR3(regs->iir), 1); break; case 
OPCODE_FSTDX: case OPCODE_FSTDS: - flop=1; - ret = emulate_std(regs,R3(regs->iir),1); + ret = emulate_std(regs, R3(regs->iir), 1); break; case OPCODE_LDCD_I: @@ -610,37 +539,33 @@ void handle_unaligned(struct pt_regs *regs) ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */ break; } -#ifdef CONFIG_PA20 switch (regs->iir & OPCODE2_MASK) { case OPCODE_FLDD_L: - flop=1; ret = emulate_ldd(regs,R2(regs->iir),1); break; case OPCODE_FSTD_L: - flop=1; ret = emulate_std(regs, R2(regs->iir),1); break; +#ifdef CONFIG_64BIT case OPCODE_LDD_L: ret = emulate_ldd(regs, R2(regs->iir),0); break; case OPCODE_STD_L: ret = emulate_std(regs, R2(regs->iir),0); break; - } #endif + } switch (regs->iir & OPCODE3_MASK) { case OPCODE_FLDW_L: - flop=1; - ret = emulate_ldw(regs, R2(regs->iir),0); + ret = emulate_ldw(regs, R2(regs->iir), 1); break; case OPCODE_LDW_M: - ret = emulate_ldw(regs, R2(regs->iir),1); + ret = emulate_ldw(regs, R2(regs->iir), 0); break; case OPCODE_FSTW_L: - flop=1; ret = emulate_stw(regs, R2(regs->iir),1); break; case OPCODE_STW_M: @@ -687,17 +612,17 @@ void handle_unaligned(struct pt_regs *regs) printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret); die_if_kernel("Unaligned data reference", regs, 28); - if (ret == ERR_PAGEFAULT) + if (ret == -EFAULT) { force_sig_fault(SIGSEGV, SEGV_MAPERR, - (void __user *)regs->ior, current); + (void __user *)regs->ior); } else { force_sigbus: /* couldn't handle it ... */ force_sig_fault(SIGBUS, BUS_ADRALN, - (void __user *)regs->ior, current); + (void __user *)regs->ior); } return; diff --git a/arch/parisc/kernel/unaligned.h b/arch/parisc/kernel/unaligned.h new file mode 100644 index 000000000000..c1aa4b12e284 --- /dev/null +++ b/arch/parisc/kernel/unaligned.h @@ -0,0 +1,3 @@ +struct pt_regs; +void handle_unaligned(struct pt_regs *regs); +int check_unaligned(struct pt_regs *regs); diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 2d14f17838d2..7ac88ff13d3c 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -14,6 +14,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/sort.h> +#include <linux/sched/task_stack.h> #include <linux/uaccess.h> #include <asm/assembly.h> @@ -21,16 +22,21 @@ #include <asm/ptrace.h> #include <asm/unwind.h> +#include <asm/switch_to.h> +#include <asm/sections.h> +#include <asm/ftrace.h> /* #define DEBUG 1 */ #ifdef DEBUG #define dbg(x...) pr_debug(x) #else -#define dbg(x...) +#define dbg(x...) 
do { } while (0) #endif #define KERNEL_START (KERNEL_BINARY_TEXT_START) +#define ALIGNMENT_OK(ptr, type) (((ptr) & (sizeof(type) - 1)) == 0) + extern struct unwind_table_entry __start___unwind[]; extern struct unwind_table_entry __stop___unwind[]; @@ -40,7 +46,7 @@ static DEFINE_SPINLOCK(unwind_lock); * we can call unwind_init as early in the bootup process as * possible (before the slab allocator is initialized) */ -static struct unwind_table kernel_unwind_table __read_mostly; +static struct unwind_table kernel_unwind_table __ro_after_init; static LIST_HEAD(unwind_tables); static inline const struct unwind_table_entry * @@ -176,7 +182,7 @@ void unwind_table_remove(struct unwind_table *table) /* Called from setup_arch to import the kernel unwind info */ int __init unwind_init(void) { - long start, stop; + long start __maybe_unused, stop __maybe_unused; register unsigned long gp __asm__ ("r27"); start = (long)&__start___unwind[0]; @@ -203,6 +209,11 @@ int __init unwind_init(void) return 0; } +static bool pc_is_kernel_fn(unsigned long pc, void *fn) +{ + return (unsigned long)dereference_kernel_function_descriptor(fn) == pc; +} + static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size) { /* @@ -212,7 +223,6 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int * Note: We could use dereference_kernel_function_descriptor() * instead but we want to keep it simple here. */ - extern void * const handle_interruption; extern void * const ret_from_kernel_thread; extern void * const syscall_exit; extern void * const intr_return; @@ -221,7 +231,7 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int extern void * const _call_on_stack; #endif /* CONFIG_IRQSTACKS */ - if (pc == (unsigned long) &handle_interruption) { + if (pc_is_kernel_fn(pc, handle_interruption)) { struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN); dbg("Unwinding through handle_interruption()\n"); info->prev_sp = regs->gr[30]; @@ -229,13 +239,13 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int return 1; } - if (pc == (unsigned long) &ret_from_kernel_thread || - pc == (unsigned long) &syscall_exit) { + if (pc == (unsigned long)&ret_from_kernel_thread || + pc == (unsigned long)&syscall_exit) { info->prev_sp = info->prev_ip = 0; return 1; } - if (pc == (unsigned long) &intr_return) { + if (pc == (unsigned long)&intr_return) { struct pt_regs *regs; dbg("Found intr_return()\n"); @@ -246,20 +256,23 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int return 1; } - if (pc == (unsigned long) &_switch_to_ret) { + if (pc_is_kernel_fn(pc, _switch_to) || + pc == (unsigned long)&_switch_to_ret) { info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE; - info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET); + if (ALIGNMENT_OK(info->prev_sp, long)) + info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET); + else + info->prev_ip = info->prev_sp = 0; return 1; } #ifdef CONFIG_IRQSTACKS - if (pc == (unsigned long) &_call_on_stack) { + if (pc == (unsigned long)&_call_on_stack && ALIGNMENT_OK(info->sp, long)) { info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ); info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET); return 1; } #endif - return 0; } @@ -292,17 +305,15 @@ static void unwind_frame_regs(struct unwind_frame_info *info) info->prev_sp = sp - 64; info->prev_ip = 0; - /* The stack is at the end inside the thread_union - * 
struct. If we reach data, we have reached the - * beginning of the stack and should stop unwinding. */ - if (info->prev_sp >= (unsigned long) task_thread_info(info->t) && - info->prev_sp < ((unsigned long) task_thread_info(info->t) - + THREAD_SZ_ALGN)) { + /* Check if stack is inside kernel stack area */ + if ((info->prev_sp - (unsigned long) task_stack_page(info->t)) + >= THREAD_SIZE) { info->prev_sp = 0; break; } - if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET))) + if (copy_from_kernel_nofault(&tmp, + (void *)info->prev_sp - RP_OFFSET, sizeof(tmp))) break; info->prev_ip = tmp; sp = info->prev_sp; @@ -364,8 +375,10 @@ static void unwind_frame_regs(struct unwind_frame_info *info) info->prev_sp = info->sp - frame_size; if (e->Millicode) info->rp = info->r31; - else if (rpoffset) + else if (rpoffset && ALIGNMENT_OK(info->prev_sp, long)) info->rp = *(unsigned long *)(info->prev_sp - rpoffset); + else + info->rp = 0; info->prev_ip = info->rp; info->rp = 0; } diff --git a/arch/parisc/kernel/vdso.c b/arch/parisc/kernel/vdso.c new file mode 100644 index 000000000000..c5cbfce7a84c --- /dev/null +++ b/arch/parisc/kernel/vdso.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Helge Deller <deller@gmx.de> + * + * based on arch/s390/kernel/vdso.c which is + * Copyright IBM Corp. 2008 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/elf.h> +#include <linux/timekeeper_internal.h> +#include <linux/compat.h> +#include <linux/nsproxy.h> +#include <linux/time_namespace.h> +#include <linux/random.h> + +#include <asm/pgtable.h> +#include <asm/page.h> +#include <asm/sections.h> +#include <asm/vdso.h> +#include <asm/cacheflush.h> + +extern char vdso32_start, vdso32_end; +extern char vdso64_start, vdso64_end; + +static int vdso_mremap(const struct vm_special_mapping *sm, + struct vm_area_struct *vma) +{ + current->mm->context.vdso_base = vma->vm_start; + return 0; +} + +#ifdef CONFIG_64BIT +static struct vm_special_mapping vdso64_mapping = { + .name = "[vdso]", + .mremap = vdso_mremap, +}; +#endif + +static struct vm_special_mapping vdso32_mapping = { + .name = "[vdso]", + .mremap = vdso_mremap, +}; + +/* + * This is called from binfmt_elf, we create the special vma for the + * vDSO and insert it into the mm struct tree + */ +int arch_setup_additional_pages(struct linux_binprm *bprm, + int executable_stack) +{ + + unsigned long vdso_text_start, vdso_text_len, map_base; + struct vm_special_mapping *vdso_mapping; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int rc; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + +#ifdef CONFIG_64BIT + if (!is_compat_task()) { + vdso_text_len = &vdso64_end - &vdso64_start; + vdso_mapping = &vdso64_mapping; + } else +#endif + { + vdso_text_len = &vdso32_end - &vdso32_start; + vdso_mapping = &vdso32_mapping; + } + + map_base = mm->mmap_base; + if (current->flags & PF_RANDOMIZE) + map_base -= get_random_u32_below(0x20) * PAGE_SIZE; + + vdso_text_start = get_unmapped_area(NULL, map_base, vdso_text_len, 0, 0); + + /* VM_MAYWRITE for COW so gdb can set breakpoints */ + vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + vdso_mapping); + if (IS_ERR(vma)) { + do_munmap(mm, vdso_text_start, PAGE_SIZE, NULL); + rc = PTR_ERR(vma); + } else { + current->mm->context.vdso_base = vdso_text_start; + rc = 0; + } + + mmap_write_unlock(mm); 
+ return rc; +} + +static struct page ** __init vdso_setup_pages(void *start, void *end) +{ + int pages = (end - start) >> PAGE_SHIFT; + struct page **pagelist; + int i; + + pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL); + if (!pagelist) + panic("%s: Cannot allocate page list for VDSO", __func__); + for (i = 0; i < pages; i++) + pagelist[i] = virt_to_page(start + i * PAGE_SIZE); + return pagelist; +} + +static int __init vdso_init(void) +{ +#ifdef CONFIG_64BIT + vdso64_mapping.pages = vdso_setup_pages(&vdso64_start, &vdso64_end); +#endif + if (IS_ENABLED(CONFIG_COMPAT) || !IS_ENABLED(CONFIG_64BIT)) + vdso32_mapping.pages = vdso_setup_pages(&vdso32_start, &vdso32_end); + return 0; +} +arch_initcall(vdso_init); diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile new file mode 100644 index 000000000000..4ee8d17da229 --- /dev/null +++ b/arch/parisc/kernel/vdso32/Makefile @@ -0,0 +1,66 @@ +# Include the generic Makefile to check the built vdso. +include $(srctree)/lib/vdso/Makefile.include + +KCOV_INSTRUMENT := n + +# Disable gcov profiling, ubsan and kasan for VDSO code +GCOV_PROFILE := n +UBSAN_SANITIZE := n +KASAN_SANITIZE := n +KCSAN_SANITIZE := n + +obj-vdso32 = note.o sigtramp.o restart_syscall.o +obj-cvdso32 = vdso32_generic.o + +# Build rules + +targets := $(obj-vdso32) $(obj-cvdso32) vdso32.so +obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) +obj-cvdso32 := $(addprefix $(obj)/, $(obj-cvdso32)) + +VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_vdso32_generic.o = $(VDSO_CFLAGS_REMOVE) + +ccflags-y := -shared -fno-common -fbuiltin -mno-fast-indirect-calls -O2 -mno-long-calls +# -march=1.1 -mschedule=7100LC +ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ + $(call ld-option, -Wl$(comma)--hash-style=sysv) +asflags-y := -D__VDSO32__ -s + +KBUILD_AFLAGS += -DBUILD_VDSO +KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING + +VDSO_LIBGCC := $(shell $(CROSS32CC) -print-libgcc-file-name) + +obj-y += vdso32_wrapper.o +targets += vdso32.lds +CPPFLAGS_vdso32.lds += -P -C # -U$(ARCH) + +$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE + +# Force dependency (incbin is bad) +# link rule for the .so file, .lds has to be first +$(obj)/vdso32.so: $(obj)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE + $(call if_changed,vdso32ld) + +# assembly rules for the .S files +$(obj-vdso32): %.o: %.S FORCE + $(call if_changed_dep,vdso32as) +$(obj-cvdso32): %.o: %.c FORCE + $(call if_changed_dep,vdso32cc) + +# actual build commands +quiet_cmd_vdso32ld = VDSO32L $@ + cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@ +quiet_cmd_vdso32as = VDSO32A $@ + cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< +quiet_cmd_vdso32cc = VDSO32C $@ + cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -o $@ $< + +# Generate VDSO offsets using helper script +gen-vdsosym := $(src)/gen_vdso_offsets.sh +quiet_cmd_vdsosym = VDSOSYM $@ + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ + +include/generated/vdso32-offsets.h: $(obj)/vdso32.so FORCE + $(call if_changed,vdsosym) diff --git a/arch/parisc/kernel/vdso32/gen_vdso_offsets.sh b/arch/parisc/kernel/vdso32/gen_vdso_offsets.sh new file mode 100755 index 000000000000..da39d6cff7f1 --- /dev/null +++ b/arch/parisc/kernel/vdso32/gen_vdso_offsets.sh @@ -0,0 +1,15 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +# +# Match symbols in the DSO that look like VDSO_*; produce a header file +# of constant offsets into the shared object. 
+# +# Doing this inside the Makefile will break the $(filter-out) function, +# causing Kbuild to rebuild the vdso-offsets header file every time. +# +# Inspired by arm64 version. +# + +LC_ALL=C +sed -n 's/\([0-9a-f]*\) . __kernel_\(.*\)/\#define vdso32_offset_\2\t0x\1/p' diff --git a/arch/parisc/kernel/vdso32/note.S b/arch/parisc/kernel/vdso32/note.S new file mode 100644 index 000000000000..bb350918bebd --- /dev/null +++ b/arch/parisc/kernel/vdso32/note.S @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. + */ + +#include <linux/uts.h> +#include <linux/version.h> + +#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \ + .section name, flags; \ + .balign 4; \ + .long 1f - 0f; /* name length */ \ + .long 3f - 2f; /* data length */ \ + .long type; /* note type */ \ +0: .asciz vendor; /* vendor name */ \ +1: .balign 4; \ +2: + +#define ASM_ELF_NOTE_END \ +3: .balign 4; /* pad out section */ \ + .previous + + ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0) + .long LINUX_VERSION_CODE + ASM_ELF_NOTE_END diff --git a/arch/parisc/kernel/vdso32/restart_syscall.S b/arch/parisc/kernel/vdso32/restart_syscall.S new file mode 100644 index 000000000000..7e82008d7e40 --- /dev/null +++ b/arch/parisc/kernel/vdso32/restart_syscall.S @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Syscall restart trampoline for 32 and 64 bits processes. + * + * Copyright (C) 2018-2022 Helge Deller <deller@gmx.de> + * Copyright (C) 2022 John David Anglin <dave.anglin@bell.net> + */ + +#include <asm/unistd.h> +#include <asm/vdso.h> + +#include <linux/linkage.h> + + .text + +ENTRY_CFI(__kernel_restart_syscall) + /* + * Setup a trampoline to restart the syscall + * with __NR_restart_syscall + */ + + /* load return pointer */ +#if defined(__VDSO64__) + ldd 0(%sp), %r31 +#elif defined(__VDSO32__) + ldw 0(%sp), %r31 +#endif + + be 0x100(%sr2, %r0) + ldi __NR_restart_syscall, %r20 + +ENDPROC_CFI(__kernel_restart_syscall) diff --git a/arch/parisc/kernel/vdso32/sigtramp.S b/arch/parisc/kernel/vdso32/sigtramp.S new file mode 100644 index 000000000000..192da7077869 --- /dev/null +++ b/arch/parisc/kernel/vdso32/sigtramp.S @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Signal trampolines for 32 bit processes. + * + * Copyright (C) 2006 Randolph Chung <tausq@debian.org> + * Copyright (C) 2018-2022 Helge Deller <deller@gmx.de> + * Copyright (C) 2022 John David Anglin <dave.anglin@bell.net> + */ +#include <asm/unistd.h> +#include <linux/linkage.h> +#include <generated/asm-offsets.h> + + .text + +/* Gdb expects the trampoline is on the stack and the pc is offset from + a 64-byte boundary by 0, 4 or 5 instructions. Since the vdso trampoline + is not on the stack, we need a new variant with different offsets and + data to tell gdb where to find the signal context on the stack. + + Here we put the offset to the context data at the start of the trampoline + region and offset the first trampoline by 2 instructions. Please do + not change the trampoline as the code in gdb depends on the following + instruction sequence exactly. + */ + .align 64 + .word SIGFRAME_CONTEXT_REGS32 + +/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from + the return address to get an address in the middle of the presumed + call instruction. 
Since we don't have a call here, we artifically + extend the range covered by the unwind info by adding a nop before + the real start. + */ + nop + + .globl __kernel_sigtramp_rt + .type __kernel_sigtramp_rt, @function +__kernel_sigtramp_rt: + .proc + .callinfo FRAME=ASM_SIGFRAME_SIZE32,CALLS,SAVE_RP + .entry + +.Lsigrt_start = . - 4 +0: ldi 0, %r25 /* (in_syscall=0) */ + ldi __NR_rt_sigreturn, %r20 + ble 0x100(%sr2, %r0) + nop + +1: ldi 1, %r25 /* (in_syscall=1) */ + ldi __NR_rt_sigreturn, %r20 + ble 0x100(%sr2, %r0) + nop +.Lsigrt_end: + .exit + .procend + .size __kernel_sigtramp_rt,.-__kernel_sigtramp_rt + + + .section .eh_frame,"a",@progbits + +/* This is where the mcontext_t struct can be found on the stack. */ +#define PTREGS SIGFRAME_CONTEXT_REGS32 /* 32-bit process offset is -672 */ + +/* Register REGNO can be found at offset OFS of the mcontext_t structure. */ + .macro rsave regno,ofs + .byte 0x05 /* DW_CFA_offset_extended */ + .uleb128 \regno; /* regno */ + .uleb128 \ofs /* factored offset */ + .endm + +.Lcie: + .long .Lcie_end - .Lcie_start +.Lcie_start: + .long 0 /* CIE ID */ + .byte 1 /* Version number */ + .stringz "zRS" /* NUL-terminated augmentation string */ + .uleb128 4 /* Code alignment factor */ + .sleb128 4 /* Data alignment factor */ + .byte 89 /* Return address register column, iaoq[0] */ + .uleb128 1 /* Augmentation value length */ + .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */ + .byte 0x0f /* DW_CFA_def_cfa_expresion */ + .uleb128 9f - 1f /* length */ +1: + .byte 0x8e /* DW_OP_breg30 */ + .sleb128 PTREGS +9: + .balign 4 +.Lcie_end: + + .long .Lfde0_end - .Lfde0_start +.Lfde0_start: + .long .Lfde0_start - .Lcie /* CIE pointer. */ + .long .Lsigrt_start - . /* PC start, length */ + .long .Lsigrt_end - .Lsigrt_start + .uleb128 0 /* Augmentation */ + + /* General registers */ + rsave 1, 2 + rsave 2, 3 + rsave 3, 4 + rsave 4, 5 + rsave 5, 6 + rsave 6, 7 + rsave 7, 8 + rsave 8, 9 + rsave 9, 10 + rsave 10, 11 + rsave 11, 12 + rsave 12, 13 + rsave 13, 14 + rsave 14, 15 + rsave 15, 16 + rsave 16, 17 + rsave 17, 18 + rsave 18, 19 + rsave 19, 20 + rsave 20, 21 + rsave 21, 22 + rsave 22, 23 + rsave 23, 24 + rsave 24, 25 + rsave 25, 26 + rsave 26, 27 + rsave 27, 28 + rsave 28, 29 + rsave 29, 30 + rsave 30, 31 + rsave 31, 32 + + /* Floating-point registers */ + rsave 32, 42 + rsave 33, 43 + rsave 34, 44 + rsave 35, 45 + rsave 36, 46 + rsave 37, 47 + rsave 38, 48 + rsave 39, 49 + rsave 40, 50 + rsave 41, 51 + rsave 42, 52 + rsave 43, 53 + rsave 44, 54 + rsave 45, 55 + rsave 46, 56 + rsave 47, 57 + rsave 48, 58 + rsave 49, 59 + rsave 50, 60 + rsave 51, 61 + rsave 52, 62 + rsave 53, 63 + rsave 54, 64 + rsave 55, 65 + rsave 56, 66 + rsave 57, 67 + rsave 58, 68 + rsave 59, 69 + rsave 60, 70 + rsave 61, 71 + rsave 62, 72 + rsave 63, 73 + rsave 64, 74 + rsave 65, 75 + rsave 66, 76 + rsave 67, 77 + rsave 68, 78 + rsave 69, 79 + rsave 70, 80 + rsave 71, 81 + rsave 72, 82 + rsave 73, 83 + rsave 74, 84 + rsave 75, 85 + rsave 76, 86 + rsave 77, 87 + rsave 78, 88 + rsave 79, 89 + rsave 80, 90 + rsave 81, 91 + rsave 82, 92 + rsave 83, 93 + rsave 84, 94 + rsave 85, 95 + rsave 86, 96 + rsave 87, 97 + + /* SAR register */ + rsave 88, 102 + + /* iaoq[0] return address register */ + rsave 89, 100 + .balign 4 +.Lfde0_end: diff --git a/arch/parisc/kernel/vdso32/vdso32.lds.S b/arch/parisc/kernel/vdso32/vdso32.lds.S new file mode 100644 index 000000000000..4273baa26b65 --- /dev/null +++ b/arch/parisc/kernel/vdso32/vdso32.lds.S @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This 
is the infamous ld script for the 32 bits vdso library + */ +#include <asm/vdso.h> +#include <asm/page.h> + +/* Default link addresses for the vDSOs */ +OUTPUT_FORMAT("elf32-hppa-linux") +OUTPUT_ARCH(hppa) +ENTRY(_start) + +SECTIONS +{ + . = VDSO_LBASE + SIZEOF_HEADERS; + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + . = ALIGN (16); + .text : + { + *(.text .stub .text.* .gnu.linkonce.t.*) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + /* Other stuff is appended to the text segment: */ + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .rodata2 : { *(.data.rel.ro) } + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + .gcc_except_table : { *(.gcc_except_table) } + .fixup : { *(.fixup) } + + .dynamic : { *(.dynamic) } :text :dynamic + .plt : { *(.plt) } + .got : { *(.got) } + + _end = .; + __end = .; + PROVIDE (end = .); + + + /* Stabs debugging sections are here too + */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + + /DISCARD/ : { *(.note.GNU-stack) } + /DISCARD/ : { *(.data .data.* .gnu.linkonce.d.* .sdata*) } + /DISCARD/ : { *(.bss .sbss .dynbss .dynsbss) } +} + + +PHDRS +{ + text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ + note PT_NOTE FLAGS(4); /* PF_R */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + + +/* + * This controls what symbols we export from the DSO. 
+ */ +VERSION +{ + VDSO_VERSION_STRING { + global: + __kernel_sigtramp_rt32; + __kernel_restart_syscall32; + __vdso_gettimeofday; + __vdso_clock_gettime; + __vdso_clock_gettime64; + local: *; + }; +} diff --git a/arch/parisc/kernel/vdso32/vdso32_generic.c b/arch/parisc/kernel/vdso32/vdso32_generic.c new file mode 100644 index 000000000000..8d5bd59e8646 --- /dev/null +++ b/arch/parisc/kernel/vdso32/vdso32_generic.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "asm/unistd.h" +#include <linux/types.h> +#include <uapi/asm/unistd_32.h> + +struct timezone; +struct old_timespec32; +struct __kernel_timespec; +struct __kernel_old_timeval; + +/* forward declarations */ +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); +int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts); +int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts); + + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return syscall2(__NR_gettimeofday, (long)tv, (long)tz); +} + +int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts) +{ + return syscall2(__NR_clock_gettime, (long)clock, (long)ts); +} + +int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts) +{ + return syscall2(__NR_clock_gettime64, (long)clock, (long)ts); +} diff --git a/arch/parisc/kernel/vdso32/vdso32_wrapper.S b/arch/parisc/kernel/vdso32/vdso32_wrapper.S new file mode 100644 index 000000000000..5ac06093e8ec --- /dev/null +++ b/arch/parisc/kernel/vdso32/vdso32_wrapper.S @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> +#include <asm/page.h> + + __PAGE_ALIGNED_DATA + + .globl vdso32_start, vdso32_end + .balign PAGE_SIZE +vdso32_start: + .incbin "arch/parisc/kernel/vdso32/vdso32.so" + .balign PAGE_SIZE +vdso32_end: + + .previous diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile new file mode 100644 index 000000000000..c63f4069170f --- /dev/null +++ b/arch/parisc/kernel/vdso64/Makefile @@ -0,0 +1,65 @@ +# Include the generic Makefile to check the built vdso. 
+include $(srctree)/lib/vdso/Makefile.include + +KCOV_INSTRUMENT := n + +# Disable gcov profiling, ubsan and kasan for VDSO code +GCOV_PROFILE := n +UBSAN_SANITIZE := n +KASAN_SANITIZE := n +KCSAN_SANITIZE := n + +obj-vdso64 = note.o sigtramp.o restart_syscall.o +obj-cvdso64 = vdso64_generic.o + +# Build rules + +targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so +obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) +obj-cvdso64 := $(addprefix $(obj)/, $(obj-cvdso64)) + +VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE) + +ccflags-y := -shared -fno-common -fno-builtin +ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ + $(call ld-option, -Wl$(comma)--hash-style=sysv) +asflags-y := -D__VDSO64__ -s + +KBUILD_AFLAGS += -DBUILD_VDSO +KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING + +VDSO_LIBGCC := $(shell $(CC) -print-libgcc-file-name) + +obj-y += vdso64_wrapper.o +targets += vdso64.lds +CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) + +$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE + +# Force dependency (incbin is bad) +# link rule for the .so file, .lds has to be first +$(obj)/vdso64.so: $(obj)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) $(VDSO_LIBGCC) FORCE + $(call if_changed,vdso64ld) + +# assembly rules for the .S files +$(obj-vdso64): %.o: %.S FORCE + $(call if_changed_dep,vdso64as) +$(obj-cvdso64): %.o: %.c FORCE + $(call if_changed_dep,vdso64cc) + +# actual build commands +quiet_cmd_vdso64ld = VDSO64L $@ + cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@ +quiet_cmd_vdso64as = VDSO64A $@ + cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< +quiet_cmd_vdso64cc = VDSO64C $@ + cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $< + +# Generate VDSO offsets using helper script +gen-vdsosym := $(src)/gen_vdso_offsets.sh +quiet_cmd_vdsosym = VDSOSYM $@ + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ + +include/generated/vdso64-offsets.h: $(obj)/vdso64.so FORCE + $(call if_changed,vdsosym) diff --git a/arch/parisc/kernel/vdso64/gen_vdso_offsets.sh b/arch/parisc/kernel/vdso64/gen_vdso_offsets.sh new file mode 100755 index 000000000000..37f05cb38dad --- /dev/null +++ b/arch/parisc/kernel/vdso64/gen_vdso_offsets.sh @@ -0,0 +1,15 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +# +# Match symbols in the DSO that look like VDSO_*; produce a header file +# of constant offsets into the shared object. +# +# Doing this inside the Makefile will break the $(filter-out) function, +# causing Kbuild to rebuild the vdso-offsets header file every time. +# +# Inspired by arm64 version. +# + +LC_ALL=C +sed -n 's/\([0-9a-f]*\) . 
__kernel_\(.*\)/\#define vdso64_offset_\2\t0x\1/p' diff --git a/arch/parisc/kernel/vdso64/note.S b/arch/parisc/kernel/vdso64/note.S new file mode 100644 index 000000000000..bd1fa23597d6 --- /dev/null +++ b/arch/parisc/kernel/vdso64/note.S @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include "../vdso32/note.S" diff --git a/arch/parisc/kernel/vdso64/restart_syscall.S b/arch/parisc/kernel/vdso64/restart_syscall.S new file mode 100644 index 000000000000..83004451f6b1 --- /dev/null +++ b/arch/parisc/kernel/vdso64/restart_syscall.S @@ -0,0 +1,3 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include "../vdso32/restart_syscall.S" diff --git a/arch/parisc/kernel/vdso64/sigtramp.S b/arch/parisc/kernel/vdso64/sigtramp.S new file mode 100644 index 000000000000..66a6d2b241e1 --- /dev/null +++ b/arch/parisc/kernel/vdso64/sigtramp.S @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Signal trampolines for 64 bit processes. + * + * Copyright (C) 2006 Randolph Chung <tausq@debian.org> + * Copyright (C) 2018-2022 Helge Deller <deller@gmx.de> + * Copyright (C) 2022 John David Anglin <dave.anglin@bell.net> + */ +#include <asm/unistd.h> +#include <linux/linkage.h> +#include <generated/asm-offsets.h> + + .text + +/* Gdb expects the trampoline is on the stack and the pc is offset from + a 64-byte boundary by 0, 4 or 5 instructions. Since the vdso trampoline + is not on the stack, we need a new variant with different offsets and + data to tell gdb where to find the signal context on the stack. + + Here we put the offset to the context data at the start of the trampoline + region and offset the first trampoline by 2 instructions. Please do + not change the trampoline as the code in gdb depends on the following + instruction sequence exactly. + */ + .align 64 + .word SIGFRAME_CONTEXT_REGS + +/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from + the return address to get an address in the middle of the presumed + call instruction. Since we don't have a call here, we artifically + extend the range covered by the unwind info by adding a nop before + the real start. + */ + nop + + .globl __kernel_sigtramp_rt + .type __kernel_sigtramp_rt, @function +__kernel_sigtramp_rt: + .proc + .callinfo FRAME=ASM_SIGFRAME_SIZE,CALLS,SAVE_RP + .entry + +.Lsigrt_start = . - 4 +0: ldi 0, %r25 /* (in_syscall=0) */ + ldi __NR_rt_sigreturn, %r20 + ble 0x100(%sr2, %r0) + nop + +1: ldi 1, %r25 /* (in_syscall=1) */ + ldi __NR_rt_sigreturn, %r20 + ble 0x100(%sr2, %r0) + nop +.Lsigrt_end: + .exit + .procend + .size __kernel_sigtramp_rt,.-__kernel_sigtramp_rt + + .section .eh_frame,"a",@progbits + +/* This is where the mcontext_t struct can be found on the stack. */ +#define PTREGS SIGFRAME_CONTEXT_REGS /* 64-bit process offset is -720 */ + +/* Register REGNO can be found at offset OFS of the mcontext_t structure. */ + .macro rsave regno,ofs + .byte 0x05 /* DW_CFA_offset_extended */ + .uleb128 \regno; /* regno */ + .uleb128 \ofs /* factored offset */ + .endm + +.Lcie: + .long .Lcie_end - .Lcie_start +.Lcie_start: + .long 0 /* CIE ID */ + .byte 1 /* Version number */ + .stringz "zRS" /* NUL-terminated augmentation string */ + .uleb128 4 /* Code alignment factor */ + .sleb128 8 /* Data alignment factor */ + .byte 61 /* Return address register column, iaoq[0] */ + .uleb128 1 /* Augmentation value length */ + .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. 
*/ + .byte 0x0f /* DW_CFA_def_cfa_expresion */ + .uleb128 9f - 1f /* length */ +1: + .byte 0x8e /* DW_OP_breg30 */ + .sleb128 PTREGS +9: + .balign 8 +.Lcie_end: + + .long .Lfde0_end - .Lfde0_start +.Lfde0_start: + .long .Lfde0_start - .Lcie /* CIE pointer. */ + .long .Lsigrt_start - . /* PC start, length */ + .long .Lsigrt_end - .Lsigrt_start + .uleb128 0 /* Augmentation */ + + /* General registers */ + rsave 1, 2 + rsave 2, 3 + rsave 3, 4 + rsave 4, 5 + rsave 5, 6 + rsave 6, 7 + rsave 7, 8 + rsave 8, 9 + rsave 9, 10 + rsave 10, 11 + rsave 11, 12 + rsave 12, 13 + rsave 13, 14 + rsave 14, 15 + rsave 15, 16 + rsave 16, 17 + rsave 17, 18 + rsave 18, 19 + rsave 19, 20 + rsave 20, 21 + rsave 21, 22 + rsave 22, 23 + rsave 23, 24 + rsave 24, 25 + rsave 25, 26 + rsave 26, 27 + rsave 27, 28 + rsave 28, 29 + rsave 29, 30 + rsave 30, 31 + rsave 31, 32 + + /* Floating-point registers */ + rsave 32, 36 + rsave 33, 37 + rsave 34, 38 + rsave 35, 39 + rsave 36, 40 + rsave 37, 41 + rsave 38, 42 + rsave 39, 43 + rsave 40, 44 + rsave 41, 45 + rsave 42, 46 + rsave 43, 47 + rsave 44, 48 + rsave 45, 49 + rsave 46, 50 + rsave 47, 51 + rsave 48, 52 + rsave 49, 53 + rsave 50, 54 + rsave 51, 55 + rsave 52, 56 + rsave 53, 57 + rsave 54, 58 + rsave 55, 59 + rsave 56, 60 + rsave 57, 61 + rsave 58, 62 + rsave 59, 63 + + /* SAR register */ + rsave 60, 67 + + /* iaoq[0] return address register */ + rsave 61, 65 + .balign 8 +.Lfde0_end: diff --git a/arch/parisc/kernel/vdso64/vdso64.lds.S b/arch/parisc/kernel/vdso64/vdso64.lds.S new file mode 100644 index 000000000000..10f25e4e1554 --- /dev/null +++ b/arch/parisc/kernel/vdso64/vdso64.lds.S @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is the infamous ld script for the 64 bits vdso library + */ +#include <asm/vdso.h> + +/* Default link addresses for the vDSOs */ +OUTPUT_FORMAT("elf64-hppa-linux") +OUTPUT_ARCH(hppa:hppa2.0w) +ENTRY(_start) + +SECTIONS +{ + . = VDSO_LBASE + SIZEOF_HEADERS; + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + . 
= ALIGN (16); + .text : + { + *(.text .stub .text.* .gnu.linkonce.t.*) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + /* Other stuff is appended to the text segment: */ + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + .gcc_except_table : { *(.gcc_except_table) } + .fixup : { *(.fixup) } + + .dynamic : { *(.dynamic) } :text :dynamic + .plt : { *(.plt) } + .got : { *(.got) } + + _end = .; + __end = .; + PROVIDE (end = .); + + + /* Stabs debugging sections are here too + */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + + /DISCARD/ : { *(.note.GNU-stack) } + /DISCARD/ : { *(.data .data.* .gnu.linkonce.d.* .sdata*) } + /DISCARD/ : { *(.bss .sbss .dynbss .dynsbss) } +} + + +PHDRS +{ + text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ + note PT_NOTE FLAGS(4); /* PF_R */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + + +/* + * This controls what symbols we export from the DSO. 
+ */ +VERSION +{ + VDSO_VERSION_STRING { + global: + __kernel_sigtramp_rt64; + __kernel_restart_syscall64; + __vdso_gettimeofday; + __vdso_clock_gettime; + local: *; + }; +} diff --git a/arch/parisc/kernel/vdso64/vdso64_generic.c b/arch/parisc/kernel/vdso64/vdso64_generic.c new file mode 100644 index 000000000000..fc6836a0075b --- /dev/null +++ b/arch/parisc/kernel/vdso64/vdso64_generic.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "asm/unistd.h" +#include <linux/types.h> + +struct timezone; +struct __kernel_timespec; +struct __kernel_old_timeval; + +/* forward declarations */ +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); + + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return syscall2(__NR_gettimeofday, (long)tv, (long)tz); +} + +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) +{ + return syscall2(__NR_clock_gettime, (long)clock, (long)ts); +} diff --git a/arch/parisc/kernel/vdso64/vdso64_wrapper.S b/arch/parisc/kernel/vdso64/vdso64_wrapper.S new file mode 100644 index 000000000000..66f929482d3d --- /dev/null +++ b/arch/parisc/kernel/vdso64/vdso64_wrapper.S @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> +#include <asm/page.h> + + __PAGE_ALIGNED_DATA + + .globl vdso64_start, vdso64_end + .balign PAGE_SIZE +vdso64_start: + .incbin "arch/parisc/kernel/vdso64/vdso64.so" + .balign PAGE_SIZE +vdso64_end: + + .previous diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index c3b1b9c24ede..b445e47903cf 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -18,6 +18,9 @@ *(.data..vm0.pgd) \ *(.data..vm0.pte) +#define CC_USING_PATCHABLE_FUNCTION_ENTRY +#define RO_EXCEPTION_TABLE_ALIGN 8 + #include <asm-generic/vmlinux.lds.h> /* needed for the processor specific cache alignment size */ @@ -35,6 +38,15 @@ OUTPUT_FORMAT("elf64-hppa-linux") OUTPUT_ARCH(hppa:hppa2.0w) #endif +#define EXIT_TEXT_SECTIONS() .exit.text : { EXIT_TEXT } +#if !defined(CONFIG_64BIT) || defined(CONFIG_MLONGCALLS) +#define MLONGCALL_KEEP(x) +#define MLONGCALL_DISCARD(x) x +#else +#define MLONGCALL_KEEP(x) x +#define MLONGCALL_DISCARD(x) +#endif + ENTRY(parisc_kernel_start) #ifndef CONFIG_64BIT jiffies = jiffies_64 + 4; @@ -47,15 +59,11 @@ SECTIONS __init_begin = .; HEAD_TEXT_SECTION - INIT_TEXT_SECTION(8) + MLONGCALL_DISCARD(INIT_TEXT_SECTION(8)) . = ALIGN(PAGE_SIZE); INIT_DATA_SECTION(PAGE_SIZE) - /* we have to discard exit text and such at runtime, not link time */ - .exit.text : - { - EXIT_TEXT - } + MLONGCALL_DISCARD(EXIT_TEXT_SECTIONS()) .exit.data : { EXIT_DATA @@ -73,11 +81,11 @@ SECTIONS _text = .; /* Text and read-only data */ _stext = .; + MLONGCALL_KEEP(INIT_TEXT_SECTION(8)) .text ALIGN(PAGE_SIZE) : { TEXT_TEXT - SCHED_TEXT - CPUIDLE_TEXT LOCK_TEXT + SCHED_TEXT KPROBES_TEXT IRQENTRY_TEXT SOFTIRQENTRY_TEXT @@ -92,6 +100,7 @@ SECTIONS *(.lock.text) /* out-of-line lock text */ *(.gnu.warning) } + MLONGCALL_KEEP(EXIT_TEXT_SECTIONS()) . = ALIGN(PAGE_SIZE); _etext = .; /* End of text section */ @@ -100,7 +109,7 @@ SECTIONS _sdata = .; /* Architecturally we need to keep __gp below 0x1000000 and thus - * in front of RO_DATA_SECTION() which stores lots of tracepoint + * in front of RO_DATA() which stores lots of tracepoint * and ftrace symbols. */ #ifdef CONFIG_64BIT . 
= ALIGN(16); @@ -118,13 +127,10 @@ SECTIONS } #endif - RO_DATA_SECTION(8) - - /* RO because of BUILDTIME_EXTABLE_SORT */ - EXCEPTION_TABLE(8) - NOTES + RO_DATA(PAGE_SIZE) /* unwind info */ + . = ALIGN(4); .PARISC.unwind : { __start___unwind = .; *(.PARISC.unwind) @@ -140,7 +146,7 @@ SECTIONS data_start = .; /* Data */ - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE) /* PA-RISC locks requires 16-byte alignment */ . = ALIGN(16); @@ -149,6 +155,7 @@ SECTIONS } /* End of data section */ + . = ALIGN(PAGE_SIZE); _edata = .; /* BSS */ @@ -158,6 +165,7 @@ SECTIONS _end = . ; STABS_DEBUG + ELF_DETAILS .note 0 : { *(.note) } /* Sections to be discarded */ diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile index f2dac4d73b1b..7b197667faf6 100644 --- a/arch/parisc/lib/Makefile +++ b/arch/parisc/lib/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only # # Makefile for parisc-specific library files # diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 70ffbcf889b8..9df810050642 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -18,7 +18,7 @@ arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { #endif #ifdef CONFIG_64BIT -unsigned long __xchg64(unsigned long x, unsigned long *ptr) +unsigned long notrace __xchg64(unsigned long x, volatile unsigned long *ptr) { unsigned long temp, flags; @@ -30,7 +30,7 @@ unsigned long __xchg64(unsigned long x, unsigned long *ptr) } #endif -unsigned long __xchg32(int x, int *ptr) +unsigned long notrace __xchg32(int x, volatile int *ptr) { unsigned long flags; long temp; @@ -43,7 +43,7 @@ unsigned long __xchg32(int x, int *ptr) } -unsigned long __xchg8(char x, char *ptr) +unsigned long notrace __xchg8(char x, volatile char *ptr) { unsigned long flags; long temp; @@ -56,26 +56,20 @@ unsigned long __xchg8(char x, char *ptr) } -u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new) -{ - unsigned long flags; - u64 prev; - - _atomic_spin_lock_irqsave(ptr, flags); - if ((prev = *ptr) == old) - *ptr = new; - _atomic_spin_unlock_irqrestore(ptr, flags); - return prev; -} +#define CMPXCHG(T) \ + T notrace __cmpxchg_##T(volatile T *ptr, T old, T new) \ + { \ + unsigned long flags; \ + T prev; \ + \ + _atomic_spin_lock_irqsave(ptr, flags); \ + if ((prev = *ptr) == old) \ + *ptr = new; \ + _atomic_spin_unlock_irqrestore(ptr, flags); \ + return prev; \ + } -unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new) -{ - unsigned long flags; - unsigned int prev; - - _atomic_spin_lock_irqsave(ptr, flags); - if ((prev = *ptr) == old) - *ptr = new; - _atomic_spin_unlock_irqrestore(ptr, flags); - return (unsigned long)prev; -} +CMPXCHG(u64) +CMPXCHG(u32) +CMPXCHG(u16) +CMPXCHG(u8) diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c index ba6384da6ade..59d8c15d81bd 100644 --- a/arch/parisc/lib/checksum.c +++ b/arch/parisc/lib/checksum.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket @@ -8,11 +9,6 @@ * Authors: Ralf Baechle, <ralf@waldorf-gmbh.de> * Lots of code moved from tcp.c and ip.c; see those files * for more names. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/types.h> @@ -29,15 +25,6 @@ : "=r"(_t) \ : "r"(_r), "0"(_t)); -static inline unsigned short from32to16(unsigned int x) -{ - /* 32 bits --> 16 bits + carry */ - x = (x & 0xffff) + (x >> 16); - /* 16 bits + carry --> 16 bits including carry */ - x = (x & 0xffff) + (x >> 16); - return (unsigned short)x; -} - static inline unsigned int do_csum(const unsigned char * buff, int len) { int odd, count; @@ -89,7 +76,7 @@ static inline unsigned int do_csum(const unsigned char * buff, int len) } if (len & 1) result += le16_to_cpu(*buff); - result = from32to16(result); + result = csum_from32to16(result); if (odd) result = swab16(result); out: @@ -106,44 +93,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum) { unsigned int result = do_csum(buff, len); addc(result, sum); - return (__force __wsum)from32to16(result); + return (__force __wsum)csum_from32to16(result); } EXPORT_SYMBOL(csum_partial); - -/* - * copy while checksumming, otherwise like csum_partial - */ -__wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum) -{ - /* - * It's 2:30 am and I don't feel like doing it real ... - * This is lots slower than the real thing (tm) - */ - sum = csum_partial(src, len, sum); - memcpy(dst, src, len); - - return sum; -} -EXPORT_SYMBOL(csum_partial_copy_nocheck); - -/* - * Copy from userspace and compute checksum. If we catch an exception - * then zero the rest of the buffer. - */ -__wsum csum_partial_copy_from_user(const void __user *src, - void *dst, int len, - __wsum sum, int *err_ptr) -{ - int missing; - - missing = copy_from_user(dst, src, len); - if (missing) { - memset(dst + len - missing, 0, missing); - *err_ptr = -EFAULT; - } - - return csum_partial(dst, len, sum); -} -EXPORT_SYMBOL(csum_partial_copy_from_user); diff --git a/arch/parisc/lib/io.c b/arch/parisc/lib/io.c index 7c00496b47d4..3c7e617f5a93 100644 --- a/arch/parisc/lib/io.c +++ b/arch/parisc/lib/io.c @@ -12,114 +12,6 @@ #include <linux/module.h> #include <asm/io.h> -/* Copies a block of memory to a device in an efficient manner. - * Assumes the device can cope with 32-bit transfers. If it can't, - * don't use this function. - */ -void memcpy_toio(volatile void __iomem *dst, const void *src, int count) -{ - if (((unsigned long)dst & 3) != ((unsigned long)src & 3)) - goto bytecopy; - while ((unsigned long)dst & 3) { - writeb(*(char *)src, dst++); - src++; - count--; - } - while (count > 3) { - __raw_writel(*(u32 *)src, dst); - src += 4; - dst += 4; - count -= 4; - } - bytecopy: - while (count--) { - writeb(*(char *)src, dst++); - src++; - } -} - -/* -** Copies a block of memory from a device in an efficient manner. -** Assumes the device can cope with 32-bit transfers. If it can't, -** don't use this function. -** -** CR16 counts on C3000 reading 256 bytes from Symbios 896 RAM: -** 27341/64 = 427 cyc per int -** 61311/128 = 478 cyc per short -** 122637/256 = 479 cyc per byte -** Ergo bus latencies dominant (not transfer size). -** Minimize total number of transfers at cost of CPU cycles. -** TODO: only look at src alignment and adjust the stores to dest. 
-*/ -void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) -{ - /* first compare alignment of src/dst */ - if ( (((unsigned long)dst ^ (unsigned long)src) & 1) || (count < 2) ) - goto bytecopy; - - if ( (((unsigned long)dst ^ (unsigned long)src) & 2) || (count < 4) ) - goto shortcopy; - - /* Then check for misaligned start address */ - if ((unsigned long)src & 1) { - *(u8 *)dst = readb(src); - src++; - dst++; - count--; - if (count < 2) goto bytecopy; - } - - if ((unsigned long)src & 2) { - *(u16 *)dst = __raw_readw(src); - src += 2; - dst += 2; - count -= 2; - } - - while (count > 3) { - *(u32 *)dst = __raw_readl(src); - dst += 4; - src += 4; - count -= 4; - } - - shortcopy: - while (count > 1) { - *(u16 *)dst = __raw_readw(src); - src += 2; - dst += 2; - count -= 2; - } - - bytecopy: - while (count--) { - *(char *)dst = readb(src); - src++; - dst++; - } -} - -/* Sets a block of memory on a device to a given value. - * Assumes the device can cope with 32-bit transfers. If it can't, - * don't use this function. - */ -void memset_io(volatile void __iomem *addr, unsigned char val, int count) -{ - u32 val32 = (val << 24) | (val << 16) | (val << 8) | val; - while ((unsigned long)addr & 3) { - writeb(val, addr++); - count--; - } - while (count > 3) { - __raw_writel(val32, addr); - addr += 4; - count -= 4; - } - while (count--) { - writeb(val, addr++); - } -} - /* * Read COUNT 8-bit bytes from port PORT into memory starting at * SRC. @@ -170,15 +62,15 @@ void insw (unsigned long port, void *dst, unsigned long count) unsigned char *p; p = (unsigned char *)dst; - + if (!count) return; - + switch (((unsigned long)p) & 0x3) { case 0x00: /* Buffer 32-bit aligned */ while (count>=2) { - + count -= 2; l = cpu_to_le16(inw(port)) << 16; l |= cpu_to_le16(inw(port)); @@ -189,13 +81,13 @@ void insw (unsigned long port, void *dst, unsigned long count) *(unsigned short *)p = cpu_to_le16(inw(port)); } break; - + case 0x02: /* Buffer 16-bit aligned */ *(unsigned short *)p = cpu_to_le16(inw(port)); p += 2; count--; while (count>=2) { - + count -= 2; l = cpu_to_le16(inw(port)) << 16; l |= cpu_to_le16(inw(port)); @@ -206,13 +98,13 @@ void insw (unsigned long port, void *dst, unsigned long count) *(unsigned short *)p = cpu_to_le16(inw(port)); } break; - + case 0x01: /* Buffer 8-bit aligned */ case 0x03: /* I don't bother with 32bit transfers * in this case, 16bit will have to do -- DE */ --count; - + l = cpu_to_le16(inw(port)); *p = l >> 8; p++; @@ -242,10 +134,10 @@ void insl (unsigned long port, void *dst, unsigned long count) unsigned char *p; p = (unsigned char *)dst; - + if (!count) return; - + switch (((unsigned long) dst) & 0x3) { case 0x00: /* Buffer 32-bit aligned */ @@ -255,14 +147,14 @@ void insl (unsigned long port, void *dst, unsigned long count) p += 4; } break; - + case 0x02: /* Buffer 16-bit aligned */ --count; - + l = cpu_to_le32(inl(port)); *(unsigned short *)p = l >> 16; p += 2; - + while (count--) { l2 = cpu_to_le32(inl(port)); @@ -274,7 +166,7 @@ void insl (unsigned long port, void *dst, unsigned long count) break; case 0x01: /* Buffer 8-bit aligned */ --count; - + l = cpu_to_le32(inl(port)); *(unsigned char *)p = l >> 24; p++; @@ -291,7 +183,7 @@ void insl (unsigned long port, void *dst, unsigned long count) break; case 0x03: /* Buffer 8-bit aligned */ --count; - + l = cpu_to_le32(inl(port)); *p = l >> 24; p++; @@ -340,10 +232,10 @@ void outsw (unsigned long port, const void *src, unsigned long count) const unsigned char *p; p = (const unsigned char *)src; - + if (!count) 
return; - + switch (((unsigned long)p) & 0x3) { case 0x00: /* Buffer 32-bit aligned */ @@ -358,13 +250,13 @@ void outsw (unsigned long port, const void *src, unsigned long count) outw(le16_to_cpu(*(unsigned short*)p), port); } break; - + case 0x02: /* Buffer 16-bit aligned */ - + outw(le16_to_cpu(*(unsigned short*)p), port); p += 2; count--; - + while (count>=2) { count -= 2; l = *(unsigned int *)p; @@ -376,11 +268,11 @@ void outsw (unsigned long port, const void *src, unsigned long count) outw(le16_to_cpu(*(unsigned short *)p), port); } break; - - case 0x01: /* Buffer 8-bit aligned */ + + case 0x01: /* Buffer 8-bit aligned */ /* I don't bother with 32bit transfers * in this case, 16bit will have to do -- DE */ - + l = *p << 8; p++; count--; @@ -395,7 +287,7 @@ void outsw (unsigned long port, const void *src, unsigned long count) l2 = *(unsigned char *)p; outw (le16_to_cpu(l | l2>>8), port); break; - + } } @@ -412,10 +304,10 @@ void outsl (unsigned long port, const void *src, unsigned long count) const unsigned char *p; p = (const unsigned char *)src; - + if (!count) return; - + switch (((unsigned long)p) & 0x3) { case 0x00: /* Buffer 32-bit aligned */ @@ -425,13 +317,13 @@ void outsl (unsigned long port, const void *src, unsigned long count) p += 4; } break; - + case 0x02: /* Buffer 16-bit aligned */ --count; - + l = *(unsigned short *)p; p += 2; - + while (count--) { l2 = *(unsigned int *)p; @@ -462,7 +354,7 @@ void outsl (unsigned long port, const void *src, unsigned long count) break; case 0x03: /* Buffer 8-bit aligned */ --count; - + l = *p << 24; p++; diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c index 4b19e6e64fb7..915c0c4da663 100644 --- a/arch/parisc/lib/iomap.c +++ b/arch/parisc/lib/iomap.c @@ -43,19 +43,27 @@ #endif struct iomap_ops { - unsigned int (*read8)(void __iomem *); - unsigned int (*read16)(void __iomem *); - unsigned int (*read16be)(void __iomem *); - unsigned int (*read32)(void __iomem *); - unsigned int (*read32be)(void __iomem *); + unsigned int (*read8)(const void __iomem *); + unsigned int (*read16)(const void __iomem *); + unsigned int (*read16be)(const void __iomem *); + unsigned int (*read32)(const void __iomem *); + unsigned int (*read32be)(const void __iomem *); +#ifdef CONFIG_64BIT + u64 (*read64)(const void __iomem *); + u64 (*read64be)(const void __iomem *); +#endif void (*write8)(u8, void __iomem *); void (*write16)(u16, void __iomem *); void (*write16be)(u16, void __iomem *); void (*write32)(u32, void __iomem *); void (*write32be)(u32, void __iomem *); - void (*read8r)(void __iomem *, void *, unsigned long); - void (*read16r)(void __iomem *, void *, unsigned long); - void (*read32r)(void __iomem *, void *, unsigned long); +#ifdef CONFIG_64BIT + void (*write64)(u64, void __iomem *); + void (*write64be)(u64, void __iomem *); +#endif + void (*read8r)(const void __iomem *, void *, unsigned long); + void (*read16r)(const void __iomem *, void *, unsigned long); + void (*read32r)(const void __iomem *, void *, unsigned long); void (*write8r)(void __iomem *, const void *, unsigned long); void (*write16r)(void __iomem *, const void *, unsigned long); void (*write32r)(void __iomem *, const void *, unsigned long); @@ -65,17 +73,17 @@ struct iomap_ops { #define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff) -static unsigned int ioport_read8(void __iomem *addr) +static unsigned int ioport_read8(const void __iomem *addr) { return inb(ADDR2PORT(addr)); } -static unsigned int ioport_read16(void __iomem *addr) +static unsigned int 
ioport_read16(const void __iomem *addr) { return inw(ADDR2PORT(addr)); } -static unsigned int ioport_read32(void __iomem *addr) +static unsigned int ioport_read32(const void __iomem *addr) { return inl(ADDR2PORT(addr)); } @@ -95,17 +103,17 @@ static void ioport_write32(u32 datum, void __iomem *addr) outl(datum, ADDR2PORT(addr)); } -static void ioport_read8r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read8r(const void __iomem *addr, void *dst, unsigned long count) { insb(ADDR2PORT(addr), dst, count); } -static void ioport_read16r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read16r(const void __iomem *addr, void *dst, unsigned long count) { insw(ADDR2PORT(addr), dst, count); } -static void ioport_read32r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read32r(const void __iomem *addr, void *dst, unsigned long count) { insl(ADDR2PORT(addr), dst, count); } @@ -146,31 +154,43 @@ static const struct iomap_ops ioport_ops = { /* Legacy I/O memory ops */ -static unsigned int iomem_read8(void __iomem *addr) +static unsigned int iomem_read8(const void __iomem *addr) { return readb(addr); } -static unsigned int iomem_read16(void __iomem *addr) +static unsigned int iomem_read16(const void __iomem *addr) { return readw(addr); } -static unsigned int iomem_read16be(void __iomem *addr) +static unsigned int iomem_read16be(const void __iomem *addr) { return __raw_readw(addr); } -static unsigned int iomem_read32(void __iomem *addr) +static unsigned int iomem_read32(const void __iomem *addr) { return readl(addr); } -static unsigned int iomem_read32be(void __iomem *addr) +static unsigned int iomem_read32be(const void __iomem *addr) { return __raw_readl(addr); } +#ifdef CONFIG_64BIT +static u64 iomem_read64(const void __iomem *addr) +{ + return readq(addr); +} + +static u64 iomem_read64be(const void __iomem *addr) +{ + return __raw_readq(addr); +} +#endif + static void iomem_write8(u8 datum, void __iomem *addr) { writeb(datum, addr); @@ -196,7 +216,19 @@ static void iomem_write32be(u32 datum, void __iomem *addr) __raw_writel(datum, addr); } -static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count) +#ifdef CONFIG_64BIT +static void iomem_write64(u64 datum, void __iomem *addr) +{ + writeq(datum, addr); +} + +static void iomem_write64be(u64 datum, void __iomem *addr) +{ + __raw_writeq(datum, addr); +} +#endif + +static void iomem_read8r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u8 *)dst = __raw_readb(addr); @@ -204,7 +236,7 @@ static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count) } } -static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count) +static void iomem_read16r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u16 *)dst = __raw_readw(addr); @@ -212,7 +244,7 @@ static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count) } } -static void iomem_read32r(void __iomem *addr, void *dst, unsigned long count) +static void iomem_read32r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u32 *)dst = __raw_readl(addr); @@ -250,11 +282,19 @@ static const struct iomap_ops iomem_ops = { .read16be = iomem_read16be, .read32 = iomem_read32, .read32be = iomem_read32be, +#ifdef CONFIG_64BIT + .read64 = iomem_read64, + .read64be = iomem_read64be, +#endif .write8 = iomem_write8, .write16 = iomem_write16, .write16be = iomem_write16be, .write32 = iomem_write32, .write32be = 
iomem_write32be, +#ifdef CONFIG_64BIT + .write64 = iomem_write64, + .write64be = iomem_write64be, +#endif .read8r = iomem_read8r, .read16r = iomem_read16r, .read32r = iomem_read32r, @@ -269,41 +309,57 @@ static const struct iomap_ops *iomap_ops[8] = { }; -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read8(addr); return *((u8 *)addr); } -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr); return le16_to_cpup((u16 *)addr); } -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr); return *((u16 *)addr); } -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr); return le32_to_cpup((u32 *)addr); } -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr); return *((u32 *)addr); } +#ifdef CONFIG_64BIT +u64 ioread64(const void __iomem *addr) +{ + if (unlikely(INDIRECT_ADDR(addr))) + return iomap_ops[ADDR_TO_REGION(addr)]->read64(addr); + return le64_to_cpup((u64 *)addr); +} + +u64 ioread64be(const void __iomem *addr) +{ + if (unlikely(INDIRECT_ADDR(addr))) + return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr); + return *((u64 *)addr); +} +#endif + void iowrite8(u8 datum, void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) { @@ -349,9 +405,29 @@ void iowrite32be(u32 datum, void __iomem *addr) } } +#ifdef CONFIG_64BIT +void iowrite64(u64 datum, void __iomem *addr) +{ + if (unlikely(INDIRECT_ADDR(addr))) { + iomap_ops[ADDR_TO_REGION(addr)]->write64(datum, addr); + } else { + *((u64 *)addr) = cpu_to_le64(datum); + } +} + +void iowrite64be(u64 datum, void __iomem *addr) +{ + if (unlikely(INDIRECT_ADDR(addr))) { + iomap_ops[ADDR_TO_REGION(addr)]->write64be(datum, addr); + } else { + *((u64 *)addr) = datum; + } +} +#endif + /* Repeating interfaces */ -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count); @@ -363,7 +439,7 @@ void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) } } -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count); @@ -375,7 +451,7 @@ void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) } } -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count); @@ -437,23 +513,34 @@ void ioport_unmap(void __iomem *addr) } } +#ifdef CONFIG_PCI void pci_iounmap(struct pci_dev *dev, void __iomem * addr) { if (!INDIRECT_ADDR(addr)) { iounmap(addr); } } +EXPORT_SYMBOL(pci_iounmap); +#endif EXPORT_SYMBOL(ioread8); EXPORT_SYMBOL(ioread16); EXPORT_SYMBOL(ioread16be); 
EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); +#ifdef CONFIG_64BIT +EXPORT_SYMBOL(ioread64); +EXPORT_SYMBOL(ioread64be); +#endif EXPORT_SYMBOL(iowrite8); EXPORT_SYMBOL(iowrite16); EXPORT_SYMBOL(iowrite16be); EXPORT_SYMBOL(iowrite32); EXPORT_SYMBOL(iowrite32be); +#ifdef CONFIG_64BIT +EXPORT_SYMBOL(iowrite64); +EXPORT_SYMBOL(iowrite64be); +#endif EXPORT_SYMBOL(ioread8_rep); EXPORT_SYMBOL(ioread16_rep); EXPORT_SYMBOL(ioread32_rep); @@ -462,4 +549,3 @@ EXPORT_SYMBOL(iowrite16_rep); EXPORT_SYMBOL(iowrite32_rep); EXPORT_SYMBOL(ioport_map); EXPORT_SYMBOL(ioport_unmap); -EXPORT_SYMBOL(pci_iounmap); diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S index b53fb6fedf06..b428d29e45fb 100644 --- a/arch/parisc/lib/lusercopy.S +++ b/arch/parisc/lib/lusercopy.S @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * User Space Access Routines * @@ -7,21 +8,6 @@ * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org> * Copyright (C) 2017 Helge Deller <deller@gmx.de> * Copyright (C) 2017 John David Anglin <dave.anglin@bell.net> - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* @@ -42,21 +28,6 @@ #include <linux/linkage.h> /* - * get_sr gets the appropriate space value into - * sr1 for kernel/user space access, depending - * on the flag stored in the task structure. - */ - - .macro get_sr - mfctl %cr30,%r1 - ldw TI_SEGMENT(%r1),%r22 - mfsp %sr3,%r1 - or,<> %r22,%r0,%r0 - copy %r0,%r1 - mtsp %r1,%sr1 - .endm - - /* * unsigned long lclear_user(void *to, unsigned long n) * * Returns 0 for success. @@ -65,10 +36,9 @@ ENTRY_CFI(lclear_user) comib,=,n 0,%r25,$lclu_done - get_sr $lclu_loop: addib,<> -1,%r25,$lclu_loop -1: stbs,ma %r0,1(%sr1,%r26) +1: stbs,ma %r0,1(%sr3,%r26) $lclu_done: bv %r0(%r2) @@ -81,40 +51,6 @@ $lclu_done: ENDPROC_CFI(lclear_user) - /* - * long lstrnlen_user(char *s, long n) - * - * Returns 0 if exception before zero byte or reaching N, - * N+1 if N would be exceeded, - * else strlen + 1 (i.e. includes zero byte). - */ - -ENTRY_CFI(lstrnlen_user) - comib,= 0,%r25,$lslen_nzero - copy %r26,%r24 - get_sr -1: ldbs,ma 1(%sr1,%r26),%r1 -$lslen_loop: - comib,=,n 0,%r1,$lslen_done - addib,<> -1,%r25,$lslen_loop -2: ldbs,ma 1(%sr1,%r26),%r1 -$lslen_done: - bv %r0(%r2) - sub %r26,%r24,%r28 - -$lslen_nzero: - b $lslen_done - ldo 1(%r26),%r26 /* special case for N == 0 */ - -3: b $lslen_done - copy %r24,%r26 /* reset r26 so 0 is returned on fault */ - - ASM_EXCEPTIONTABLE_ENTRY(1b,3b) - ASM_EXCEPTIONTABLE_ENTRY(2b,3b) - -ENDPROC_CFI(lstrnlen_user) - - /* * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) * diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index 865a7f796c7f..03165c82dfdb 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -1,34 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Optimized memory copy routines. 
 *
 * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
 * Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
 *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
 * Portions derived from the GNU C Library
 * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
- *
 */
 #include <linux/module.h>
 #include <linux/compiler.h>
 #include <linux/uaccess.h>
+#include <linux/mm.h>
-#define get_user_space() (uaccess_kernel() ? 0 : mfsp(3))
-#define get_kernel_space() (0)
+#define get_user_space() mfsp(SR_USER)
+#define get_kernel_space() SR_KERNEL
 /* Returns 0 for success, otherwise, returns number of bytes not transferred. */
 extern unsigned long pa_memcpy(void *dst, const void *src,
@@ -37,8 +24,8 @@ extern unsigned long pa_memcpy(void *dst, const void *src,
 unsigned long raw_copy_to_user(void __user *dst, const void *src,
                                        unsigned long len)
 {
-        mtsp(get_kernel_space(), 1);
-        mtsp(get_user_space(), 2);
+        mtsp(get_kernel_space(), SR_TEMP1);
+        mtsp(get_user_space(), SR_TEMP2);
         return pa_memcpy((void __force *)dst, src, len);
 }
 EXPORT_SYMBOL(raw_copy_to_user);
@@ -46,39 +33,41 @@ EXPORT_SYMBOL(raw_copy_to_user);
 unsigned long raw_copy_from_user(void *dst, const void __user *src,
                               unsigned long len)
 {
-        mtsp(get_user_space(), 1);
-        mtsp(get_kernel_space(), 2);
-        return pa_memcpy(dst, (void __force *)src, len);
-}
-EXPORT_SYMBOL(raw_copy_from_user);
+        unsigned long start = (unsigned long) src;
+        unsigned long end = start + len;
+        unsigned long newlen = len;
-unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long len)
-{
-        mtsp(get_user_space(), 1);
-        mtsp(get_user_space(), 2);
-        return pa_memcpy((void __force *)dst, (void __force *)src, len);
-}
+        mtsp(get_user_space(), SR_TEMP1);
+        mtsp(get_kernel_space(), SR_TEMP2);
+        /* Check region is user accessible */
+        while (start < end) {
+                if (!prober_user(SR_TEMP1, start)) {
+                        newlen = (start - (unsigned long) src);
+                        break;
+                }
+                start += PAGE_SIZE;
+                /* align to page boundary which may have different permission */
+                start = PAGE_ALIGN_DOWN(start);
+        }
+        return len - newlen + pa_memcpy(dst, (void __force *)src, newlen);
+}
+EXPORT_SYMBOL(raw_copy_from_user);
 void * memcpy(void * dst,const void *src, size_t count)
 {
-        mtsp(get_kernel_space(), 1);
-        mtsp(get_kernel_space(), 2);
+        mtsp(get_kernel_space(), SR_TEMP1);
+        mtsp(get_kernel_space(), SR_TEMP2);
         pa_memcpy(dst, src, count);
         return dst;
 }
-EXPORT_SYMBOL(raw_copy_in_user);
 EXPORT_SYMBOL(memcpy);
-long probe_kernel_read(void *dst, const void *src, size_t size)
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 {
-        unsigned long addr = (unsigned long)src;
-
-        if (addr < PAGE_SIZE)
-                return -EFAULT;
-
+        if ((unsigned long)unsafe_src < PAGE_SIZE)
+                return false;
         /* check for I/O space F_EXTEND(0xfff00000) access as well?
*/ - - return __probe_kernel_read(dst, src, size); + return true; } diff --git a/arch/parisc/lib/memset.c b/arch/parisc/lib/memset.c index 1d7929bd7642..133e4809859a 100644 --- a/arch/parisc/lib/memset.c +++ b/arch/parisc/lib/memset.c @@ -1,23 +1,4 @@ -/* Copyright (C) 1991, 1997 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - 02111-1307 USA. */ - -/* Slight modifications for pa-risc linux - Paul Bame <bame@debian.org> */ - +/* SPDX-License-Identifier: GPL-2.0-or-later */ #include <linux/types.h> #include <asm/string.h> diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c index 8e6014a142ef..9d8b4dbae273 100644 --- a/arch/parisc/lib/ucmpdi2.c +++ b/arch/parisc/lib/ucmpdi2.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> +#include <linux/libgcc.h> union ull_union { unsigned long long ull; @@ -9,7 +10,7 @@ union ull_union { } ui; }; -int __ucmpdi2(unsigned long long a, unsigned long long b) +word_type __ucmpdi2(unsigned long long a, unsigned long long b) { union ull_union au = {.ull = a}; union ull_union bu = {.ull = b}; diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile index b6c4b254901a..7b64740e150a 100644 --- a/arch/parisc/math-emu/Makefile +++ b/arch/parisc/math-emu/Makefile @@ -6,7 +6,8 @@ # See arch/parisc/math-emu/README ccflags-y := -Wno-parentheses -Wno-implicit-function-declaration \ -Wno-uninitialized -Wno-strict-prototypes -Wno-return-type \ - -Wno-implicit-int + -Wno-implicit-int -Wno-missing-prototypes -Wno-missing-declarations \ + -Wno-old-style-definition -Wno-unused-but-set-variable obj-y := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \ dfmpy.o sfmpy.o sfsqrt.o dfsqrt.o dfadd.o fmpyfadd.o \ @@ -18,3 +19,4 @@ obj-y := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \ # other very old or stripped-down PA-RISC CPUs -- not currently supported obj-$(CONFIG_MATH_EMULATION) += unimplemented-math-emulation.o +CFLAGS_REMOVE_fpudispatch.o = -Wimplicit-fallthrough diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h index 933423fa5144..ef783a383c5a 100644 --- a/arch/parisc/math-emu/cnv_float.h +++ b/arch/parisc/math-emu/cnv_float.h @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef __NO_PA_HDRS @@ -60,19 +47,19 @@ ((exponent < (SGL_P - 1)) ? \ (Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE) -#define Int_isinexact_to_sgl(int_value) (int_value << 33 - SGL_EXP_LENGTH) +#define Int_isinexact_to_sgl(int_value) ((int_value << 33 - SGL_EXP_LENGTH) != 0) #define Sgl_roundnearest_from_int(int_value,sgl_value) \ if (int_value & 1<<(SGL_EXP_LENGTH - 2)) /* round bit */ \ - if ((int_value << 34 - SGL_EXP_LENGTH) || Slow(sgl_value)) \ + if (((int_value << 34 - SGL_EXP_LENGTH) != 0) || Slow(sgl_value)) \ Sall(sgl_value)++ #define Dint_isinexact_to_sgl(dint_valueA,dint_valueB) \ - ((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) || Dintp2(dint_valueB)) + (((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) != 0) || Dintp2(dint_valueB)) #define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value) \ if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) \ - if ((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) || \ + if (((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) != 0) || \ Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++ #define Dint_isinexact_to_dbl(dint_value) \ diff --git a/arch/parisc/math-emu/dbl_float.h b/arch/parisc/math-emu/dbl_float.h index 0c2fa9a951b3..e57fee8e11ba 100644 --- a/arch/parisc/math-emu/dbl_float.h +++ b/arch/parisc/math-emu/dbl_float.h @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef __NO_PA_HDRS PA header file -- do not include this header file for non-PA builds. diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c index 04e550e76ae8..d41ddb3430b5 100644 --- a/arch/parisc/math-emu/decode_exc.c +++ b/arch/parisc/math-emu/decode_exc.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -59,7 +46,7 @@ #define SIGNALCODE(signal, code) ((signal) << 24 | (code)) #define copropbit 1<<31-2 /* bit position 2 */ #define opclass 9 /* bits 21 & 22 */ -#define fmt 11 /* bits 19 & 20 */ +#define fmtbits 11 /* bits 19 & 20 */ #define df 13 /* bits 17 & 18 */ #define twobits 3 /* mask low-order 2 bits */ #define fivebits 31 /* mask low-order 5 bits */ @@ -70,7 +57,7 @@ #define Excp_instr(index) Instructionfield(Fpu_register[index]) #define Clear_excp_register(index) Allexception(Fpu_register[index]) = 0 #define Excp_format() \ - (current_ir >> ((current_ir>>opclass & twobits)==1 ? df : fmt) & twobits) + (current_ir >> ((current_ir>>opclass & twobits) == 1 ? df : fmtbits) & twobits) /* Miscellaneous definitions */ #define Fpu_sgl(index) Fpu_register[index*2] @@ -115,7 +102,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[]) * that happen. Want to keep this overhead low, but still provide * some information to the customer. All exits from this routine * need to restore Fpu_register[0] - */ + */ bflags=(Fpu_register[0] & 0xf8000000); Fpu_register[0] &= 0x07ffffff; diff --git a/arch/parisc/math-emu/denormal.c b/arch/parisc/math-emu/denormal.c index 60687e13c34f..7f1a60d59f21 100644 --- a/arch/parisc/math-emu/denormal.c +++ b/arch/parisc/math-emu/denormal.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC diff --git a/arch/parisc/math-emu/dfadd.c b/arch/parisc/math-emu/dfadd.c index d37e2d2cb6fe..00e561d4aa55 100644 --- a/arch/parisc/math-emu/dfadd.c +++ b/arch/parisc/math-emu/dfadd.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -266,7 +253,7 @@ dbl_fadd( return(NOEXCEPTION); } right_exponent = 1; /* Set exponent to reflect different bias - * with denomalized numbers. */ + * with denormalized numbers. */ } else { diff --git a/arch/parisc/math-emu/dfcmp.c b/arch/parisc/math-emu/dfcmp.c index 59521267ffcc..ae4b49744bf6 100644 --- a/arch/parisc/math-emu/dfcmp.c +++ b/arch/parisc/math-emu/dfcmp.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC diff --git a/arch/parisc/math-emu/dfdiv.c b/arch/parisc/math-emu/dfdiv.c index d7d4bec0e76f..239150dbe088 100644 --- a/arch/parisc/math-emu/dfdiv.c +++ b/arch/parisc/math-emu/dfdiv.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC diff --git a/arch/parisc/math-emu/dfmpy.c b/arch/parisc/math-emu/dfmpy.c index 4380f5a62ad1..87e0ce849967 100644 --- a/arch/parisc/math-emu/dfmpy.c +++ b/arch/parisc/math-emu/dfmpy.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC diff --git a/arch/parisc/math-emu/dfrem.c b/arch/parisc/math-emu/dfrem.c index b98378534677..9243a5954d08 100644 --- a/arch/parisc/math-emu/dfrem.c +++ b/arch/parisc/math-emu/dfrem.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC diff --git a/arch/parisc/math-emu/dfsqrt.c b/arch/parisc/math-emu/dfsqrt.c index 9542c6d281a5..e3a3a19b966b 100644 --- a/arch/parisc/math-emu/dfsqrt.c +++ b/arch/parisc/math-emu/dfsqrt.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -28,7 +15,7 @@ * Double Floating-point Square Root * * External Interfaces: - * dbl_fsqrt(srcptr,nullptr,dstptr,status) + * dbl_fsqrt(srcptr,_nullptr,dstptr,status) * * Internal Interfaces: * @@ -50,7 +37,7 @@ unsigned int dbl_fsqrt( dbl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_floating_point *dstptr, unsigned int *status) { diff --git a/arch/parisc/math-emu/dfsub.c b/arch/parisc/math-emu/dfsub.c index 2e8b5a79bff7..4f03782284bd 100644 --- a/arch/parisc/math-emu/dfsub.c +++ b/arch/parisc/math-emu/dfsub.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -269,7 +256,7 @@ dbl_fsub( return(NOEXCEPTION); } right_exponent = 1; /* Set exponent to reflect different bias - * with denomalized numbers. */ + * with denormalized numbers. */ } else { diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c index 0590e05571d1..71829cb7bc81 100644 --- a/arch/parisc/math-emu/driver.c +++ b/arch/parisc/math-emu/driver.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * linux/arch/math-emu/driver.c.c @@ -39,12 +26,6 @@ #define FPUDEBUG 0 -/* Format of the floating-point exception registers. */ -struct exc_reg { - unsigned int exception : 6; - unsigned int ei : 26; -}; - /* Macros for grabbing bits of the instruction format from the 'ei' field above. 
*/ /* Major opcode 0c and 0e */ @@ -116,9 +97,19 @@ handle_fpe(struct pt_regs *regs) memcpy(regs->fr, frcopy, sizeof regs->fr); if (signalcode != 0) { - force_sig_fault(signalcode >> 24, signalcode & 0xffffff, - (void __user *) regs->iaoq[0], current); - return -1; + int sig = signalcode >> 24; + + if (sig == SIGFPE) { + /* + * Clear floating point trap bit to avoid trapping + * again on the first floating-point instruction in + * the userspace signal handler. + */ + regs->fr[0] &= ~(1ULL << 38); + } + force_sig_fault(sig, signalcode & 0xffffff, + (void __user *) regs->iaoq[0]); + return -1; } return signalcode ? -1 : 0; diff --git a/arch/parisc/math-emu/fcnvff.c b/arch/parisc/math-emu/fcnvff.c index f9357d9d4cb1..61e489704c86 100644 --- a/arch/parisc/math-emu/fcnvff.c +++ b/arch/parisc/math-emu/fcnvff.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -29,8 +16,8 @@ * Double Floating-point to Single Floating-point * * External Interfaces: - * dbl_to_sgl_fcnvff(srcptr,nullptr,dstptr,status) - * sgl_to_dbl_fcnvff(srcptr,nullptr,dstptr,status) + * dbl_to_sgl_fcnvff(srcptr,_nullptr,dstptr,status) + * sgl_to_dbl_fcnvff(srcptr,_nullptr,dstptr,status) * * Internal Interfaces: * @@ -53,7 +40,7 @@ int sgl_to_dbl_fcnvff( sgl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_floating_point *dstptr, unsigned int *status) { @@ -140,7 +127,7 @@ sgl_to_dbl_fcnvff( int dbl_to_sgl_fcnvff( dbl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, sgl_floating_point *dstptr, unsigned int *status) { diff --git a/arch/parisc/math-emu/fcnvfu.c b/arch/parisc/math-emu/fcnvfu.c index 7e8565537bf5..c31790ceecca 100644 --- a/arch/parisc/math-emu/fcnvfu.c +++ b/arch/parisc/math-emu/fcnvfu.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -28,10 +15,10 @@ * Floating-point to Unsigned Fixed-point Converts * * External Interfaces: - * dbl_to_dbl_fcnvfu(srcptr,nullptr,dstptr,status) - * dbl_to_sgl_fcnvfu(srcptr,nullptr,dstptr,status) - * sgl_to_dbl_fcnvfu(srcptr,nullptr,dstptr,status) - * sgl_to_sgl_fcnvfu(srcptr,nullptr,dstptr,status) + * dbl_to_dbl_fcnvfu(srcptr,_nullptr,dstptr,status) + * dbl_to_sgl_fcnvfu(srcptr,_nullptr,dstptr,status) + * sgl_to_dbl_fcnvfu(srcptr,_nullptr,dstptr,status) + * sgl_to_sgl_fcnvfu(srcptr,_nullptr,dstptr,status) * * Internal Interfaces: * @@ -58,7 +45,7 @@ int sgl_to_sgl_fcnvfu( sgl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, unsigned int *dstptr, unsigned int *status) { @@ -179,7 +166,7 @@ sgl_to_sgl_fcnvfu( int sgl_to_dbl_fcnvfu( sgl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_unsigned *dstptr, unsigned int *status) { @@ -298,7 +285,7 @@ sgl_to_dbl_fcnvfu( */ /*ARGSUSED*/ int -dbl_to_sgl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr, +dbl_to_sgl_fcnvfu (dbl_floating_point * srcptr, unsigned int *_nullptr, unsigned int *dstptr, unsigned int *status) { register unsigned int srcp1, srcp2, result; @@ -421,7 +408,7 @@ dbl_to_sgl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr, */ /*ARGSUSED*/ int -dbl_to_dbl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr, +dbl_to_dbl_fcnvfu (dbl_floating_point * srcptr, unsigned int *_nullptr, dbl_unsigned * dstptr, unsigned int *status) { register int src_exponent; diff --git a/arch/parisc/math-emu/fcnvfut.c b/arch/parisc/math-emu/fcnvfut.c index 4176a44ed750..2cf1daf3b7ad 100644 --- a/arch/parisc/math-emu/fcnvfut.c +++ b/arch/parisc/math-emu/fcnvfut.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -28,10 +15,10 @@ * Floating-point to Unsigned Fixed-point Converts with Truncation * * External Interfaces: - * dbl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status) - * dbl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status) - * sgl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status) - * sgl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status) + * dbl_to_dbl_fcnvfut(srcptr,_nullptr,dstptr,status) + * dbl_to_sgl_fcnvfut(srcptr,_nullptr,dstptr,status) + * sgl_to_dbl_fcnvfut(srcptr,_nullptr,dstptr,status) + * sgl_to_sgl_fcnvfut(srcptr,_nullptr,dstptr,status) * * Internal Interfaces: * @@ -57,7 +44,7 @@ */ /*ARGSUSED*/ int -sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr, +sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *_nullptr, unsigned int *dstptr, unsigned int *status) { register unsigned int src, result; @@ -126,7 +113,7 @@ sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr, */ /*ARGSUSED*/ int -sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr, +sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *_nullptr, dbl_unsigned * dstptr, unsigned int *status) { register int src_exponent; @@ -196,7 +183,7 @@ sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr, */ /*ARGSUSED*/ int -dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr, +dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *_nullptr, unsigned int *dstptr, unsigned int *status) { register unsigned int srcp1, srcp2, result; @@ -265,7 +252,7 @@ dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr, */ /*ARGSUSED*/ int -dbl_to_dbl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr, +dbl_to_dbl_fcnvfut (dbl_floating_point * srcptr, unsigned int *_nullptr, dbl_unsigned * dstptr, unsigned int *status) { register int src_exponent; diff --git a/arch/parisc/math-emu/fcnvfx.c b/arch/parisc/math-emu/fcnvfx.c index d6475bddb507..99bd61479452 100644 --- a/arch/parisc/math-emu/fcnvfx.c +++ b/arch/parisc/math-emu/fcnvfx.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -31,10 +18,10 @@ * Double Floating-point to Double Fixed-point * * External Interfaces: - * dbl_to_dbl_fcnvfx(srcptr,nullptr,dstptr,status) - * dbl_to_sgl_fcnvfx(srcptr,nullptr,dstptr,status) - * sgl_to_dbl_fcnvfx(srcptr,nullptr,dstptr,status) - * sgl_to_sgl_fcnvfx(srcptr,nullptr,dstptr,status) + * dbl_to_dbl_fcnvfx(srcptr,_nullptr,dstptr,status) + * dbl_to_sgl_fcnvfx(srcptr,_nullptr,dstptr,status) + * sgl_to_dbl_fcnvfx(srcptr,_nullptr,dstptr,status) + * sgl_to_sgl_fcnvfx(srcptr,_nullptr,dstptr,status) * * Internal Interfaces: * @@ -57,7 +44,7 @@ int sgl_to_sgl_fcnvfx( sgl_floating_point *srcptr, - sgl_floating_point *nullptr, + sgl_floating_point *_nullptr, int *dstptr, sgl_floating_point *status) { @@ -154,7 +141,7 @@ sgl_to_sgl_fcnvfx( int sgl_to_dbl_fcnvfx( sgl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_integer *dstptr, unsigned int *status) { @@ -275,7 +262,7 @@ sgl_to_dbl_fcnvfx( int dbl_to_sgl_fcnvfx( dbl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, int *dstptr, unsigned int *status) { @@ -386,7 +373,7 @@ dbl_to_sgl_fcnvfx( int dbl_to_dbl_fcnvfx( dbl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_integer *dstptr, unsigned int *status) { diff --git a/arch/parisc/math-emu/fcnvfxt.c b/arch/parisc/math-emu/fcnvfxt.c index 8b9010c4690e..3b7cc62257d0 100644 --- a/arch/parisc/math-emu/fcnvfxt.c +++ b/arch/parisc/math-emu/fcnvfxt.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -31,10 +18,10 @@ * Double Floating-point to Double Fixed-point /w truncated result * * External Interfaces: - * dbl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status) - * dbl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status) - * sgl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status) - * sgl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status) + * dbl_to_dbl_fcnvfxt(srcptr,_nullptr,dstptr,status) + * dbl_to_sgl_fcnvfxt(srcptr,_nullptr,dstptr,status) + * sgl_to_dbl_fcnvfxt(srcptr,_nullptr,dstptr,status) + * sgl_to_sgl_fcnvfxt(srcptr,_nullptr,dstptr,status) * * Internal Interfaces: * @@ -58,7 +45,7 @@ int sgl_to_sgl_fcnvfxt( sgl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, int *dstptr, unsigned int *status) { @@ -122,7 +109,7 @@ sgl_to_sgl_fcnvfxt( int sgl_to_dbl_fcnvfxt( sgl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_integer *dstptr, unsigned int *status) { @@ -196,7 +183,7 @@ sgl_to_dbl_fcnvfxt( int dbl_to_sgl_fcnvfxt( dbl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, int *dstptr, unsigned int *status) { @@ -261,7 +248,7 @@ dbl_to_sgl_fcnvfxt( int dbl_to_dbl_fcnvfxt( dbl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_integer *dstptr, unsigned int *status) { diff --git a/arch/parisc/math-emu/fcnvuf.c b/arch/parisc/math-emu/fcnvuf.c index 5e68189fee22..c166feb57045 100644 --- a/arch/parisc/math-emu/fcnvuf.c +++ b/arch/parisc/math-emu/fcnvuf.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -28,10 +15,10 @@ * Fixed point to Floating-point Converts * * External Interfaces: - * dbl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status) - * dbl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status) - * sgl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status) - * sgl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status) + * dbl_to_dbl_fcnvuf(srcptr,_nullptr,dstptr,status) + * dbl_to_sgl_fcnvuf(srcptr,_nullptr,dstptr,status) + * sgl_to_dbl_fcnvuf(srcptr,_nullptr,dstptr,status) + * sgl_to_sgl_fcnvuf(srcptr,_nullptr,dstptr,status) * * Internal Interfaces: * @@ -58,7 +45,7 @@ int sgl_to_sgl_fcnvuf( unsigned int *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, sgl_floating_point *dstptr, unsigned int *status) { @@ -117,7 +104,7 @@ sgl_to_sgl_fcnvuf( int sgl_to_dbl_fcnvuf( unsigned int *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_floating_point *dstptr, unsigned int *status) { @@ -158,7 +145,7 @@ sgl_to_dbl_fcnvuf( int dbl_to_sgl_fcnvuf( dbl_unsigned *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, sgl_floating_point *dstptr, unsigned int *status) { @@ -240,7 +227,7 @@ dbl_to_sgl_fcnvuf( int dbl_to_dbl_fcnvuf( dbl_unsigned *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_floating_point *dstptr, unsigned int *status) { diff --git a/arch/parisc/math-emu/fcnvxf.c b/arch/parisc/math-emu/fcnvxf.c index 05c7fadb3729..11bc1e8a13aa 100644 --- a/arch/parisc/math-emu/fcnvxf.c +++ b/arch/parisc/math-emu/fcnvxf.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -31,10 +18,10 @@ * Double Fixed-point to Double Floating-point * * External Interfaces: - * dbl_to_dbl_fcnvxf(srcptr,nullptr,dstptr,status) - * dbl_to_sgl_fcnvxf(srcptr,nullptr,dstptr,status) - * sgl_to_dbl_fcnvxf(srcptr,nullptr,dstptr,status) - * sgl_to_sgl_fcnvxf(srcptr,nullptr,dstptr,status) + * dbl_to_dbl_fcnvxf(srcptr,_nullptr,dstptr,status) + * dbl_to_sgl_fcnvxf(srcptr,_nullptr,dstptr,status) + * sgl_to_dbl_fcnvxf(srcptr,_nullptr,dstptr,status) + * sgl_to_sgl_fcnvxf(srcptr,_nullptr,dstptr,status) * * Internal Interfaces: * @@ -57,7 +44,7 @@ int sgl_to_sgl_fcnvxf( int *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, sgl_floating_point *dstptr, unsigned int *status) { @@ -128,7 +115,7 @@ sgl_to_sgl_fcnvxf( int sgl_to_dbl_fcnvxf( int *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_floating_point *dstptr, unsigned int *status) { @@ -179,7 +166,7 @@ sgl_to_dbl_fcnvxf( int dbl_to_sgl_fcnvxf( dbl_integer *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, sgl_floating_point *dstptr, unsigned int *status) { @@ -284,7 +271,7 @@ dbl_to_sgl_fcnvxf( int dbl_to_dbl_fcnvxf( dbl_integer *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_floating_point *dstptr, unsigned int *status) { diff --git a/arch/parisc/math-emu/float.h b/arch/parisc/math-emu/float.h index 7a51f97e72e6..531bbda5e31f 100644 --- a/arch/parisc/math-emu/float.h +++ b/arch/parisc/math-emu/float.h @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC diff --git a/arch/parisc/math-emu/fmpyfadd.c b/arch/parisc/math-emu/fmpyfadd.c index b067c45c872d..029f9bbfe7b5 100644 --- a/arch/parisc/math-emu/fmpyfadd.c +++ b/arch/parisc/math-emu/fmpyfadd.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC diff --git a/arch/parisc/math-emu/fpbits.h b/arch/parisc/math-emu/fpbits.h index cefad064d74a..b46bddb9a513 100644 --- a/arch/parisc/math-emu/fpbits.h +++ b/arch/parisc/math-emu/fpbits.h @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef __NO_PA_HDRS diff --git a/arch/parisc/math-emu/fpu.h b/arch/parisc/math-emu/fpu.h index 0af5c3c88949..dec951d40286 100644 --- a/arch/parisc/math-emu/fpu.h +++ b/arch/parisc/math-emu/fpu.h @@ -1,51 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/* - * BEGIN_DESC - * - * File: - * @(#) pa/fp/fpu.h $Revision: 1.1 $ - * - * Purpose: - * <<please update with a synopis of the functionality provided by this file>> - * - * - * END_DESC -*/ - -#ifdef __NO_PA_HDRS - PA header file -- do not include this header file for non-PA builds. -#endif - #ifndef _MACHINE_FPU_INCLUDED /* allows multiple inclusion */ #define _MACHINE_FPU_INCLUDED -#if 0 -#ifndef _SYS_STDSYMS_INCLUDED -# include <sys/stdsyms.h> -#endif /* _SYS_STDSYMS_INCLUDED */ -#include <machine/pdc/pdc_rqsts.h> -#endif - #define PA83_FPU_FLAG 0x00000001 #define PA89_FPU_FLAG 0x00000002 #define PA2_0_FPU_FLAG 0x00000010 @@ -56,21 +19,19 @@ #define COPR_FP 0x00000080 /* Floating point -- Coprocessor 0 */ #define SFU_MPY_DIVIDE 0x00008000 /* Multiply/Divide __ SFU 0 */ - #define EM_FPU_TYPE_OFFSET 272 /* version of EMULATION software for COPR,0,0 instruction */ #define EMULATION_VERSION 4 /* - * The only was to differeniate between TIMEX and ROLEX (or PCX-S and PCX-T) - * is thorough the potential type field from the PDC_MODEL call. 
The - * following flags are used at assist this differeniation. + * The only way to differentiate between TIMEX and ROLEX (or PCX-S and PCX-T) + * is through the potential type field from the PDC_MODEL call. + * The following flags are used to assist this differentiation. */ #define ROLEX_POTENTIAL_KEY_FLAGS PDC_MODEL_CPU_KEY_WORD_TO_IO #define TIMEX_POTENTIAL_KEY_FLAGS (PDC_MODEL_CPU_KEY_QUAD_STORE | \ PDC_MODEL_CPU_KEY_RECIP_SQRT) - #endif /* ! _MACHINE_FPU_INCLUDED */ diff --git a/arch/parisc/math-emu/fpudispatch.c b/arch/parisc/math-emu/fpudispatch.c index 18df1237c93c..01ed133227c2 100644 --- a/arch/parisc/math-emu/fpudispatch.c +++ b/arch/parisc/math-emu/fpudispatch.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -323,12 +310,15 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; + fallthrough; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; + fallthrough; case 0: /* single */ fpregs[t] = fpregs[r1]; return(NOEXCEPTION); } + BUG(); case 3: /* FABS */ switch (fmt) { case 2: /* illegal */ @@ -338,13 +328,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; + fallthrough; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; + fallthrough; case 0: /* single */ /* copy and clear sign bit */ fpregs[t] = fpregs[r1] & 0x7fffffff; return(NOEXCEPTION); } + BUG(); case 6: /* FNEG */ switch (fmt) { case 2: /* illegal */ @@ -354,13 +347,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; + fallthrough; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; + fallthrough; case 0: /* single */ /* copy and invert sign bit */ fpregs[t] = fpregs[r1] ^ 0x80000000; return(NOEXCEPTION); } + BUG(); case 7: /* FNEGABS */ switch (fmt) { case 2: /* illegal */ @@ -370,13 +366,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; + fallthrough; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; + fallthrough; case 0: /* single */ /* copy and set sign bit */ fpregs[t] = fpregs[r1] | 0x80000000; return(NOEXCEPTION); } + BUG(); case 4: /* FSQRT */ switch (fmt) { case 0: @@ -389,6 +388,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } + BUG(); case 5: /* FRND */ switch (fmt) { case 0: @@ -402,7 +402,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) return(MAJOR_0C_EXCP); } } /* end of switch (subop) */ - + BUG(); case 1: /* 
class 1 */ df = extru(ir,fpdfpos,2); /* get dest format */ if ((df & 2) || (fmt & 2)) { @@ -432,6 +432,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) case 3: /* dbl/dbl */ return(MAJOR_0C_EXCP); } + BUG(); case 1: /* FCNVXF */ switch(fmt) { case 0: /* sgl/sgl */ @@ -447,6 +448,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) return(dbl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 2: /* FCNVFX */ switch(fmt) { case 0: /* sgl/sgl */ @@ -462,6 +464,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) return(dbl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 3: /* FCNVFXT */ switch(fmt) { case 0: /* sgl/sgl */ @@ -477,6 +480,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 5: /* FCNVUF (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ @@ -492,6 +496,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) return(dbl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 6: /* FCNVFU (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ @@ -507,6 +512,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) return(dbl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 7: /* FCNVFUT (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ @@ -522,10 +528,11 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) return(dbl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 4: /* undefined */ return(MAJOR_0C_EXCP); } /* end of switch subop */ - + BUG(); case 2: /* class 2 */ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; r2 = extru(ir, fpr2pos, 5) * sizeof(double)/sizeof(u_int); @@ -603,6 +610,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } + BUG(); case 1: /* FTEST */ switch (fmt) { case 0: @@ -622,8 +630,10 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) case 3: return(MAJOR_0C_EXCP); } + BUG(); } /* end of switch subop */ } /* end of else for PA1.0 & PA1.1 */ + BUG(); case 3: /* class 3 */ r2 = extru(ir,fpr2pos,5) * sizeof(double)/sizeof(u_int); if (r2 == 0) @@ -646,6 +656,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } + BUG(); case 1: /* FSUB */ switch (fmt) { case 0: @@ -658,6 +669,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } + BUG(); case 2: /* FMPY */ switch (fmt) { case 0: @@ -670,6 +682,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } + BUG(); case 3: /* FDIV */ switch (fmt) { case 0: @@ -682,6 +695,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } + BUG(); case 4: /* FREM */ switch (fmt) { case 0: @@ -694,6 +708,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } + BUG(); } /* end of class 3 switch */ } /* end of switch(class) */ @@ -749,10 +764,12 @@ u_int fpregs[]; return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; + fallthrough; case 0: /* single */ fpregs[t] = fpregs[r1]; return(NOEXCEPTION); } + BUG(); case 3: /* FABS */ switch (fmt) { case 2: @@ -760,10 +777,12 @@ u_int fpregs[]; return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = 
fpregs[r1+1]; + fallthrough; case 0: /* single */ fpregs[t] = fpregs[r1] & 0x7fffffff; return(NOEXCEPTION); } + BUG(); case 6: /* FNEG */ switch (fmt) { case 2: @@ -771,10 +790,12 @@ u_int fpregs[]; return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; + fallthrough; case 0: /* single */ fpregs[t] = fpregs[r1] ^ 0x80000000; return(NOEXCEPTION); } + BUG(); case 7: /* FNEGABS */ switch (fmt) { case 2: @@ -782,10 +803,12 @@ u_int fpregs[]; return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; + fallthrough; case 0: /* single */ fpregs[t] = fpregs[r1] | 0x80000000; return(NOEXCEPTION); } + BUG(); case 4: /* FSQRT */ switch (fmt) { case 0: @@ -798,6 +821,7 @@ u_int fpregs[]; case 3: return(MAJOR_0E_EXCP); } + BUG(); case 5: /* FRMD */ switch (fmt) { case 0: @@ -811,7 +835,7 @@ u_int fpregs[]; return(MAJOR_0E_EXCP); } } /* end of switch (subop */ - + BUG(); case 1: /* class 1 */ df = extru(ir,fpdfpos,2); /* get dest format */ /* @@ -839,6 +863,7 @@ u_int fpregs[]; case 3: /* dbl/dbl */ return(MAJOR_0E_EXCP); } + BUG(); case 1: /* FCNVXF */ switch(fmt) { case 0: /* sgl/sgl */ @@ -854,6 +879,7 @@ u_int fpregs[]; return(dbl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 2: /* FCNVFX */ switch(fmt) { case 0: /* sgl/sgl */ @@ -869,6 +895,7 @@ u_int fpregs[]; return(dbl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 3: /* FCNVFXT */ switch(fmt) { case 0: /* sgl/sgl */ @@ -884,6 +911,7 @@ u_int fpregs[]; return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 5: /* FCNVUF (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ @@ -899,6 +927,7 @@ u_int fpregs[]; return(dbl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 6: /* FCNVFU (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ @@ -914,6 +943,7 @@ u_int fpregs[]; return(dbl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 7: /* FCNVFUT (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ @@ -929,9 +959,11 @@ u_int fpregs[]; return(dbl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); } + BUG(); case 4: /* undefined */ return(MAJOR_0C_EXCP); } /* end of switch subop */ + BUG(); case 2: /* class 2 */ /* * Be careful out there. @@ -1007,6 +1039,7 @@ u_int fpregs[]; } } /* end of switch subop */ } /* end of else for PA1.0 & PA1.1 */ + BUG(); case 3: /* class 3 */ /* * Be careful out there. 
@@ -1039,6 +1072,7 @@ u_int fpregs[]; return(dbl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } + BUG(); case 1: /* FSUB */ switch (fmt) { case 0: @@ -1048,6 +1082,7 @@ u_int fpregs[]; return(dbl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } + BUG(); case 2: /* FMPY or XMPYU */ /* * check for integer multiply (x bit set) @@ -1084,6 +1119,7 @@ u_int fpregs[]; &fpregs[r2],&fpregs[t],status)); } } + BUG(); case 3: /* FDIV */ switch (fmt) { case 0: @@ -1093,6 +1129,7 @@ u_int fpregs[]; return(dbl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } + BUG(); case 4: /* FREM */ switch (fmt) { case 0: diff --git a/arch/parisc/math-emu/frnd.c b/arch/parisc/math-emu/frnd.c index 904b3844bf26..825d89650c2d 100644 --- a/arch/parisc/math-emu/frnd.c +++ b/arch/parisc/math-emu/frnd.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -27,8 +14,8 @@ * Quad Floating-point Round to Integer (returns unimplemented) * * External Interfaces: - * dbl_frnd(srcptr,nullptr,dstptr,status) - * sgl_frnd(srcptr,nullptr,dstptr,status) + * dbl_frnd(srcptr,_nullptr,dstptr,status) + * sgl_frnd(srcptr,_nullptr,dstptr,status) * * END_DESC */ @@ -46,7 +33,7 @@ /*ARGSUSED*/ int sgl_frnd(sgl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, sgl_floating_point *dstptr, unsigned int *status) { @@ -151,7 +138,7 @@ sgl_frnd(sgl_floating_point *srcptr, int dbl_frnd( dbl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, dbl_floating_point *dstptr, unsigned int *status) { diff --git a/arch/parisc/math-emu/hppa.h b/arch/parisc/math-emu/hppa.h index 5d3d52f7323a..b25b266c9986 100644 --- a/arch/parisc/math-emu/hppa.h +++ b/arch/parisc/math-emu/hppa.h @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef __NO_PA_HDRS diff --git a/arch/parisc/math-emu/math-emu.h b/arch/parisc/math-emu/math-emu.h index 3a99f5929291..a7b6fac166b6 100644 --- a/arch/parisc/math-emu/math-emu.h +++ b/arch/parisc/math-emu/math-emu.h @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _PARISC_MATH_EMU_H #define _PARISC_MATH_EMU_H diff --git a/arch/parisc/math-emu/sfadd.c b/arch/parisc/math-emu/sfadd.c index f802cd6c7869..9b98c874dfac 100644 --- a/arch/parisc/math-emu/sfadd.c +++ b/arch/parisc/math-emu/sfadd.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -262,7 +249,7 @@ sgl_fadd( return(NOEXCEPTION); } right_exponent = 1; /* Set exponent to reflect different bias - * with denomalized numbers. */ + * with denormalized numbers. */ } else { diff --git a/arch/parisc/math-emu/sfcmp.c b/arch/parisc/math-emu/sfcmp.c index 1466fb46e4c0..4a708f6c636e 100644 --- a/arch/parisc/math-emu/sfcmp.c +++ b/arch/parisc/math-emu/sfcmp.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC diff --git a/arch/parisc/math-emu/sfdiv.c b/arch/parisc/math-emu/sfdiv.c index 3e2a4d6daa9c..f1b439365798 100644 --- a/arch/parisc/math-emu/sfdiv.c +++ b/arch/parisc/math-emu/sfdiv.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC diff --git a/arch/parisc/math-emu/sfmpy.c b/arch/parisc/math-emu/sfmpy.c index afa406983335..7f4518679880 100644 --- a/arch/parisc/math-emu/sfmpy.c +++ b/arch/parisc/math-emu/sfmpy.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC diff --git a/arch/parisc/math-emu/sfrem.c b/arch/parisc/math-emu/sfrem.c index 3a1b7a34d87a..4ac88d829030 100644 --- a/arch/parisc/math-emu/sfrem.c +++ b/arch/parisc/math-emu/sfrem.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC diff --git a/arch/parisc/math-emu/sfsqrt.c b/arch/parisc/math-emu/sfsqrt.c index 4657a12c9107..8e9e023e7b2e 100644 --- a/arch/parisc/math-emu/sfsqrt.c +++ b/arch/parisc/math-emu/sfsqrt.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -28,7 +15,7 @@ * Single Floating-point Square Root * * External Interfaces: - * sgl_fsqrt(srcptr,nullptr,dstptr,status) + * sgl_fsqrt(srcptr,_nullptr,dstptr,status) * * Internal Interfaces: * @@ -50,7 +37,7 @@ unsigned int sgl_fsqrt( sgl_floating_point *srcptr, - unsigned int *nullptr, + unsigned int *_nullptr, sgl_floating_point *dstptr, unsigned int *status) { diff --git a/arch/parisc/math-emu/sfsub.c b/arch/parisc/math-emu/sfsub.c index 5f90d0f31a52..29d9eed09d12 100644 --- a/arch/parisc/math-emu/sfsub.c +++ b/arch/parisc/math-emu/sfsub.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC @@ -265,7 +252,7 @@ sgl_fsub( return(NOEXCEPTION); } right_exponent = 1; /* Set exponent to reflect different bias - * with denomalized numbers. */ + * with denormalized numbers. 
*/ } else { diff --git a/arch/parisc/math-emu/sgl_float.h b/arch/parisc/math-emu/sgl_float.h index 4ee4cc95e4bf..6ec2662cc365 100644 --- a/arch/parisc/math-emu/sgl_float.h +++ b/arch/parisc/math-emu/sgl_float.h @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef __NO_PA_HDRS diff --git a/arch/parisc/mm/Makefile b/arch/parisc/mm/Makefile index 134393de69d2..ffdb5c0a8cc6 100644 --- a/arch/parisc/mm/Makefile +++ b/arch/parisc/mm/Makefile @@ -1,6 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only # # Makefile for arch/parisc/mm # -obj-y := init.o fault.o ioremap.o +obj-y := init.o fault.o ioremap.o fixmap.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index c8e8b7c05558..f1785640b049 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -18,9 +18,12 @@ #include <linux/extable.h> #include <linux/uaccess.h> #include <linux/hugetlb.h> +#include <linux/perf_event.h> #include <asm/traps.h> +#define DEBUG_NATLB 0 + /* Various important other fields */ #define bit22set(x) (x & 0x00000200) #define bits23_25set(x) (x & 0x000001c0) @@ -35,7 +38,7 @@ int show_unhandled_signals = 1; /* * parisc_acctyp(unsigned int inst) -- * Given a PA-RISC memory access instruction, determine if the - * the instruction would perform a memory read or memory write + * instruction would perform a memory read or memory write * operation. * * This function assumes that the given instruction is a memory access @@ -47,7 +50,7 @@ int show_unhandled_signals = 1; * VM_WRITE if write operation * VM_EXEC if execute operation */ -static unsigned long +unsigned long parisc_acctyp(unsigned long code, unsigned int inst) { if (code == 6 || code == 16) @@ -66,6 +69,7 @@ parisc_acctyp(unsigned long code, unsigned int inst) case 0x30000000: /* coproc2 */ if (bit22set(inst)) return VM_WRITE; + fallthrough; case 0x0: /* indexed/memory management */ if (bit22set(inst)) { @@ -146,11 +150,16 @@ int fixup_exception(struct pt_regs *regs) * Fix up get_user() and put_user(). * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant * bit in the relative address of the fixup routine to indicate - * that %r8 should be loaded with -EFAULT to report a userspace + * that the register encoded in the "or %r0,%r0,register" + * opcode should be loaded with -EFAULT to report a userspace * access error. 
*/ if (fix->fixup & 1) { - regs->gr[8] = -EFAULT; + int fault_error_reg = fix->err_opcode & 0x1f; + if (!WARN_ON(!fault_error_reg)) + regs->gr[fault_error_reg] = -EFAULT; + pr_debug("Unalignment fixup of register %d at %pS\n", + fault_error_reg, (void*)regs->iaoq[0]); /* zero target register for get_user() */ if (parisc_acctyp(0, regs->iir) == VM_READ) { @@ -188,31 +197,31 @@ int fixup_exception(struct pt_regs *regs) * For implementation see handle_interruption() in traps.c */ static const char * const trap_description[] = { - [1] "High-priority machine check (HPMC)", - [2] "Power failure interrupt", - [3] "Recovery counter trap", - [5] "Low-priority machine check", - [6] "Instruction TLB miss fault", - [7] "Instruction access rights / protection trap", - [8] "Illegal instruction trap", - [9] "Break instruction trap", - [10] "Privileged operation trap", - [11] "Privileged register trap", - [12] "Overflow trap", - [13] "Conditional trap", - [14] "FP Assist Exception trap", - [15] "Data TLB miss fault", - [16] "Non-access ITLB miss fault", - [17] "Non-access DTLB miss fault", - [18] "Data memory protection/unaligned access trap", - [19] "Data memory break trap", - [20] "TLB dirty bit trap", - [21] "Page reference trap", - [22] "Assist emulation trap", - [25] "Taken branch trap", - [26] "Data memory access rights trap", - [27] "Data memory protection ID trap", - [28] "Unaligned data reference trap", + [1] = "High-priority machine check (HPMC)", + [2] = "Power failure interrupt", + [3] = "Recovery counter trap", + [5] = "Low-priority machine check", + [6] = "Instruction TLB miss fault", + [7] = "Instruction access rights / protection trap", + [8] = "Illegal instruction trap", + [9] = "Break instruction trap", + [10] = "Privileged operation trap", + [11] = "Privileged register trap", + [12] = "Overflow trap", + [13] = "Conditional trap", + [14] = "FP Assist Exception trap", + [15] = "Data TLB miss fault", + [16] = "Non-access ITLB miss fault", + [17] = "Non-access DTLB miss fault", + [18] = "Data memory protection/unaligned access trap", + [19] = "Data memory break trap", + [20] = "TLB dirty bit trap", + [21] = "Page reference trap", + [22] = "Assist emulation trap", + [25] = "Taken branch trap", + [26] = "Data memory access rights trap", + [27] = "Data memory protection ID trap", + [28] = "Unaligned data reference trap", }; const char *trap_name(unsigned long code) @@ -264,34 +273,39 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long acc_type; vm_fault_t fault = 0; unsigned int flags; - - if (faulthandler_disabled()) - goto no_context; + char *msg; tsk = current; mm = tsk->mm; - if (!mm) + if (!mm) { + msg = "Page fault: no context"; goto no_context; + } - flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + flags = FAULT_FLAG_DEFAULT; if (user_mode(regs)) flags |= FAULT_FLAG_USER; acc_type = parisc_acctyp(code, regs->iir); if (acc_type & VM_WRITE) flags |= FAULT_FLAG_WRITE; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma_prev(mm, address, &prev_vma); - if (!vma || address < vma->vm_start) - goto check_expansion; + if (!vma || address < vma->vm_start) { + if (!prev_vma || !(prev_vma->vm_flags & VM_GROWSUP)) + goto bad_area; + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; + } + /* * Ok, we have a good vm_area for this memory access. We still need to * check the access permissions. 
*/ -good_area: - if ((vma->vm_flags & acc_type) != acc_type) goto bad_area; @@ -301,9 +315,18 @@ good_area: * fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); + + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) { + msg = "Page fault: fault signal on kernel memory"; + goto no_context; + } + return; + } - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) return; if (unlikely(fault & VM_FAULT_ERROR)) { @@ -321,36 +344,28 @@ good_area: goto bad_area; BUG(); } - if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; - if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; - - /* - * No need to up_read(&mm->mmap_sem) as we would - * have already released it in __lock_page_or_retry - * in mm/filemap.c. - */ - - goto retry; - } + if (fault & VM_FAULT_RETRY) { + /* + * No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + flags |= FAULT_FLAG_TRIED; + goto retry; } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return; -check_expansion: - vma = prev_vma; - if (vma && (expand_stack(vma, address) == 0)) - goto good_area; - /* * Something tried to access memory that isn't in our memory map.. */ bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); + +bad_area_nosemaphore: + if (!user_mode(regs) && fixup_exception(regs)) { + return; + } if (user_mode(regs)) { int signo, si_code; @@ -373,7 +388,7 @@ bad_area: } /* probably address is outside of mapped file */ - /* fall through */ + fallthrough; case 17: /* NA data TLB miss / page fault */ case 18: /* Unaligned access - PCXS only */ signo = SIGBUS; @@ -403,15 +418,16 @@ bad_area: lsb = PAGE_SHIFT; force_sig_mceerr(BUS_MCEERR_AR, (void __user *) address, - lsb, current); + lsb); return; } #endif show_signal_msg(regs, code, address, tsk, vma); - force_sig_fault(signo, si_code, (void __user *) address, current); + force_sig_fault(signo, si_code, (void __user *) address); return; } + msg = "Page fault: bad address"; no_context: @@ -419,11 +435,101 @@ no_context: return; } - parisc_terminate("Bad Address (null pointer deref?)", regs, code, address); + parisc_terminate(msg, regs, code, address); - out_of_memory: - up_read(&mm->mmap_sem); - if (!user_mode(regs)) +out_of_memory: + mmap_read_unlock(mm); + if (!user_mode(regs)) { + msg = "Page fault: out of memory"; goto no_context; + } pagefault_out_of_memory(); } + +/* Handle non-access data TLB miss faults. + * + * For probe instructions, accesses to userspace are considered allowed + * if they lie in a valid VMA and the access type matches. We are not + * allowed to handle MM faults here so there may be situations where an + * actual access would fail even though a probe was successful. 
+ */ +int +handle_nadtlb_fault(struct pt_regs *regs) +{ + unsigned long insn = regs->iir; + int breg, treg, xreg, val = 0; + struct vm_area_struct *vma; + struct task_struct *tsk; + struct mm_struct *mm; + unsigned long address; + unsigned long acc_type; + + switch (insn & 0x380) { + case 0x280: + /* FDC instruction */ + fallthrough; + case 0x380: + /* PDC and FIC instructions */ + if (DEBUG_NATLB && printk_ratelimit()) { + pr_warn("WARNING: nullifying cache flush/purge instruction\n"); + show_regs(regs); + } + if (insn & 0x20) { + /* Base modification */ + breg = (insn >> 21) & 0x1f; + xreg = (insn >> 16) & 0x1f; + if (breg && xreg) + regs->gr[breg] += regs->gr[xreg]; + } + regs->gr[0] |= PSW_N; + return 1; + + case 0x180: + /* PROBE instruction */ + treg = insn & 0x1f; + if (regs->isr) { + tsk = current; + mm = tsk->mm; + if (mm) { + /* Search for VMA */ + address = regs->ior; + mmap_read_lock(mm); + vma = vma_lookup(mm, address); + mmap_read_unlock(mm); + + /* + * Check if access to the VMA is okay. + * We don't allow for stack expansion. + */ + acc_type = (insn & 0x40) ? VM_WRITE : VM_READ; + if (vma + && (vma->vm_flags & acc_type) == acc_type) + val = 1; + } + } + if (treg) + regs->gr[treg] = val; + regs->gr[0] |= PSW_N; + return 1; + + case 0x300: + /* LPA instruction */ + if (insn & 0x20) { + /* Base modification */ + breg = (insn >> 21) & 0x1f; + xreg = (insn >> 16) & 0x1f; + if (breg && xreg) + regs->gr[breg] += regs->gr[xreg]; + } + treg = insn & 0x1f; + if (treg) + regs->gr[treg] = 0; + regs->gr[0] |= PSW_N; + return 1; + + default: + break; + } + + return 0; +} diff --git a/arch/parisc/mm/fixmap.c b/arch/parisc/mm/fixmap.c new file mode 100644 index 000000000000..ae3493dae9dc --- /dev/null +++ b/arch/parisc/mm/fixmap.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * fixmaps for parisc + * + * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org> + */ + +#include <linux/kprobes.h> +#include <linux/mm.h> +#include <asm/cacheflush.h> +#include <asm/fixmap.h> + +void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys) +{ + unsigned long vaddr = __fix_to_virt(idx); + pgd_t *pgd = pgd_offset_k(vaddr); + p4d_t *p4d = p4d_offset(pgd, vaddr); + pud_t *pud = pud_offset(p4d, vaddr); + pmd_t *pmd = pmd_offset(pud, vaddr); + pte_t *pte; + + pte = pte_offset_kernel(pmd, vaddr); + set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX)); + flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); +} + +void notrace clear_fixmap(enum fixed_addresses idx) +{ + unsigned long vaddr = __fix_to_virt(idx); + pte_t *pte = virt_to_kpte(vaddr); + + if (WARN_ON(pte_none(*pte))) + return; + + pte_clear(&init_mm, vaddr, pte); + + flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); +} diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index d77479ae3af2..a94fe546d434 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -15,40 +15,19 @@ #include <linux/sysctl.h> #include <asm/mman.h> -#include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> -unsigned long -hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) -{ - struct hstate *h = hstate_file(file); - - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (len > TASK_SIZE) - return -ENOMEM; - - if (flags & MAP_FIXED) - if (prepare_hugepage_range(file, addr, len)) - return -EINVAL; - - if (addr) - addr = ALIGN(addr, huge_page_size(h)); - - 
/* we need to make sure the colouring is OK */ - return arch_get_unmapped_area(file, addr, len, pgoff, flags); -} -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte = NULL; @@ -61,11 +40,12 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, addr &= HPAGE_MASK; pgd = pgd_offset(mm, addr); - pud = pud_alloc(mm, pgd, addr); + p4d = p4d_offset(pgd, addr); + pud = pud_alloc(mm, p4d, addr); if (pud) { pmd = pmd_alloc(mm, pud, addr); if (pmd) - pte = pte_alloc_map(mm, pmd, addr); + pte = pte_alloc_huge(mm, pmd, addr); } return pte; } @@ -74,6 +54,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte = NULL; @@ -82,11 +63,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, pgd = pgd_offset(mm, addr); if (!pgd_none(*pgd)) { - pud = pud_offset(pgd, addr); - if (!pud_none(*pud)) { - pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) - pte = pte_offset_map(pmd, addr); + p4d = p4d_offset(pgd, addr); + if (!p4d_none(*p4d)) { + pud = pud_offset(p4d, addr); + if (!pud_none(*pud)) { + pmd = pmd_offset(pud, addr); + if (!pmd_none(*pmd)) + pte = pte_offset_huge(pmd, addr); + } } } return pte; @@ -135,26 +119,19 @@ static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, } void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t entry) + pte_t *ptep, pte_t entry, unsigned long sz) { - unsigned long flags; - - purge_tlb_start(flags); __set_huge_pte_at(mm, addr, ptep, entry); - purge_tlb_end(flags); } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) + pte_t *ptep, unsigned long sz) { - unsigned long flags; pte_t entry; - purge_tlb_start(flags); entry = *ptep; __set_huge_pte_at(mm, addr, ptep, __pte(0)); - purge_tlb_end(flags); return entry; } @@ -163,38 +140,22 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long flags; pte_t old_pte; - purge_tlb_start(flags); old_pte = *ptep; __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); - purge_tlb_end(flags); } int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { - unsigned long flags; int changed; + struct mm_struct *mm = vma->vm_mm; - purge_tlb_start(flags); changed = !pte_same(*ptep, pte); if (changed) { - __set_huge_pte_at(vma->vm_mm, addr, ptep, pte); + __set_huge_pte_at(mm, addr, ptep, pte); } - purge_tlb_end(flags); return changed; } - - -int pmd_huge(pmd_t pmd) -{ - return 0; -} - -int pud_huge(pud_t pud) -{ - return 0; -} diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 059187a3ded7..14270715d754 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -24,34 +24,27 @@ #include <linux/nodemask.h> /* for node_online_map */ #include <linux/pagemap.h> /* for release_pages */ #include <linux/compat.h> +#include <linux/execmem.h> #include <asm/pgalloc.h> -#include <asm/pgtable.h> #include <asm/tlb.h> #include <asm/pdc_chassis.h> #include <asm/mmzone.h> #include <asm/sections.h> #include <asm/msgbuf.h> +#include <asm/sparsemem.h> +#include <asm/asm-offsets.h> +#include <asm/shmbuf.h> extern int data_start; extern void parisc_kernel_start(void); /* Kernel entry point in head.S */ #if CONFIG_PGTABLE_LEVELS == 3 -/* 
NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout - * with the first pmd adjacent to the pgd and below it. gcc doesn't actually - * guarantee that global objects will be laid out in memory in the same order - * as the order of declaration, so put these in different sections and use - * the linker script to order them. */ -pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE))); +pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE))); #endif -pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE))); -pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE))); - -#ifdef CONFIG_DISCONTIGMEM -struct node_map_data node_data[MAX_NUMNODES] __read_mostly; -signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; -#endif +pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE))); +pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE))); static struct resource data_resource = { .name = "Kernel data", @@ -70,47 +63,17 @@ static struct resource pdcdata_resource = { .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; -static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly; +static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init; /* The following array is initialized from the firmware specific * information retrieved in kernel/inventory.c. */ -physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly; -int npmem_ranges __read_mostly; - -/* - * get_memblock() allocates pages via memblock. - * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it - * doesn't allocate from bottom to top which is needed because we only created - * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code. 
- */ -static void * __init get_memblock(unsigned long size) -{ - static phys_addr_t search_addr __initdata; - phys_addr_t phys; - - if (!search_addr) - search_addr = PAGE_ALIGN(__pa((unsigned long) &_end)); - search_addr = ALIGN(search_addr, size); - while (!memblock_is_region_memory(search_addr, size) || - memblock_is_region_reserved(search_addr, size)) { - search_addr += size; - } - phys = search_addr; - - if (phys) - memblock_reserve(phys, size); - else - panic("get_memblock() failed.\n"); - - memset(__va(phys), 0, size); - - return __va(phys); -} +physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata; +int npmem_ranges __initdata; #ifdef CONFIG_64BIT -#define MAX_MEM (~0UL) +#define MAX_MEM (1UL << MAX_PHYSMEM_BITS) #else /* !CONFIG_64BIT */ #define MAX_MEM (3584U*1024U*1024U) #endif /* !CONFIG_64BIT */ @@ -149,7 +112,7 @@ static void __init mem_limit_func(void) static void __init setup_bootmem(void) { unsigned long mem_max; -#ifndef CONFIG_DISCONTIGMEM +#ifndef CONFIG_SPARSEMEM physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1]; int npmem_holes; #endif @@ -167,23 +130,16 @@ static void __init setup_bootmem(void) int j; for (j = i; j > 0; j--) { - unsigned long tmp; - if (pmem_ranges[j-1].start_pfn < pmem_ranges[j].start_pfn) { break; } - tmp = pmem_ranges[j-1].start_pfn; - pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn; - pmem_ranges[j].start_pfn = tmp; - tmp = pmem_ranges[j-1].pages; - pmem_ranges[j-1].pages = pmem_ranges[j].pages; - pmem_ranges[j].pages = tmp; + swap(pmem_ranges[j-1], pmem_ranges[j]); } } -#ifndef CONFIG_DISCONTIGMEM +#ifndef CONFIG_SPARSEMEM /* * Throw out ranges that are too far apart (controlled by * MAX_GAP). @@ -195,7 +151,7 @@ static void __init setup_bootmem(void) pmem_ranges[i-1].pages) > MAX_GAP) { npmem_ranges = i; printk("Large gap in memory detected (%ld pages). " - "Consider turning on CONFIG_DISCONTIGMEM\n", + "Consider turning on CONFIG_SPARSEMEM\n", pmem_ranges[i].start_pfn - (pmem_ranges[i-1].start_pfn + pmem_ranges[i-1].pages)); @@ -260,9 +216,8 @@ static void __init setup_bootmem(void) printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20); -#ifndef CONFIG_DISCONTIGMEM +#ifndef CONFIG_SPARSEMEM /* Merge the ranges, keeping track of the holes */ - { unsigned long end_pfn; unsigned long hole_pages; @@ -285,18 +240,6 @@ static void __init setup_bootmem(void) } #endif -#ifdef CONFIG_DISCONTIGMEM - for (i = 0; i < MAX_PHYSMEM_RANGES; i++) { - memset(NODE_DATA(i), 0, sizeof(pg_data_t)); - } - memset(pfnnid_map, 0xff, sizeof(pfnnid_map)); - - for (i = 0; i < npmem_ranges; i++) { - node_set_state(i, N_NORMAL_MEMORY); - node_set_online(i); - } -#endif - /* * Initialize and free the full range of memory in each range. */ @@ -321,6 +264,13 @@ static void __init setup_bootmem(void) max_pfn = start_pfn + npages; } + /* + * We can't use memblock top-down allocations because we only + * created the initial mapping up to KERNEL_INITIAL_SIZE in + * the assembly bootup code. + */ + memblock_set_bottom_up(true); + /* IOMMU is always used to access "high mem" on those boxes * that can support enough mem that a PCI device couldn't * directly DMA to any physical addresses. @@ -337,7 +287,7 @@ static void __init setup_bootmem(void) memblock_reserve(__pa(KERNEL_BINARY_TEXT_START), (unsigned long)(_end - KERNEL_BINARY_TEXT_START)); -#ifndef CONFIG_DISCONTIGMEM +#ifndef CONFIG_SPARSEMEM /* reserve the holes */ @@ -383,24 +333,17 @@ static void __init setup_bootmem(void) /* Initialize Page Deallocation Table (PDT) and check for bad memory. 
*/ pdc_pdt_init(); -} - -static int __init parisc_text_address(unsigned long vaddr) -{ - static unsigned long head_ptr __initdata; - - if (!head_ptr) - head_ptr = PAGE_MASK & (unsigned long) - dereference_function_descriptor(&parisc_kernel_start); - return core_kernel_text(vaddr) || vaddr == head_ptr; + memblock_allow_resize(); + memblock_dump_all(); } -static void __init map_pages(unsigned long start_vaddr, - unsigned long start_paddr, unsigned long size, - pgprot_t pgprot, int force) +static bool kernel_set_to_readonly; + +static void __ref map_pages(unsigned long start_vaddr, + unsigned long start_paddr, unsigned long size, + pgprot_t pgprot, int force) { - pgd_t *pg_dir; pmd_t *pmd; pte_t *pg_table; unsigned long end_paddr; @@ -412,86 +355,68 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long vaddr; unsigned long ro_start; unsigned long ro_end; - unsigned long kernel_end; + unsigned long kernel_start, kernel_end; ro_start = __pa((unsigned long)_text); ro_end = __pa((unsigned long)&data_start); + kernel_start = __pa((unsigned long)&__init_begin); kernel_end = __pa((unsigned long)&_end); end_paddr = start_paddr + size; - pg_dir = pgd_offset_k(start_vaddr); - -#if PTRS_PER_PMD == 1 - start_pmd = 0; -#else + /* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */ start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); -#endif start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); address = start_paddr; vaddr = start_vaddr; while (address < end_paddr) { -#if PTRS_PER_PMD == 1 - pmd = (pmd_t *)__pa(pg_dir); -#else - pmd = (pmd_t *)pgd_address(*pg_dir); - - /* - * pmd is physical at this point - */ + pgd_t *pgd = pgd_offset_k(vaddr); + p4d_t *p4d = p4d_offset(pgd, vaddr); + pud_t *pud = pud_offset(p4d, vaddr); - if (!pmd) { - pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER); - pmd = (pmd_t *) __pa(pmd); +#if CONFIG_PGTABLE_LEVELS == 3 + if (pud_none(*pud)) { + pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER, + PAGE_SIZE << PMD_TABLE_ORDER); + pud_populate(NULL, pud, pmd); } - - pgd_populate(NULL, pg_dir, __va(pmd)); #endif - pg_dir++; - - /* now change pmd to kernel virtual addresses */ - pmd = (pmd_t *)__va(pmd) + start_pmd; + pmd = pmd_offset(pud, vaddr); for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) { - - /* - * pg_table is physical at this point - */ - - pg_table = (pte_t *)pmd_address(*pmd); - if (!pg_table) { - pg_table = (pte_t *) get_memblock(PAGE_SIZE); - pg_table = (pte_t *) __pa(pg_table); + if (pmd_none(*pmd)) { + pg_table = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); + pmd_populate_kernel(NULL, pmd, pg_table); } - pmd_populate_kernel(NULL, pmd, __va(pg_table)); - - /* now change pg_table to kernel virtual addresses */ - - pg_table = (pte_t *) __va(pg_table) + start_pte; + pg_table = pte_offset_kernel(pmd, vaddr); for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { pte_t pte; - - if (force) - pte = __mk_pte(address, pgprot); - else if (parisc_text_address(vaddr)) { - pte = __mk_pte(address, PAGE_KERNEL_EXEC); - if (address >= ro_start && address < kernel_end) - pte = pte_mkhuge(pte); + pgprot_t prot; + bool huge = false; + + if (force) { + prot = pgprot; + } else if (address < kernel_start || address >= kernel_end) { + /* outside kernel memory */ + prot = PAGE_KERNEL; + } else if (!kernel_set_to_readonly) { + /* still initializing, allow writing to RO memory */ + prot = PAGE_KERNEL_RWX; + huge = true; + } else if (address >= ro_start) { + /* Code (ro) and Data areas */ + prot = 
(address < ro_end) ? + PAGE_KERNEL_EXEC : PAGE_KERNEL; + huge = true; + } else { + prot = PAGE_KERNEL; } - else -#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) - if (address >= ro_start && address < ro_end) { - pte = __mk_pte(address, PAGE_KERNEL_EXEC); + + pte = __mk_pte(address, prot); + if (huge) pte = pte_mkhuge(pte); - } else -#endif - { - pte = __mk_pte(address, pgprot); - if (address >= ro_start && address < kernel_end) - pte = pte_mkhuge(pte); - } if (address >= end_paddr) break; @@ -512,7 +437,7 @@ static void __init map_pages(unsigned long start_vaddr, void __init set_kernel_text_rw(int enable_read_write) { - unsigned long start = (unsigned long) _text; + unsigned long start = (unsigned long) __init_begin; unsigned long end = (unsigned long) &data_start; map_pages(start, __pa(start), end-start, @@ -523,15 +448,19 @@ void __init set_kernel_text_rw(int enable_read_write) flush_tlb_all(); } -void __ref free_initmem(void) +void free_initmem(void) { unsigned long init_begin = (unsigned long)__init_begin; unsigned long init_end = (unsigned long)__init_end; + unsigned long kernel_end = (unsigned long)&_end; + + /* Remap kernel text and data, but do not touch init section yet. */ + map_pages(init_end, __pa(init_end), kernel_end - init_end, + PAGE_KERNEL, 0); /* The init text pages are marked R-X. We have to * flush the icache and mark them RW- * - * This is tricky, because map_pages is in the init section. * Do a dummy remap of the data section first (the data * section is already PAGE_KERNEL) to pull in the TLB entries * for map_kernel */ @@ -543,12 +472,12 @@ void __ref free_initmem(void) PAGE_KERNEL, 1); /* force the kernel to see the new TLB entries */ - __flush_tlb_range(0, init_begin, init_end); + __flush_tlb_range(0, init_begin, kernel_end); /* finally dump all the instructions which were cached, since the * pages are no-longer executable */ flush_icache_range(init_begin, init_end); - + free_initmem_default(POISON_FREE_INITMEM); /* set up a new led state on systems shipped LED State panel */ @@ -559,10 +488,18 @@ void __ref free_initmem(void) #ifdef CONFIG_STRICT_KERNEL_RWX void mark_rodata_ro(void) { - /* rodata memory was already mapped with KERNEL_RO access rights by - pagetable_init() and map_pages(). 
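
/*
 * Illustrative, stand-alone sketch (not part of the patch above) of the
 * protection choice the rewritten map_pages() makes per physical address
 * (the force override and the huge-page marking are left out here).  The
 * address boundaries in main() are made-up numbers, not the real layout.
 */
#include <stdio.h>
#include <stdbool.h>

static const char *pick_prot(unsigned long addr, bool readonly_done,
			     unsigned long kstart, unsigned long kend,
			     unsigned long ro_start, unsigned long ro_end)
{
	if (addr < kstart || addr >= kend)
		return "PAGE_KERNEL";		/* outside the kernel image */
	if (!readonly_done)
		return "PAGE_KERNEL_RWX";	/* still booting: keep it writable */
	if (addr >= ro_start)			/* code (ro) and data areas */
		return addr < ro_end ? "PAGE_KERNEL_EXEC" : "PAGE_KERNEL";
	return "PAGE_KERNEL";
}

int main(void)
{
	unsigned long ks = 0x100000, ro_s = 0x200000, ro_e = 0x400000, ke = 0x600000;

	printf("%s\n", pick_prot(0x080000, true, ks, ke, ro_s, ro_e));
	printf("%s\n", pick_prot(0x300000, true, ks, ke, ro_s, ro_e));
	printf("%s\n", pick_prot(0x500000, true, ks, ke, ro_s, ro_e));
	return 0;
}
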
No need to do additional stuff here */ - printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n", - (unsigned long)(__end_rodata - __start_rodata) >> 10); + unsigned long start = (unsigned long) &__start_rodata; + unsigned long end = (unsigned long) &__end_rodata; + + pr_info("Write protecting the kernel read-only data: %luk\n", + (end - start) >> 10); + + kernel_set_to_readonly = true; + map_pages(start, __pa(start), end - start, PAGE_KERNEL, 0); + + /* force the kernel to see the new page table entries */ + flush_cache_all(); + flush_tlb_all(); } #endif @@ -588,13 +525,9 @@ void mark_rodata_ro(void) #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ & ~(VM_MAP_OFFSET-1))) -void *parisc_vmalloc_start __read_mostly; +void *parisc_vmalloc_start __ro_after_init; EXPORT_SYMBOL(parisc_vmalloc_start); -#ifdef CONFIG_PA11 -unsigned long pcxl_dma_start __read_mostly; -#endif - void __init mem_init(void) { /* Do sanity checks on IPC (compat) structures */ @@ -617,10 +550,17 @@ void __init mem_init(void) BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t)); BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD > BITS_PER_LONG); +#if CONFIG_PGTABLE_LEVELS == 3 + BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD); +#else + BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD); +#endif - high_memory = __va((max_pfn << PAGE_SHIFT)); - set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); - memblock_free_all(); +#ifdef CONFIG_64BIT + /* avoid ldil_%L() asm statements to sign-extend into upper 32-bits */ + BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000); + BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000); +#endif #ifdef CONFIG_PA11 if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) { @@ -631,23 +571,25 @@ void __init mem_init(void) #endif parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); - mem_init_print_info(NULL); - #if 0 /* * Do not expose the virtual kernel memory layout to userspace. * But keep code for debugging purposes. 
*/ printk("virtual kernel memory layout:\n" - " vmalloc : 0x%px - 0x%px (%4ld MB)\n" - " memory : 0x%px - 0x%px (%4ld MB)\n" - " .init : 0x%px - 0x%px (%4ld kB)\n" - " .data : 0x%px - 0x%px (%4ld kB)\n" - " .text : 0x%px - 0x%px (%4ld kB)\n", + " vmalloc : 0x%px - 0x%px (%4ld MB)\n" + " fixmap : 0x%px - 0x%px (%4ld kB)\n" + " memory : 0x%px - 0x%px (%4ld MB)\n" + " .init : 0x%px - 0x%px (%4ld kB)\n" + " .data : 0x%px - 0x%px (%4ld kB)\n" + " .text : 0x%px - 0x%px (%4ld kB)\n", (void*)VMALLOC_START, (void*)VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20, + (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE), + (unsigned long)(FIXMAP_SIZE / 1024), + __va(0), high_memory, ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20, @@ -662,7 +604,7 @@ void __init mem_init(void) #endif } -unsigned long *empty_zero_page __read_mostly; +unsigned long *empty_zero_page __ro_after_init; EXPORT_SYMBOL(empty_zero_page); /* @@ -681,12 +623,10 @@ static void __init pagetable_init(void) for (range = 0; range < npmem_ranges; range++) { unsigned long start_paddr; - unsigned long end_paddr; unsigned long size; start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; size = pmem_ranges[range].pages << PAGE_SHIFT; - end_paddr = start_paddr + size; map_pages((unsigned long)__va(start_paddr), start_paddr, size, PAGE_KERNEL, 0); @@ -700,7 +640,8 @@ static void __init pagetable_init(void) } #endif - empty_zero_page = get_memblock(PAGE_SIZE); + empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); + } static void __init gateway_init(void) @@ -723,37 +664,126 @@ static void __init gateway_init(void) PAGE_SIZE, PAGE_GATEWAY, 1); } -void __init paging_init(void) +static void __init fixmap_init(void) { - int i; + unsigned long addr = FIXMAP_START; + unsigned long end = FIXMAP_START + FIXMAP_SIZE; + pgd_t *pgd = pgd_offset_k(addr); + p4d_t *p4d = p4d_offset(pgd, addr); + pud_t *pud = pud_offset(p4d, addr); + pmd_t *pmd; + + BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE); + +#if CONFIG_PGTABLE_LEVELS == 3 + if (pud_none(*pud)) { + pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER, + PAGE_SIZE << PMD_TABLE_ORDER); + pud_populate(NULL, pud, pmd); + } +#endif + + pmd = pmd_offset(pud, addr); + do { + pte_t *pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); + + pmd_populate_kernel(&init_mm, pmd, pte); + + addr += PAGE_SIZE; + } while (addr < end); +} + +static void __init parisc_bootmem_free(void) +{ + unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, }; + max_zone_pfn[0] = memblock_end_of_DRAM(); + + free_area_init(max_zone_pfn); +} + +void __init paging_init(void) +{ setup_bootmem(); pagetable_init(); gateway_init(); + fixmap_init(); flush_cache_all_local(); /* start with known state */ flush_tlb_all_local(NULL); - for (i = 0; i < npmem_ranges; i++) { - unsigned long zones_size[MAX_NR_ZONES] = { 0, }; - - zones_size[ZONE_NORMAL] = pmem_ranges[i].pages; - -#ifdef CONFIG_DISCONTIGMEM - /* Need to initialize the pfnnid_map before we can initialize - the zone */ - { - int j; - for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT); - j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT); - j++) { - pfnnid_map[j] = i; - } + sparse_init(); + parisc_bootmem_free(); +} + +static void alloc_btlb(unsigned long start, unsigned long end, int *slot, + unsigned long entry_info) +{ + const int slot_max = btlb_info.fixed_range_info.num_comb; + int min_num_pages = btlb_info.min_size; + unsigned long size; + + /* map at minimum 4 pages */ + if (min_num_pages < 4) + min_num_pages = 4; + + size = 
HUGEPAGE_SIZE; + while (start < end && *slot < slot_max && size >= PAGE_SIZE) { + /* starting address must have same alignment as size! */ + /* if correctly aligned and fits in double size, increase */ + if (((start & (2 * size - 1)) == 0) && + (end - start) >= (2 * size)) { + size <<= 1; + continue; } -#endif + /* if current size alignment is too big, try smaller size */ + if ((start & (size - 1)) != 0) { + size >>= 1; + continue; + } + if ((end - start) >= size) { + if ((size >> PAGE_SHIFT) >= min_num_pages) + pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT, + size >> PAGE_SHIFT, entry_info, *slot); + (*slot)++; + start += size; + continue; + } + size /= 2; + continue; + } +} - free_area_init_node(i, zones_size, - pmem_ranges[i].start_pfn, NULL); +void btlb_init_per_cpu(void) +{ + unsigned long s, t, e; + int slot; + + /* BTLBs are not available on 64-bit CPUs */ + if (IS_ENABLED(CONFIG_PA20)) + return; + else if (pdc_btlb_info(&btlb_info) < 0) { + memset(&btlb_info, 0, sizeof btlb_info); } + + /* insert BLTLBs for code and data segments */ + s = (uintptr_t) dereference_function_descriptor(&_stext); + e = (uintptr_t) dereference_function_descriptor(&_etext); + t = (uintptr_t) dereference_function_descriptor(&_sdata); + BUG_ON(t != e); + + /* code segments */ + slot = 0; + alloc_btlb(s, e, &slot, 0x13800000); + + /* sanity check */ + t = (uintptr_t) dereference_function_descriptor(&_edata); + e = (uintptr_t) dereference_function_descriptor(&__bss_start); + BUG_ON(t != e); + + /* data segments */ + s = (uintptr_t) dereference_function_descriptor(&_sdata); + e = (uintptr_t) dereference_function_descriptor(&__bss_stop); + alloc_btlb(s, e, &slot, 0x11800000); } #ifdef CONFIG_PA20 @@ -786,7 +816,7 @@ static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */ static unsigned long dirty_space_id[SID_ARRAY_SIZE]; static unsigned long space_id_index; static unsigned long free_space_ids = NR_SPACE_IDS - 1; -static unsigned long dirty_space_ids = 0; +static unsigned long dirty_space_ids; static DEFINE_SPINLOCK(sid_lock); @@ -808,7 +838,7 @@ unsigned long alloc_sid(void) free_space_ids--; index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index); - space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1))); + space_id[BIT_WORD(index)] |= BIT_MASK(index); space_id_index = index; spin_unlock(&sid_lock); @@ -819,16 +849,16 @@ unsigned long alloc_sid(void) void free_sid(unsigned long spaceid) { unsigned long index = spaceid >> SPACEID_SHIFT; - unsigned long *dirty_space_offset; + unsigned long *dirty_space_offset, mask; - dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG); - index &= (BITS_PER_LONG - 1); + dirty_space_offset = &dirty_space_id[BIT_WORD(index)]; + mask = BIT_MASK(index); spin_lock(&sid_lock); - BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */ + BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */ - *dirty_space_offset |= (1L << index); + *dirty_space_offset |= mask; dirty_space_ids++; spin_unlock(&sid_lock); @@ -907,9 +937,9 @@ void flush_tlb_all(void) { int do_recycle; - __inc_irq_stat(irq_tlb_count); do_recycle = 0; spin_lock(&sid_lock); + __inc_irq_stat(irq_tlb_count); if (dirty_space_ids > RECYCLE_THRESHOLD) { BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */ get_dirty_sids(&recycle_ndirty,recycle_dirty_array); @@ -928,17 +958,50 @@ void flush_tlb_all(void) #else void flush_tlb_all(void) { - __inc_irq_stat(irq_tlb_count); spin_lock(&sid_lock); + 
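
/*
 * Illustrative, stand-alone sketch (not part of the patch above) of the
 * block-size selection in alloc_btlb(): grow the block while the start
 * address is naturally aligned to the doubled size and the range still
 * covers it, otherwise shrink; emit one block per iteration that fits.
 * The slot limit and the minimum-size check are omitted, and the page and
 * huge-page sizes below are made-up constants.
 */
#include <stdio.h>

#define EX_PAGE_SIZE	(4UL * 1024)
#define EX_HUGE_SIZE	(4UL * 1024 * 1024)

static void pick_btlb_blocks(unsigned long start, unsigned long end)
{
	unsigned long size = EX_HUGE_SIZE;

	while (start < end && size >= EX_PAGE_SIZE) {
		if ((start & (2 * size - 1)) == 0 && end - start >= 2 * size) {
			size <<= 1;	/* still aligned: try a bigger block */
			continue;
		}
		if (start & (size - 1)) {
			size >>= 1;	/* too big for this start address */
			continue;
		}
		if (end - start >= size) {
			printf("map %#lx..%#lx (%lu KiB)\n",
			       start, start + size, size / 1024);
			start += size;
			continue;
		}
		size >>= 1;		/* remaining tail is smaller than the block */
	}
}

int main(void)
{
	pick_btlb_blocks(0x00400000, 0x00b20000);	/* arbitrary example range */
	return 0;
}
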
__inc_irq_stat(irq_tlb_count); flush_tlb_all_local(NULL); recycle_sids(); spin_unlock(&sid_lock); } #endif -#ifdef CONFIG_BLK_DEV_INITRD -void free_initrd_mem(unsigned long start, unsigned long end) +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_NONE, + [VM_WRITE | VM_READ] = PAGE_READONLY, + [VM_EXEC] = PAGE_EXECREAD, + [VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_EXEC | VM_WRITE] = PAGE_EXECREAD, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_EXECREAD, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX +}; +DECLARE_VM_GET_PAGE_PROT + +#ifdef CONFIG_EXECMEM +static struct execmem_info execmem_info __ro_after_init; + +struct execmem_info __init *execmem_arch_setup(void) { - free_reserved_area((void *)start, (void *)end, -1, "initrd"); + execmem_info = (struct execmem_info){ + .ranges = { + [EXECMEM_DEFAULT] = { + .start = VMALLOC_START, + .end = VMALLOC_END, + .pgprot = PAGE_KERNEL_RWX, + .alignment = 1, + }, + }, + }; + + return &execmem_info; } -#endif +#endif /* CONFIG_EXECMEM */ diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c index 92a9b5f12f98..0b65c4b3baee 100644 --- a/arch/parisc/mm/ioremap.c +++ b/arch/parisc/mm/ioremap.c @@ -3,7 +3,7 @@ * arch/parisc/mm/ioremap.c * * (C) Copyright 1995 1996 Linus Torvalds - * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de> + * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de> * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org> */ @@ -11,42 +11,19 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/io.h> -#include <asm/pgalloc.h> +#include <linux/mm.h> -/* - * Generic mapping function (not visible outside): - */ - -/* - * Remap an arbitrary physical address space into the kernel virtual - * address space. - * - * NOTE! We need to allow non-page-aligned mappings too: we will obviously - * have to convert them into an offset in a page-aligned mapping, but the - * caller shouldn't need to know that small detail. - */ -void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) +void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, + pgprot_t prot) { - void __iomem *addr; - struct vm_struct *area; - unsigned long offset, last_addr; - pgprot_t pgprot; - #ifdef CONFIG_EISA unsigned long end = phys_addr + size - 1; /* Support EISA addresses */ if ((phys_addr >= 0x00080000 && end < 0x000fffff) || - (phys_addr >= 0x00500000 && end < 0x03bfffff)) { + (phys_addr >= 0x00500000 && end < 0x03bfffff)) phys_addr |= F_EXTEND(0xfc000000); - flags |= _PAGE_NO_CACHE; - } #endif - /* Don't allow wraparound or zero size */ - last_addr = phys_addr + size - 1; - if (!size || last_addr < phys_addr) - return NULL; - /* * Don't allow anybody to remap normal RAM that we're using.. */ @@ -64,37 +41,6 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l } } - pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | - _PAGE_ACCESSED | flags); - - /* - * Mappings have to be page-aligned - */ - offset = phys_addr & ~PAGE_MASK; - phys_addr &= PAGE_MASK; - size = PAGE_ALIGN(last_addr + 1) - phys_addr; - - /* - * Ok, go for it.. 
- */ - area = get_vm_area(size, VM_IOREMAP); - if (!area) - return NULL; - - addr = (void __iomem *) area->addr; - if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, - phys_addr, pgprot)) { - vfree(addr); - return NULL; - } - - return (void __iomem *) (offset + (char __iomem *)addr); -} -EXPORT_SYMBOL(__ioremap); - -void iounmap(const volatile void __iomem *addr) -{ - if (addr > high_memory) - return vfree((void *) (PAGE_MASK & (unsigned long __force) addr)); + return generic_ioremap_prot(phys_addr, size, prot); } -EXPORT_SYMBOL(iounmap); +EXPORT_SYMBOL(ioremap_prot); diff --git a/arch/parisc/net/Makefile b/arch/parisc/net/Makefile new file mode 100644 index 000000000000..22b12024d4c3 --- /dev/null +++ b/arch/parisc/net/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_BPF_JIT) += bpf_jit_core.o + +ifeq ($(CONFIG_64BIT),y) + obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o +else + obj-$(CONFIG_BPF_JIT) += bpf_jit_comp32.o +endif diff --git a/arch/parisc/net/bpf_jit.h b/arch/parisc/net/bpf_jit.h new file mode 100644 index 000000000000..8b8896959f04 --- /dev/null +++ b/arch/parisc/net/bpf_jit.h @@ -0,0 +1,479 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common functionality for PARISC32 and PARISC64 BPF JIT compilers + * + * Copyright (c) 2023 Helge Deller <deller@gmx.de> + * + */ + +#ifndef _BPF_JIT_H +#define _BPF_JIT_H + +#include <linux/bpf.h> +#include <linux/filter.h> +#include <asm/cacheflush.h> + +#define HPPA_JIT_DEBUG 0 +#define HPPA_JIT_REBOOT 0 +#define HPPA_JIT_DUMP 0 + +#define OPTIMIZE_HPPA 1 /* enable some asm optimizations */ +// echo 1 > /proc/sys/net/core/bpf_jit_enable + +#define HPPA_R(nr) nr /* use HPPA register #nr */ + +enum { + HPPA_REG_ZERO = 0, /* The constant value 0 */ + HPPA_REG_R1 = 1, /* used for addil */ + HPPA_REG_RP = 2, /* Return address */ + + HPPA_REG_ARG7 = 19, /* ARG4-7 used in 64-bit ABI */ + HPPA_REG_ARG6 = 20, + HPPA_REG_ARG5 = 21, + HPPA_REG_ARG4 = 22, + + HPPA_REG_ARG3 = 23, /* ARG0-3 in 32- and 64-bit ABI */ + HPPA_REG_ARG2 = 24, + HPPA_REG_ARG1 = 25, + HPPA_REG_ARG0 = 26, + HPPA_REG_GP = 27, /* Global pointer */ + HPPA_REG_RET0 = 28, /* Return value, HI in 32-bit */ + HPPA_REG_RET1 = 29, /* Return value, LOW in 32-bit */ + HPPA_REG_SP = 30, /* Stack pointer */ + HPPA_REG_R31 = 31, + +#ifdef CONFIG_64BIT + HPPA_REG_TCC = 3, + HPPA_REG_TCC_SAVED = 4, + HPPA_REG_TCC_IN_INIT = HPPA_REG_R31, +#else + HPPA_REG_TCC = 18, + HPPA_REG_TCC_SAVED = 17, + HPPA_REG_TCC_IN_INIT = HPPA_REG_R31, +#endif + + HPPA_REG_T0 = HPPA_REG_R1, /* Temporaries */ + HPPA_REG_T1 = HPPA_REG_R31, + HPPA_REG_T2 = HPPA_REG_ARG4, +#ifndef CONFIG_64BIT + HPPA_REG_T3 = HPPA_REG_ARG5, /* not used in 64-bit */ + HPPA_REG_T4 = HPPA_REG_ARG6, + HPPA_REG_T5 = HPPA_REG_ARG7, +#endif +}; + +struct hppa_jit_context { + struct bpf_prog *prog; + u32 *insns; /* HPPA insns */ + int ninsns; + int reg_seen_collect; + int reg_seen; + int body_len; + int epilogue_offset; + int prologue_len; + int *offset; /* BPF to HPPA */ +}; + +#define REG_SET_SEEN(ctx, nr) { if (ctx->reg_seen_collect) ctx->reg_seen |= BIT(nr); } +#define REG_SET_SEEN_ALL(ctx) { if (ctx->reg_seen_collect) ctx->reg_seen = -1; } +#define REG_FORCE_SEEN(ctx, nr) { ctx->reg_seen |= BIT(nr); } +#define REG_WAS_SEEN(ctx, nr) (ctx->reg_seen & BIT(nr)) +#define REG_ALL_SEEN(ctx) (ctx->reg_seen == -1) + +#define HPPA_INSN_SIZE 4 /* bytes per HPPA asm instruction */ +#define REG_SIZE REG_SZ /* bytes per native "long" word */ + +/* subtract hppa displacement on branches which is .+8 */ 
+#define HPPA_BRANCH_DISPLACEMENT 2 /* instructions */ + +/* asm statement indicator to execute delay slot */ +#define EXEC_NEXT_INSTR 0 +#define NOP_NEXT_INSTR 1 + +#define im11(val) (((u32)(val)) & 0x07ff) + +#define hppa_ldil(addr, reg) \ + hppa_t5_insn(0x08, reg, ((u32)(addr)) >> 11) /* ldil im21,reg */ +#define hppa_addil(addr, reg) \ + hppa_t5_insn(0x0a, reg, ((u32)(addr)) >> 11) /* addil im21,reg -> result in gr1 */ +#define hppa_ldo(im14, reg, target) \ + hppa_t1_insn(0x0d, reg, target, im14) /* ldo val14(reg),target */ +#define hppa_ldi(im14, reg) \ + hppa_ldo(im14, HPPA_REG_ZERO, reg) /* ldi val14,reg */ +#define hppa_or(reg1, reg2, target) \ + hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x09, target) /* or reg1,reg2,target */ +#define hppa_or_cond(reg1, reg2, cond, f, target) \ + hppa_t6_insn(0x02, reg2, reg1, cond, f, 0x09, target) +#define hppa_and(reg1, reg2, target) \ + hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x08, target) /* and reg1,reg2,target */ +#define hppa_and_cond(reg1, reg2, cond, f, target) \ + hppa_t6_insn(0x02, reg2, reg1, cond, f, 0x08, target) +#define hppa_xor(reg1, reg2, target) \ + hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x0a, target) /* xor reg1,reg2,target */ +#define hppa_add(reg1, reg2, target) \ + hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x18, target) /* add reg1,reg2,target */ +#define hppa_addc(reg1, reg2, target) \ + hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x1c, target) /* add,c reg1,reg2,target */ +#define hppa_sub(reg1, reg2, target) \ + hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x10, target) /* sub reg1,reg2,target */ +#define hppa_subb(reg1, reg2, target) \ + hppa_t6_insn(0x02, reg2, reg1, 0, 0, 0x14, target) /* sub,b reg1,reg2,target */ +#define hppa_nop() \ + hppa_or(0,0,0) /* nop: or 0,0,0 */ +#define hppa_addi(val11, reg, target) \ + hppa_t7_insn(0x2d, reg, target, val11) /* addi im11,reg,target */ +#define hppa_subi(val11, reg, target) \ + hppa_t7_insn(0x25, reg, target, val11) /* subi im11,reg,target */ +#define hppa_copy(reg, target) \ + hppa_or(reg, HPPA_REG_ZERO, target) /* copy reg,target */ +#define hppa_ldw(val14, reg, target) \ + hppa_t1_insn(0x12, reg, target, val14) /* ldw im14(reg),target */ +#define hppa_ldb(val14, reg, target) \ + hppa_t1_insn(0x10, reg, target, val14) /* ldb im14(reg),target */ +#define hppa_ldh(val14, reg, target) \ + hppa_t1_insn(0x11, reg, target, val14) /* ldh im14(reg),target */ +#define hppa_stw(reg, val14, base) \ + hppa_t1_insn(0x1a, base, reg, val14) /* stw reg,im14(base) */ +#define hppa_stb(reg, val14, base) \ + hppa_t1_insn(0x18, base, reg, val14) /* stb reg,im14(base) */ +#define hppa_sth(reg, val14, base) \ + hppa_t1_insn(0x19, base, reg, val14) /* sth reg,im14(base) */ +#define hppa_stwma(reg, val14, base) \ + hppa_t1_insn(0x1b, base, reg, val14) /* stw,ma reg,im14(base) */ +#define hppa_bv(reg, base, nop) \ + hppa_t11_insn(0x3a, base, reg, 0x06, 0, nop) /* bv(,n) reg(base) */ +#define hppa_be(offset, base) \ + hppa_t12_insn(0x38, base, offset, 0x00, 1) /* be,n offset(0,base) */ +#define hppa_be_l(offset, base, nop) \ + hppa_t12_insn(0x39, base, offset, 0x00, nop) /* ble(,nop) offset(0,base) */ +#define hppa_mtctl(reg, cr) \ + hppa_t21_insn(0x00, cr, reg, 0xc2, 0) /* mtctl reg,cr */ +#define hppa_mtsar(reg) \ + hppa_mtctl(reg, 11) /* mtsar reg */ +#define hppa_zdep(r, p, len, target) \ + hppa_t10_insn(0x35, target, r, 0, 2, p, len) /* zdep r,a,b,t */ +#define hppa_shl(r, len, target) \ + hppa_zdep(r, len, len, lo(rd)) +#define hppa_depwz(r, p, len, target) \ + hppa_t10_insn(0x35, target, r, 0, 3, 31-(p), 32-(len)) /* 
depw,z r,p,len,ret1 */ +#define hppa_depwz_sar(reg, target) \ + hppa_t1_insn(0x35, target, reg, 0) /* depw,z reg,sar,32,target */ +#define hppa_shrpw_sar(reg, target) \ + hppa_t10_insn(0x34, reg, 0, 0, 0, 0, target) /* shrpw r0,reg,sar,target */ +#define hppa_shrpw(r1, r2, p, target) \ + hppa_t10_insn(0x34, r2, r1, 0, 2, 31-(p), target) /* shrpw r1,r2,p,target */ +#define hppa_shd(r1, r2, p, target) \ + hppa_t10_insn(0x34, r2, r1, 0, 2, 31-(p), target) /* shrpw r1,r2,p,tarfer */ +#define hppa_extrws_sar(reg, target) \ + hppa_t10_insn(0x34, reg, target, 0, 5, 0, 0) /* extrw,s reg,sar,32,ret0 */ +#define hppa_extrws(reg, p, len, target) \ + hppa_t10_insn(0x34, reg, target, 0, 7, p, len) /* extrw,s reg,p,len,target */ +#define hppa_extru(r, p, len, target) \ + hppa_t10_insn(0x34, r, target, 0, 6, p, 32-(len)) +#define hppa_shr(r, len, target) \ + hppa_extru(r, 31-(len), 32-(len), target) +#define hppa_bl(imm17, rp) \ + hppa_t12_insn(0x3a, rp, imm17, 0x00, 1) /* bl,n target_addr,rp */ +#define hppa_sh2add(r1, r2, target) \ + hppa_t6_insn(0x02, r2, r1, 0, 0, 0x1a, target) /* sh2add r1,r2,target */ + +#define hppa_combt(r1, r2, target_addr, condition, nop) \ + hppa_t11_insn(IS_ENABLED(CONFIG_64BIT) ? 0x27 : 0x20, \ + r2, r1, condition, target_addr, nop) /* combt,cond,n r1,r2,addr */ +#define hppa_beq(r1, r2, target_addr) \ + hppa_combt(r1, r2, target_addr, 1, NOP_NEXT_INSTR) +#define hppa_blt(r1, r2, target_addr) \ + hppa_combt(r1, r2, target_addr, 2, NOP_NEXT_INSTR) +#define hppa_ble(r1, r2, target_addr) \ + hppa_combt(r1, r2, target_addr, 3, NOP_NEXT_INSTR) +#define hppa_bltu(r1, r2, target_addr) \ + hppa_combt(r1, r2, target_addr, 4, NOP_NEXT_INSTR) +#define hppa_bleu(r1, r2, target_addr) \ + hppa_combt(r1, r2, target_addr, 5, NOP_NEXT_INSTR) + +#define hppa_combf(r1, r2, target_addr, condition, nop) \ + hppa_t11_insn(IS_ENABLED(CONFIG_64BIT) ? 
0x2f : 0x22, \ + r2, r1, condition, target_addr, nop) /* combf,cond,n r1,r2,addr */ +#define hppa_bne(r1, r2, target_addr) \ + hppa_combf(r1, r2, target_addr, 1, NOP_NEXT_INSTR) +#define hppa_bge(r1, r2, target_addr) \ + hppa_combf(r1, r2, target_addr, 2, NOP_NEXT_INSTR) +#define hppa_bgt(r1, r2, target_addr) \ + hppa_combf(r1, r2, target_addr, 3, NOP_NEXT_INSTR) +#define hppa_bgeu(r1, r2, target_addr) \ + hppa_combf(r1, r2, target_addr, 4, NOP_NEXT_INSTR) +#define hppa_bgtu(r1, r2, target_addr) \ + hppa_combf(r1, r2, target_addr, 5, NOP_NEXT_INSTR) + +/* 64-bit instructions */ +#ifdef CONFIG_64BIT +#define hppa64_ldd_reg(reg, b, target) \ + hppa_t10_insn(0x03, b, reg, 0, 0, 3<<1, target) +#define hppa64_ldd_im5(im5, b, target) \ + hppa_t10_insn(0x03, b, low_sign_unext(im5,5), 0, 1<<2, 3<<1, target) +#define hppa64_ldd_im16(im16, b, target) \ + hppa_t10_insn(0x14, b, target, 0, 0, 0, 0) | re_assemble_16(im16) +#define hppa64_std_im5(src, im5, b) \ + hppa_t10_insn(0x03, b, src, 0, 1<<2, 0xB<<1, low_sign_unext(im5,5)) +#define hppa64_std_im16(src, im16, b) \ + hppa_t10_insn(0x1c, b, src, 0, 0, 0, 0) | re_assemble_16(im16) +#define hppa64_bl_long(offs22) \ + hppa_t12_L_insn(0x3a, offs22, 1) +#define hppa64_mtsarcm(reg) \ + hppa_t21_insn(0x00, 11, reg, 0xc6, 0) +#define hppa64_shrpd_sar(reg, target) \ + hppa_t10_insn(0x34, reg, 0, 0, 0, 1<<4, target) +#define hppa64_shladd(r1, sa, r2, target) \ + hppa_t6_insn(0x02, r2, r1, 0, 0, 1<<4|1<<3|sa, target) +#define hppa64_depdz_sar(reg, target) \ + hppa_t21_insn(0x35, target, reg, 3<<3, 0) +#define hppa_extrd_sar(reg, target, se) \ + hppa_t10_insn(0x34, reg, target, 0, 0, 0, 0) | 2<<11 | (se&1)<<10 | 1<<9 | 1<<8 +#define hppa64_bve_l_rp(base) \ + (0x3a << 26) | (base << 21) | 0xf000 +#define hppa64_permh_3210(r, target) \ + (0x3e << 26) | (r << 21) | (r << 16) | (target) | 0x00006900 +#define hppa64_hshl(r, sa, target) \ + (0x3e << 26) | (0 << 21) | (r << 16) | (sa << 6) | (target) | 0x00008800 +#define hppa64_hshr_u(r, sa, target) \ + (0x3e << 26) | (r << 21) | (0 << 16) | (sa << 6) | (target) | 0x0000c800 +#endif + +struct hppa_jit_data { + struct bpf_binary_header *header; + u8 *image; + struct hppa_jit_context ctx; +}; + +static inline void bpf_fill_ill_insns(void *area, unsigned int size) +{ + memset(area, 0, size); +} + +static inline void bpf_flush_icache(void *start, void *end) +{ + flush_icache_range((unsigned long)start, (unsigned long)end); +} + +/* Emit a 4-byte HPPA instruction. */ +static inline void emit(const u32 insn, struct hppa_jit_context *ctx) +{ + if (ctx->insns) { + ctx->insns[ctx->ninsns] = insn; + } + + ctx->ninsns++; +} + +static inline int epilogue_offset(struct hppa_jit_context *ctx) +{ + int to = ctx->epilogue_offset, from = ctx->ninsns; + + return (to - from); +} + +/* Return -1 or inverted cond. */ +static inline int invert_bpf_cond(u8 cond) +{ + switch (cond) { + case BPF_JEQ: + return BPF_JNE; + case BPF_JGT: + return BPF_JLE; + case BPF_JLT: + return BPF_JGE; + case BPF_JGE: + return BPF_JLT; + case BPF_JLE: + return BPF_JGT; + case BPF_JNE: + return BPF_JEQ; + case BPF_JSGT: + return BPF_JSLE; + case BPF_JSLT: + return BPF_JSGE; + case BPF_JSGE: + return BPF_JSLT; + case BPF_JSLE: + return BPF_JSGT; + } + return -1; +} + + +static inline signed long hppa_offset(int insn, int off, struct hppa_jit_context *ctx) +{ + signed long from, to; + + off++; /* BPF branch is from PC+1 */ + from = (insn > 0) ? ctx->offset[insn - 1] : 0; + to = (insn + off > 0) ? 
ctx->offset[insn + off - 1] : 0; + return (to - from); +} + +/* does the signed value fits into a given number of bits ? */ +static inline int check_bits_int(signed long val, int bits) +{ + return ((val >= 0) && ((val >> bits) == 0)) || + ((val < 0) && (((~((u32)val)) >> (bits-1)) == 0)); +} + +/* can the signed value be used in relative code ? */ +static inline int relative_bits_ok(signed long val, int bits) +{ + return ((val >= 0) && (val < (1UL << (bits-1)))) || /* XXX */ + ((val < 0) && (((~((unsigned long)val)) >> (bits-1)) == 0) + && (val & (1UL << (bits-1)))); +} + +/* can the signed value be used in relative branches ? */ +static inline int relative_branch_ok(signed long val, int bits) +{ + return ((val >= 0) && (val < (1UL << (bits-2)))) || /* XXX */ + ((val < 0) && (((~((unsigned long)val)) < (1UL << (bits-2)))) + && (val & (1UL << (bits-1)))); +} + + +#define is_5b_int(val) check_bits_int(val, 5) + +static inline unsigned sign_unext(unsigned x, unsigned len) +{ + unsigned len_ones; + + len_ones = (1 << len) - 1; + return x & len_ones; +} + +static inline unsigned low_sign_unext(unsigned x, unsigned len) +{ + unsigned temp; + unsigned sign; + + sign = (x >> (len-1)) & 1; + temp = sign_unext (x, len-1); + return (temp << 1) | sign; +} + +static inline unsigned re_assemble_12(unsigned as12) +{ + return (( (as12 & 0x800) >> 11) + | ((as12 & 0x400) >> (10 - 2)) + | ((as12 & 0x3ff) << (1 + 2))); +} + +static inline unsigned re_assemble_14(unsigned as14) +{ + return (( (as14 & 0x1fff) << 1) + | ((as14 & 0x2000) >> 13)); +} + +#ifdef CONFIG_64BIT +static inline unsigned re_assemble_16(unsigned as16) +{ + unsigned s, t; + + /* Unusual 16-bit encoding, for wide mode only. */ + t = (as16 << 1) & 0xffff; + s = (as16 & 0x8000); + return (t ^ s ^ (s >> 1)) | (s >> 15); +} +#endif + +static inline unsigned re_assemble_17(unsigned as17) +{ + return (( (as17 & 0x10000) >> 16) + | ((as17 & 0x0f800) << (16 - 11)) + | ((as17 & 0x00400) >> (10 - 2)) + | ((as17 & 0x003ff) << (1 + 2))); +} + +static inline unsigned re_assemble_21(unsigned as21) +{ + return (( (as21 & 0x100000) >> 20) + | ((as21 & 0x0ffe00) >> 8) + | ((as21 & 0x000180) << 7) + | ((as21 & 0x00007c) << 14) + | ((as21 & 0x000003) << 12)); +} + +static inline unsigned re_assemble_22(unsigned as22) +{ + return (( (as22 & 0x200000) >> 21) + | ((as22 & 0x1f0000) << (21 - 16)) + | ((as22 & 0x00f800) << (16 - 11)) + | ((as22 & 0x000400) >> (10 - 2)) + | ((as22 & 0x0003ff) << (1 + 2))); +} + +/* Various HPPA instruction formats. */ +/* see https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf, appendix C */ + +static inline u32 hppa_t1_insn(u8 opcode, u8 b, u8 r, s16 im14) +{ + return ((opcode << 26) | (b << 21) | (r << 16) | re_assemble_14(im14)); +} + +static inline u32 hppa_t5_insn(u8 opcode, u8 tr, u32 val21) +{ + return ((opcode << 26) | (tr << 21) | re_assemble_21(val21)); +} + +static inline u32 hppa_t6_insn(u8 opcode, u8 r2, u8 r1, u8 c, u8 f, u8 ext6, u16 t) +{ + return ((opcode << 26) | (r2 << 21) | (r1 << 16) | (c << 13) | (f << 12) | + (ext6 << 6) | t); +} + +/* 7. Arithmetic immediate */ +static inline u32 hppa_t7_insn(u8 opcode, u8 r, u8 t, u32 im11) +{ + return ((opcode << 26) | (r << 21) | (t << 16) | low_sign_unext(im11, 11)); +} + +/* 10. Shift instructions */ +static inline u32 hppa_t10_insn(u8 opcode, u8 r2, u8 r1, u8 c, u8 ext3, u8 cp, u8 t) +{ + return ((opcode << 26) | (r2 << 21) | (r1 << 16) | (c << 13) | + (ext3 << 10) | (cp << 5) | t); +} + +/* 11. 
Conditional branch instructions */ +static inline u32 hppa_t11_insn(u8 opcode, u8 r2, u8 r1, u8 c, u32 w, u8 nop) +{ + u32 ra = re_assemble_12(w); + // ra = low_sign_unext(w,11) | (w & (1<<10) + return ((opcode << 26) | (r2 << 21) | (r1 << 16) | (c << 13) | (nop << 1) | ra); +} + +/* 12. Branch instructions */ +static inline u32 hppa_t12_insn(u8 opcode, u8 rp, u32 w, u8 ext3, u8 nop) +{ + return ((opcode << 26) | (rp << 21) | (ext3 << 13) | (nop << 1) | re_assemble_17(w)); +} + +static inline u32 hppa_t12_L_insn(u8 opcode, u32 w, u8 nop) +{ + return ((opcode << 26) | (0x05 << 13) | (nop << 1) | re_assemble_22(w)); +} + +/* 21. Move to control register */ +static inline u32 hppa_t21_insn(u8 opcode, u8 r2, u8 r1, u8 ext8, u8 t) +{ + return ((opcode << 26) | (r2 << 21) | (r1 << 16) | (ext8 << 5) | t); +} + +/* Helper functions called by jit code on HPPA32 and HPPA64. */ + +u64 hppa_div64(u64 div, u64 divisor); +u64 hppa_div64_rem(u64 div, u64 divisor); + +/* Helper functions that emit HPPA instructions when possible. */ + +void bpf_jit_build_prologue(struct hppa_jit_context *ctx); +void bpf_jit_build_epilogue(struct hppa_jit_context *ctx); + +int bpf_jit_emit_insn(const struct bpf_insn *insn, struct hppa_jit_context *ctx, + bool extra_pass); + +#endif /* _BPF_JIT_H */ diff --git a/arch/parisc/net/bpf_jit_comp32.c b/arch/parisc/net/bpf_jit_comp32.c new file mode 100644 index 000000000000..5ff0cf925fe9 --- /dev/null +++ b/arch/parisc/net/bpf_jit_comp32.c @@ -0,0 +1,1615 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BPF JIT compiler for PA-RISC (32-bit) + * + * Copyright (c) 2023 Helge Deller <deller@gmx.de> + * + * The code is based on the BPF JIT compiler for RV64 by Björn Töpel and + * the BPF JIT compiler for 32-bit ARM by Shubham Bansal and Mircea Gherzan. + */ + +#include <linux/bpf.h> +#include <linux/filter.h> +#include <linux/libgcc.h> +#include "bpf_jit.h" + +/* + * Stack layout during BPF program execution (note: stack grows up): + * + * high + * HPPA32 sp => +----------+ <= HPPA32 fp + * | saved sp | + * | saved rp | + * | ... | HPPA32 callee-saved registers + * | curr args| + * | local var| + * +----------+ <= (sp - 4 * NR_SAVED_REGISTERS) + * | lo(R9) | + * | hi(R9) | + * | lo(FP) | JIT scratch space for BPF registers + * | hi(FP) | + * | ... | + * +----------+ <= (sp - 4 * NR_SAVED_REGISTERS + * | | - 4 * BPF_JIT_SCRATCH_REGS) + * | | + * | ... | BPF program stack + * | | + * | ... | Function call stack + * | | + * +----------+ + * low + */ + +enum { + /* Stack layout - these are offsets from top of JIT scratch space. */ + BPF_R8_HI, + BPF_R8_LO, + BPF_R9_HI, + BPF_R9_LO, + BPF_FP_HI, + BPF_FP_LO, + BPF_AX_HI, + BPF_AX_LO, + BPF_R0_TEMP_HI, + BPF_R0_TEMP_LO, + BPF_JIT_SCRATCH_REGS, +}; + +/* Number of callee-saved registers stored to stack: rp, r3-r18. */ +#define NR_SAVED_REGISTERS (18 - 3 + 1 + 8) + +/* Offset from fp for BPF registers stored on stack. */ +#define STACK_OFFSET(k) (- (NR_SAVED_REGISTERS + k + 1)) +#define STACK_ALIGN FRAME_SIZE + +#define EXIT_PTR_LOAD(reg) hppa_ldw(-0x08, HPPA_REG_SP, reg) +#define EXIT_PTR_STORE(reg) hppa_stw(reg, -0x08, HPPA_REG_SP) +#define EXIT_PTR_JUMP(reg, nop) hppa_bv(HPPA_REG_ZERO, reg, nop) + +#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) +#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) +#define TMP_REG_R0 (MAX_BPF_JIT_REG + 2) + +static const s8 regmap[][2] = { + /* Return value from in-kernel function, and exit value from eBPF. 
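
/*
 * Illustrative, stand-alone sketch (not part of the patch above) of the
 * arithmetic behind the stack layout above and the regmap[] table that
 * follows: stacked BPF registers are negative word indices relative to the
 * stack pointer, and every 64-bit BPF register is kept as a {HI, LOW} pair
 * of 32-bit words.  A 4-byte word is assumed, as in the 32-bit JIT; the
 * printed numbers are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_REG_SIZE		4			/* bytes per word (32-bit) */
#define EX_NR_SAVED_REGS	(18 - 3 + 1 + 8)	/* mirrors NR_SAVED_REGISTERS */
#define EX_STACK_OFFSET(k)	(-(EX_NR_SAVED_REGS + (k) + 1))

int main(void)
{
	uint64_t val = 0x1122334455667788ULL;
	unsigned int hi = val >> 32;		/* "hi(reg)" half */
	unsigned int lo = (unsigned int)val;	/* "lo(reg)" half */
	int k;

	printf("hi=%#x lo=%#x\n", hi, lo);

	/* byte offsets of the first JIT scratch slots below the stack pointer */
	for (k = 0; k < 4; k++)
		printf("slot %d -> sp%+d bytes\n", k, EX_STACK_OFFSET(k) * EX_REG_SIZE);
	return 0;
}
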
*/ + [BPF_REG_0] = {HPPA_REG_RET0, HPPA_REG_RET1}, /* HI/LOW */ + + /* Arguments from eBPF program to in-kernel function. */ + [BPF_REG_1] = {HPPA_R(3), HPPA_R(4)}, + [BPF_REG_2] = {HPPA_R(5), HPPA_R(6)}, + [BPF_REG_3] = {HPPA_R(7), HPPA_R(8)}, + [BPF_REG_4] = {HPPA_R(9), HPPA_R(10)}, + [BPF_REG_5] = {HPPA_R(11), HPPA_R(12)}, + + [BPF_REG_6] = {HPPA_R(13), HPPA_R(14)}, + [BPF_REG_7] = {HPPA_R(15), HPPA_R(16)}, + /* + * Callee-saved registers that in-kernel function will preserve. + * Stored on the stack. + */ + [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)}, + [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)}, + + /* Read-only frame pointer to access BPF stack. Not needed. */ + [BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)}, + + /* Temporary register for blinding constants. Stored on the stack. */ + [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)}, + /* + * Temporary registers used by the JIT to operate on registers stored + * on the stack. Save t0 and t1 to be used as temporaries in generated + * code. + */ + [TMP_REG_1] = {HPPA_REG_T3, HPPA_REG_T2}, + [TMP_REG_2] = {HPPA_REG_T5, HPPA_REG_T4}, + + /* temporary space for BPF_R0 during libgcc and millicode calls */ + [TMP_REG_R0] = {STACK_OFFSET(BPF_R0_TEMP_HI), STACK_OFFSET(BPF_R0_TEMP_LO)}, +}; + +static s8 hi(const s8 *r) +{ + return r[0]; +} + +static s8 lo(const s8 *r) +{ + return r[1]; +} + +static void emit_hppa_copy(const s8 rs, const s8 rd, struct hppa_jit_context *ctx) +{ + REG_SET_SEEN(ctx, rd); + if (OPTIMIZE_HPPA && (rs == rd)) + return; + REG_SET_SEEN(ctx, rs); + emit(hppa_copy(rs, rd), ctx); +} + +static void emit_hppa_xor(const s8 r1, const s8 r2, const s8 r3, struct hppa_jit_context *ctx) +{ + REG_SET_SEEN(ctx, r1); + REG_SET_SEEN(ctx, r2); + REG_SET_SEEN(ctx, r3); + if (OPTIMIZE_HPPA && (r1 == r2)) { + emit(hppa_copy(HPPA_REG_ZERO, r3), ctx); + } else { + emit(hppa_xor(r1, r2, r3), ctx); + } +} + +static void emit_imm(const s8 rd, s32 imm, struct hppa_jit_context *ctx) +{ + u32 lower = im11(imm); + + REG_SET_SEEN(ctx, rd); + if (OPTIMIZE_HPPA && relative_bits_ok(imm, 14)) { + emit(hppa_ldi(imm, rd), ctx); + return; + } + emit(hppa_ldil(imm, rd), ctx); + if (OPTIMIZE_HPPA && (lower == 0)) + return; + emit(hppa_ldo(lower, rd, rd), ctx); +} + +static void emit_imm32(const s8 *rd, s32 imm, struct hppa_jit_context *ctx) +{ + /* Emit immediate into lower bits. */ + REG_SET_SEEN(ctx, lo(rd)); + emit_imm(lo(rd), imm, ctx); + + /* Sign-extend into upper bits. */ + REG_SET_SEEN(ctx, hi(rd)); + if (imm >= 0) + emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx); + else + emit(hppa_ldi(-1, hi(rd)), ctx); +} + +static void emit_imm64(const s8 *rd, s32 imm_hi, s32 imm_lo, + struct hppa_jit_context *ctx) +{ + emit_imm(hi(rd), imm_hi, ctx); + emit_imm(lo(rd), imm_lo, ctx); +} + +static void __build_epilogue(bool is_tail_call, struct hppa_jit_context *ctx) +{ + const s8 *r0 = regmap[BPF_REG_0]; + int i; + + if (is_tail_call) { + /* + * goto *(t0 + 4); + * Skips first instruction of prologue which initializes tail + * call counter. Assumes t0 contains address of target program, + * see emit_bpf_tail_call. + */ + emit(hppa_ldo(1 * HPPA_INSN_SIZE, HPPA_REG_T0, HPPA_REG_T0), ctx); + emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_T0, EXEC_NEXT_INSTR), ctx); + /* in delay slot: */ + emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_IN_INIT), ctx); + + return; + } + + /* load epilogue function pointer and jump to it. 
*/ + /* exit point is either directly below, or the outest TCC exit function */ + emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx); + emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx); + + /* NOTE: we are 32-bit and big-endian, so return lower 32-bit value */ + emit_hppa_copy(lo(r0), HPPA_REG_RET0, ctx); + + /* Restore callee-saved registers. */ + for (i = 3; i <= 18; i++) { + if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i))) + continue; + emit(hppa_ldw(-REG_SIZE * (8 + (i-3)), HPPA_REG_SP, HPPA_R(i)), ctx); + } + + /* load original return pointer (stored by outest TCC function) */ + emit(hppa_ldw(-0x14, HPPA_REG_SP, HPPA_REG_RP), ctx); + emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_RP, EXEC_NEXT_INSTR), ctx); + /* in delay slot: */ + emit(hppa_ldw(-0x04, HPPA_REG_SP, HPPA_REG_SP), ctx); +} + +static bool is_stacked(s8 reg) +{ + return reg < 0; +} + +static const s8 *bpf_get_reg64_offset(const s8 *reg, const s8 *tmp, + u16 offset_sp, struct hppa_jit_context *ctx) +{ + if (is_stacked(hi(reg))) { + emit(hppa_ldw(REG_SIZE * hi(reg) - offset_sp, HPPA_REG_SP, hi(tmp)), ctx); + emit(hppa_ldw(REG_SIZE * lo(reg) - offset_sp, HPPA_REG_SP, lo(tmp)), ctx); + reg = tmp; + } + REG_SET_SEEN(ctx, hi(reg)); + REG_SET_SEEN(ctx, lo(reg)); + return reg; +} + +static const s8 *bpf_get_reg64(const s8 *reg, const s8 *tmp, + struct hppa_jit_context *ctx) +{ + return bpf_get_reg64_offset(reg, tmp, 0, ctx); +} + +static const s8 *bpf_get_reg64_ref(const s8 *reg, const s8 *tmp, + bool must_load, struct hppa_jit_context *ctx) +{ + if (!OPTIMIZE_HPPA) + return bpf_get_reg64(reg, tmp, ctx); + + if (is_stacked(hi(reg))) { + if (must_load) + emit(hppa_ldw(REG_SIZE * hi(reg), HPPA_REG_SP, hi(tmp)), ctx); + reg = tmp; + } + REG_SET_SEEN(ctx, hi(reg)); + REG_SET_SEEN(ctx, lo(reg)); + return reg; +} + + +static void bpf_put_reg64(const s8 *reg, const s8 *src, + struct hppa_jit_context *ctx) +{ + if (is_stacked(hi(reg))) { + emit(hppa_stw(hi(src), REG_SIZE * hi(reg), HPPA_REG_SP), ctx); + emit(hppa_stw(lo(src), REG_SIZE * lo(reg), HPPA_REG_SP), ctx); + } +} + +static void bpf_save_R0(struct hppa_jit_context *ctx) +{ + bpf_put_reg64(regmap[TMP_REG_R0], regmap[BPF_REG_0], ctx); +} + +static void bpf_restore_R0(struct hppa_jit_context *ctx) +{ + bpf_get_reg64(regmap[TMP_REG_R0], regmap[BPF_REG_0], ctx); +} + + +static const s8 *bpf_get_reg32(const s8 *reg, const s8 *tmp, + struct hppa_jit_context *ctx) +{ + if (is_stacked(lo(reg))) { + emit(hppa_ldw(REG_SIZE * lo(reg), HPPA_REG_SP, lo(tmp)), ctx); + reg = tmp; + } + REG_SET_SEEN(ctx, lo(reg)); + return reg; +} + +static const s8 *bpf_get_reg32_ref(const s8 *reg, const s8 *tmp, + struct hppa_jit_context *ctx) +{ + if (!OPTIMIZE_HPPA) + return bpf_get_reg32(reg, tmp, ctx); + + if (is_stacked(hi(reg))) { + reg = tmp; + } + REG_SET_SEEN(ctx, lo(reg)); + return reg; +} + +static void bpf_put_reg32(const s8 *reg, const s8 *src, + struct hppa_jit_context *ctx) +{ + if (is_stacked(lo(reg))) { + REG_SET_SEEN(ctx, lo(src)); + emit(hppa_stw(lo(src), REG_SIZE * lo(reg), HPPA_REG_SP), ctx); + if (1 && !ctx->prog->aux->verifier_zext) { + REG_SET_SEEN(ctx, hi(reg)); + emit(hppa_stw(HPPA_REG_ZERO, REG_SIZE * hi(reg), HPPA_REG_SP), ctx); + } + } else if (1 && !ctx->prog->aux->verifier_zext) { + REG_SET_SEEN(ctx, hi(reg)); + emit_hppa_copy(HPPA_REG_ZERO, hi(reg), ctx); + } +} + +/* extern hppa millicode functions */ +extern void $$mulI(void); +extern void $$divU(void); +extern void $$remU(void); + +static void emit_call_millicode(void *func, const s8 arg0, + const s8 arg1, u8 opcode, struct hppa_jit_context 
*ctx) +{ + u32 func_addr; + + emit_hppa_copy(arg0, HPPA_REG_ARG0, ctx); + emit_hppa_copy(arg1, HPPA_REG_ARG1, ctx); + + /* libcgcc overwrites HPPA_REG_RET0/1, save temp. in dest. */ + if (arg0 != HPPA_REG_RET1) + bpf_save_R0(ctx); + + func_addr = (uintptr_t) dereference_function_descriptor(func); + emit(hppa_ldil(func_addr, HPPA_REG_R31), ctx); + /* skip the following be_l instruction if divisor is zero. */ + if (BPF_OP(opcode) == BPF_DIV || BPF_OP(opcode) == BPF_MOD) { + if (BPF_OP(opcode) == BPF_DIV) + emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET1, ctx); + else + emit_hppa_copy(HPPA_REG_ARG0, HPPA_REG_RET1, ctx); + emit(hppa_or_cond(HPPA_REG_ARG1, HPPA_REG_ZERO, 1, 0, HPPA_REG_ZERO), ctx); + } + /* Note: millicode functions use r31 as return pointer instead of rp */ + emit(hppa_be_l(im11(func_addr) >> 2, HPPA_REG_R31, NOP_NEXT_INSTR), ctx); + emit(hppa_nop(), ctx); /* this nop is needed here for delay slot */ + + /* Note: millicode functions return result in RET1, not RET0 */ + emit_hppa_copy(HPPA_REG_RET1, arg0, ctx); + + /* restore HPPA_REG_RET0/1, temp. save in dest. */ + if (arg0 != HPPA_REG_RET1) + bpf_restore_R0(ctx); +} + +static void emit_call_libgcc_ll(void *func, const s8 *arg0, + const s8 *arg1, u8 opcode, struct hppa_jit_context *ctx) +{ + u32 func_addr; + + emit_hppa_copy(lo(arg0), HPPA_REG_ARG0, ctx); + emit_hppa_copy(hi(arg0), HPPA_REG_ARG1, ctx); + emit_hppa_copy(lo(arg1), HPPA_REG_ARG2, ctx); + emit_hppa_copy(hi(arg1), HPPA_REG_ARG3, ctx); + + /* libcgcc overwrites HPPA_REG_RET0/_RET1, so keep copy of R0 on stack */ + if (hi(arg0) != HPPA_REG_RET0) + bpf_save_R0(ctx); + + /* prepare stack */ + emit(hppa_ldo(2 * FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx); + + func_addr = (uintptr_t) dereference_function_descriptor(func); + emit(hppa_ldil(func_addr, HPPA_REG_R31), ctx); + /* zero out the following be_l instruction if divisor is 0 (and set default values) */ + if (BPF_OP(opcode) == BPF_DIV || BPF_OP(opcode) == BPF_MOD) { + emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET0, ctx); + if (BPF_OP(opcode) == BPF_DIV) + emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET1, ctx); + else + emit_hppa_copy(HPPA_REG_ARG0, HPPA_REG_RET1, ctx); + emit(hppa_or_cond(HPPA_REG_ARG2, HPPA_REG_ARG3, 1, 0, HPPA_REG_ZERO), ctx); + } + emit(hppa_be_l(im11(func_addr) >> 2, HPPA_REG_R31, EXEC_NEXT_INSTR), ctx); + emit_hppa_copy(HPPA_REG_R31, HPPA_REG_RP, ctx); + + /* restore stack */ + emit(hppa_ldo(-2 * FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx); + + emit_hppa_copy(HPPA_REG_RET0, hi(arg0), ctx); + emit_hppa_copy(HPPA_REG_RET1, lo(arg0), ctx); + + /* restore HPPA_REG_RET0/_RET1 */ + if (hi(arg0) != HPPA_REG_RET0) + bpf_restore_R0(ctx); +} + +static void emit_jump(s32 paoff, bool force_far, + struct hppa_jit_context *ctx) +{ + unsigned long pc, addr; + + /* Note: allocate 2 instructions for jumps if force_far is set. 
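
/*
 * Illustrative, stand-alone sketch (not part of the patch above) of the
 * divide-by-zero rule the guard around the millicode/libgcc calls encodes:
 * a zero divisor skips the helper call and leaves 0 for BPF_DIV and the
 * unchanged dividend for BPF_MOD.  Plain C model only, not emitted code.
 */
#include <stdio.h>

static unsigned int bpf_div32(unsigned int dividend, unsigned int divisor)
{
	return divisor ? dividend / divisor : 0;
}

static unsigned int bpf_mod32(unsigned int dividend, unsigned int divisor)
{
	return divisor ? dividend % divisor : dividend;
}

int main(void)
{
	printf("10/3=%u 10%%3=%u\n", bpf_div32(10, 3), bpf_mod32(10, 3));
	printf("10/0=%u 10%%0=%u\n", bpf_div32(10, 0), bpf_mod32(10, 0));
	return 0;
}
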
*/ + if (relative_bits_ok(paoff - HPPA_BRANCH_DISPLACEMENT, 17)) { + /* use BL,short branch followed by nop() */ + emit(hppa_bl(paoff - HPPA_BRANCH_DISPLACEMENT, HPPA_REG_ZERO), ctx); + if (force_far) + emit(hppa_nop(), ctx); + return; + } + + pc = (uintptr_t) &ctx->insns[ctx->ninsns]; + addr = pc + (paoff * HPPA_INSN_SIZE); + emit(hppa_ldil(addr, HPPA_REG_R31), ctx); + emit(hppa_be_l(im11(addr) >> 2, HPPA_REG_R31, NOP_NEXT_INSTR), ctx); // be,l,n addr(sr4,r31), %sr0, %r31 +} + +static void emit_alu_i64(const s8 *dst, s32 imm, + struct hppa_jit_context *ctx, const u8 op) +{ + const s8 *tmp1 = regmap[TMP_REG_1]; + const s8 *rd; + + if (0 && op == BPF_MOV) + rd = bpf_get_reg64_ref(dst, tmp1, false, ctx); + else + rd = bpf_get_reg64(dst, tmp1, ctx); + + /* dst = dst OP imm */ + switch (op) { + case BPF_MOV: + emit_imm32(rd, imm, ctx); + break; + case BPF_AND: + emit_imm(HPPA_REG_T0, imm, ctx); + emit(hppa_and(lo(rd), HPPA_REG_T0, lo(rd)), ctx); + if (imm >= 0) + emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx); + break; + case BPF_OR: + emit_imm(HPPA_REG_T0, imm, ctx); + emit(hppa_or(lo(rd), HPPA_REG_T0, lo(rd)), ctx); + if (imm < 0) + emit_imm(hi(rd), -1, ctx); + break; + case BPF_XOR: + emit_imm(HPPA_REG_T0, imm, ctx); + emit_hppa_xor(lo(rd), HPPA_REG_T0, lo(rd), ctx); + if (imm < 0) { + emit_imm(HPPA_REG_T0, -1, ctx); + emit_hppa_xor(hi(rd), HPPA_REG_T0, hi(rd), ctx); + } + break; + case BPF_LSH: + if (imm == 0) + break; + if (imm > 32) { + imm -= 32; + emit(hppa_zdep(lo(rd), imm, imm, hi(rd)), ctx); + emit_hppa_copy(HPPA_REG_ZERO, lo(rd), ctx); + } else if (imm == 32) { + emit_hppa_copy(lo(rd), hi(rd), ctx); + emit_hppa_copy(HPPA_REG_ZERO, lo(rd), ctx); + } else { + emit(hppa_shd(hi(rd), lo(rd), 32 - imm, hi(rd)), ctx); + emit(hppa_zdep(lo(rd), imm, imm, lo(rd)), ctx); + } + break; + case BPF_RSH: + if (imm == 0) + break; + if (imm > 32) { + imm -= 32; + emit(hppa_shr(hi(rd), imm, lo(rd)), ctx); + emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx); + } else if (imm == 32) { + emit_hppa_copy(hi(rd), lo(rd), ctx); + emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx); + } else { + emit(hppa_shrpw(hi(rd), lo(rd), imm, lo(rd)), ctx); + emit(hppa_shr(hi(rd), imm, hi(rd)), ctx); + } + break; + case BPF_ARSH: + if (imm == 0) + break; + if (imm > 32) { + imm -= 32; + emit(hppa_extrws(hi(rd), 31 - imm, imm, lo(rd)), ctx); + emit(hppa_extrws(hi(rd), 0, 31, hi(rd)), ctx); + } else if (imm == 32) { + emit_hppa_copy(hi(rd), lo(rd), ctx); + emit(hppa_extrws(hi(rd), 0, 31, hi(rd)), ctx); + } else { + emit(hppa_shrpw(hi(rd), lo(rd), imm, lo(rd)), ctx); + emit(hppa_extrws(hi(rd), 31 - imm, imm, hi(rd)), ctx); + } + break; + default: + WARN_ON(1); + } + + bpf_put_reg64(dst, rd, ctx); +} + +static void emit_alu_i32(const s8 *dst, s32 imm, + struct hppa_jit_context *ctx, const u8 op) +{ + const s8 *tmp1 = regmap[TMP_REG_1]; + const s8 *rd = bpf_get_reg32(dst, tmp1, ctx); + + if (op == BPF_MOV) + rd = bpf_get_reg32_ref(dst, tmp1, ctx); + else + rd = bpf_get_reg32(dst, tmp1, ctx); + + /* dst = dst OP imm */ + switch (op) { + case BPF_MOV: + emit_imm(lo(rd), imm, ctx); + break; + case BPF_ADD: + emit_imm(HPPA_REG_T0, imm, ctx); + emit(hppa_add(lo(rd), HPPA_REG_T0, lo(rd)), ctx); + break; + case BPF_SUB: + emit_imm(HPPA_REG_T0, imm, ctx); + emit(hppa_sub(lo(rd), HPPA_REG_T0, lo(rd)), ctx); + break; + case BPF_AND: + emit_imm(HPPA_REG_T0, imm, ctx); + emit(hppa_and(lo(rd), HPPA_REG_T0, lo(rd)), ctx); + break; + case BPF_OR: + emit_imm(HPPA_REG_T0, imm, ctx); + emit(hppa_or(lo(rd), HPPA_REG_T0, lo(rd)), ctx); + break; + case BPF_XOR: + 
emit_imm(HPPA_REG_T0, imm, ctx); + emit_hppa_xor(lo(rd), HPPA_REG_T0, lo(rd), ctx); + break; + case BPF_LSH: + if (imm != 0) + emit(hppa_zdep(lo(rd), imm, imm, lo(rd)), ctx); + break; + case BPF_RSH: + if (imm != 0) + emit(hppa_shr(lo(rd), imm, lo(rd)), ctx); + break; + case BPF_ARSH: + if (imm != 0) + emit(hppa_extrws(lo(rd), 31 - imm, imm, lo(rd)), ctx); + break; + default: + WARN_ON(1); + } + + bpf_put_reg32(dst, rd, ctx); +} + +static void emit_alu_r64(const s8 *dst, const s8 *src, + struct hppa_jit_context *ctx, const u8 op) +{ + const s8 *tmp1 = regmap[TMP_REG_1]; + const s8 *tmp2 = regmap[TMP_REG_2]; + const s8 *rd; + const s8 *rs = bpf_get_reg64(src, tmp2, ctx); + + if (op == BPF_MOV) + rd = bpf_get_reg64_ref(dst, tmp1, false, ctx); + else + rd = bpf_get_reg64(dst, tmp1, ctx); + + /* dst = dst OP src */ + switch (op) { + case BPF_MOV: + emit_hppa_copy(lo(rs), lo(rd), ctx); + emit_hppa_copy(hi(rs), hi(rd), ctx); + break; + case BPF_ADD: + emit(hppa_add(lo(rd), lo(rs), lo(rd)), ctx); + emit(hppa_addc(hi(rd), hi(rs), hi(rd)), ctx); + break; + case BPF_SUB: + emit(hppa_sub(lo(rd), lo(rs), lo(rd)), ctx); + emit(hppa_subb(hi(rd), hi(rs), hi(rd)), ctx); + break; + case BPF_AND: + emit(hppa_and(lo(rd), lo(rs), lo(rd)), ctx); + emit(hppa_and(hi(rd), hi(rs), hi(rd)), ctx); + break; + case BPF_OR: + emit(hppa_or(lo(rd), lo(rs), lo(rd)), ctx); + emit(hppa_or(hi(rd), hi(rs), hi(rd)), ctx); + break; + case BPF_XOR: + emit_hppa_xor(lo(rd), lo(rs), lo(rd), ctx); + emit_hppa_xor(hi(rd), hi(rs), hi(rd), ctx); + break; + case BPF_MUL: + emit_call_libgcc_ll(__muldi3, rd, rs, op, ctx); + break; + case BPF_DIV: + emit_call_libgcc_ll(&hppa_div64, rd, rs, op, ctx); + break; + case BPF_MOD: + emit_call_libgcc_ll(&hppa_div64_rem, rd, rs, op, ctx); + break; + case BPF_LSH: + emit_call_libgcc_ll(__ashldi3, rd, rs, op, ctx); + break; + case BPF_RSH: + emit_call_libgcc_ll(__lshrdi3, rd, rs, op, ctx); + break; + case BPF_ARSH: + emit_call_libgcc_ll(__ashrdi3, rd, rs, op, ctx); + break; + case BPF_NEG: + emit(hppa_sub(HPPA_REG_ZERO, lo(rd), lo(rd)), ctx); + emit(hppa_subb(HPPA_REG_ZERO, hi(rd), hi(rd)), ctx); + break; + default: + WARN_ON(1); + } + + bpf_put_reg64(dst, rd, ctx); +} + +static void emit_alu_r32(const s8 *dst, const s8 *src, + struct hppa_jit_context *ctx, const u8 op) +{ + const s8 *tmp1 = regmap[TMP_REG_1]; + const s8 *tmp2 = regmap[TMP_REG_2]; + const s8 *rd; + const s8 *rs = bpf_get_reg32(src, tmp2, ctx); + + if (op == BPF_MOV) + rd = bpf_get_reg32_ref(dst, tmp1, ctx); + else + rd = bpf_get_reg32(dst, tmp1, ctx); + + /* dst = dst OP src */ + switch (op) { + case BPF_MOV: + emit_hppa_copy(lo(rs), lo(rd), ctx); + break; + case BPF_ADD: + emit(hppa_add(lo(rd), lo(rs), lo(rd)), ctx); + break; + case BPF_SUB: + emit(hppa_sub(lo(rd), lo(rs), lo(rd)), ctx); + break; + case BPF_AND: + emit(hppa_and(lo(rd), lo(rs), lo(rd)), ctx); + break; + case BPF_OR: + emit(hppa_or(lo(rd), lo(rs), lo(rd)), ctx); + break; + case BPF_XOR: + emit_hppa_xor(lo(rd), lo(rs), lo(rd), ctx); + break; + case BPF_MUL: + emit_call_millicode($$mulI, lo(rd), lo(rs), op, ctx); + break; + case BPF_DIV: + emit_call_millicode($$divU, lo(rd), lo(rs), op, ctx); + break; + case BPF_MOD: + emit_call_millicode($$remU, lo(rd), lo(rs), op, ctx); + break; + case BPF_LSH: + emit(hppa_subi(0x1f, lo(rs), HPPA_REG_T0), ctx); + emit(hppa_mtsar(HPPA_REG_T0), ctx); + emit(hppa_depwz_sar(lo(rd), lo(rd)), ctx); + break; + case BPF_RSH: + emit(hppa_mtsar(lo(rs)), ctx); + emit(hppa_shrpw_sar(lo(rd), lo(rd)), ctx); + break; + case BPF_ARSH: /* sign 
extending arithmetic shift right */ + // emit(hppa_beq(lo(rs), HPPA_REG_ZERO, 2), ctx); + emit(hppa_subi(0x1f, lo(rs), HPPA_REG_T0), ctx); + emit(hppa_mtsar(HPPA_REG_T0), ctx); + emit(hppa_extrws_sar(lo(rd), lo(rd)), ctx); + break; + case BPF_NEG: + emit(hppa_sub(HPPA_REG_ZERO, lo(rd), lo(rd)), ctx); // sub r0,rd,rd + break; + default: + WARN_ON(1); + } + + bpf_put_reg32(dst, rd, ctx); +} + +static int emit_branch_r64(const s8 *src1, const s8 *src2, s32 paoff, + struct hppa_jit_context *ctx, const u8 op) +{ + int e, s = ctx->ninsns; + const s8 *tmp1 = regmap[TMP_REG_1]; + const s8 *tmp2 = regmap[TMP_REG_2]; + + const s8 *rs1 = bpf_get_reg64(src1, tmp1, ctx); + const s8 *rs2 = bpf_get_reg64(src2, tmp2, ctx); + + /* + * NO_JUMP skips over the rest of the instructions and the + * emit_jump, meaning the BPF branch is not taken. + * JUMP skips directly to the emit_jump, meaning + * the BPF branch is taken. + * + * The fallthrough case results in the BPF branch being taken. + */ +#define NO_JUMP(idx) (2 + (idx) - 1) +#define JUMP(idx) (0 + (idx) - 1) + + switch (op) { + case BPF_JEQ: + emit(hppa_bne(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(hppa_bne(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JGT: + emit(hppa_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(hppa_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(hppa_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JLT: + emit(hppa_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(hppa_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(hppa_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JGE: + emit(hppa_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(hppa_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(hppa_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JLE: + emit(hppa_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(hppa_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(hppa_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JNE: + emit(hppa_bne(hi(rs1), hi(rs2), JUMP(1)), ctx); + emit(hppa_beq(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JSGT: + emit(hppa_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(hppa_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(hppa_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JSLT: + emit(hppa_blt(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(hppa_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(hppa_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JSGE: + emit(hppa_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(hppa_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(hppa_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JSLE: + emit(hppa_blt(hi(rs1), hi(rs2), JUMP(2)), ctx); + emit(hppa_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx); + emit(hppa_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx); + break; + case BPF_JSET: + emit(hppa_and(hi(rs1), hi(rs2), HPPA_REG_T0), ctx); + emit(hppa_and(lo(rs1), lo(rs2), HPPA_REG_T1), ctx); + emit(hppa_bne(HPPA_REG_T0, HPPA_REG_ZERO, JUMP(1)), ctx); + emit(hppa_beq(HPPA_REG_T1, HPPA_REG_ZERO, NO_JUMP(0)), ctx); + break; + default: + WARN_ON(1); + } + +#undef NO_JUMP +#undef JUMP + + e = ctx->ninsns; + /* Adjust for extra insns. */ + paoff -= (e - s); + emit_jump(paoff, true, ctx); + return 0; +} + +static int emit_bcc(u8 op, u8 rd, u8 rs, int paoff, struct hppa_jit_context *ctx) +{ + int e, s; + bool far = false; + int off; + + if (op == BPF_JSET) { + /* + * BPF_JSET is a special case: it has no inverse so we always + * treat it as a far branch. 
+ */ + emit(hppa_and(rd, rs, HPPA_REG_T0), ctx); + paoff -= 1; /* reduce offset due to hppa_and() above */ + rd = HPPA_REG_T0; + rs = HPPA_REG_ZERO; + op = BPF_JNE; + } + + s = ctx->ninsns; + + if (!relative_bits_ok(paoff - HPPA_BRANCH_DISPLACEMENT, 12)) { + op = invert_bpf_cond(op); + far = true; + } + + /* + * For a far branch, the condition is negated and we jump over the + * branch itself, and the three instructions from emit_jump. + * For a near branch, just use paoff. + */ + off = far ? (HPPA_BRANCH_DISPLACEMENT - 1) : paoff - HPPA_BRANCH_DISPLACEMENT; + + switch (op) { + /* IF (dst COND src) JUMP off */ + case BPF_JEQ: + emit(hppa_beq(rd, rs, off), ctx); + break; + case BPF_JGT: + emit(hppa_bgtu(rd, rs, off), ctx); + break; + case BPF_JLT: + emit(hppa_bltu(rd, rs, off), ctx); + break; + case BPF_JGE: + emit(hppa_bgeu(rd, rs, off), ctx); + break; + case BPF_JLE: + emit(hppa_bleu(rd, rs, off), ctx); + break; + case BPF_JNE: + emit(hppa_bne(rd, rs, off), ctx); + break; + case BPF_JSGT: + emit(hppa_bgt(rd, rs, off), ctx); + break; + case BPF_JSLT: + emit(hppa_blt(rd, rs, off), ctx); + break; + case BPF_JSGE: + emit(hppa_bge(rd, rs, off), ctx); + break; + case BPF_JSLE: + emit(hppa_ble(rd, rs, off), ctx); + break; + default: + WARN_ON(1); + } + + if (far) { + e = ctx->ninsns; + /* Adjust for extra insns. */ + paoff -= (e - s); + emit_jump(paoff, true, ctx); + } + return 0; +} + +static int emit_branch_r32(const s8 *src1, const s8 *src2, s32 paoff, + struct hppa_jit_context *ctx, const u8 op) +{ + int e, s = ctx->ninsns; + const s8 *tmp1 = regmap[TMP_REG_1]; + const s8 *tmp2 = regmap[TMP_REG_2]; + + const s8 *rs1 = bpf_get_reg32(src1, tmp1, ctx); + const s8 *rs2 = bpf_get_reg32(src2, tmp2, ctx); + + e = ctx->ninsns; + /* Adjust for extra insns. */ + paoff -= (e - s); + + if (emit_bcc(op, lo(rs1), lo(rs2), paoff, ctx)) + return -1; + + return 0; +} + +static void emit_call(bool fixed, u64 addr, struct hppa_jit_context *ctx) +{ + const s8 *tmp = regmap[TMP_REG_1]; + const s8 *r0 = regmap[BPF_REG_0]; + const s8 *reg; + const int offset_sp = 2 * STACK_ALIGN; + + /* prepare stack */ + emit(hppa_ldo(offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx); + + /* load R1 & R2 in registers, R3-R5 to stack. */ + reg = bpf_get_reg64_offset(regmap[BPF_REG_5], tmp, offset_sp, ctx); + emit(hppa_stw(hi(reg), -0x48, HPPA_REG_SP), ctx); + emit(hppa_stw(lo(reg), -0x44, HPPA_REG_SP), ctx); + + reg = bpf_get_reg64_offset(regmap[BPF_REG_4], tmp, offset_sp, ctx); + emit(hppa_stw(hi(reg), -0x40, HPPA_REG_SP), ctx); + emit(hppa_stw(lo(reg), -0x3c, HPPA_REG_SP), ctx); + + reg = bpf_get_reg64_offset(regmap[BPF_REG_3], tmp, offset_sp, ctx); + emit(hppa_stw(hi(reg), -0x38, HPPA_REG_SP), ctx); + emit(hppa_stw(lo(reg), -0x34, HPPA_REG_SP), ctx); + + reg = bpf_get_reg64_offset(regmap[BPF_REG_2], tmp, offset_sp, ctx); + emit_hppa_copy(hi(reg), HPPA_REG_ARG3, ctx); + emit_hppa_copy(lo(reg), HPPA_REG_ARG2, ctx); + + reg = bpf_get_reg64_offset(regmap[BPF_REG_1], tmp, offset_sp, ctx); + emit_hppa_copy(hi(reg), HPPA_REG_ARG1, ctx); + emit_hppa_copy(lo(reg), HPPA_REG_ARG0, ctx); + + /* backup TCC */ + if (REG_WAS_SEEN(ctx, HPPA_REG_TCC)) + emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_SAVED), ctx); + + /* + * Use ldil() to load absolute address. Don't use emit_imm as the + * number of emitted instructions should not depend on the value of + * addr. 
+ */ + emit(hppa_ldil(addr, HPPA_REG_R31), ctx); + emit(hppa_be_l(im11(addr) >> 2, HPPA_REG_R31, EXEC_NEXT_INSTR), ctx); + /* set return address in delay slot */ + emit_hppa_copy(HPPA_REG_R31, HPPA_REG_RP, ctx); + + /* restore TCC */ + if (REG_WAS_SEEN(ctx, HPPA_REG_TCC)) + emit(hppa_copy(HPPA_REG_TCC_SAVED, HPPA_REG_TCC), ctx); + + /* restore stack */ + emit(hppa_ldo(-offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx); + + /* set return value. */ + emit_hppa_copy(HPPA_REG_RET0, hi(r0), ctx); + emit_hppa_copy(HPPA_REG_RET1, lo(r0), ctx); +} + +static int emit_bpf_tail_call(int insn, struct hppa_jit_context *ctx) +{ + /* + * R1 -> &ctx + * R2 -> &array + * R3 -> index + */ + int off; + const s8 *arr_reg = regmap[BPF_REG_2]; + const s8 *idx_reg = regmap[BPF_REG_3]; + struct bpf_array bpfa; + struct bpf_prog bpfp; + + /* get address of TCC main exit function for error case into rp */ + emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx); + + /* max_entries = array->map.max_entries; */ + off = offsetof(struct bpf_array, map.max_entries); + BUILD_BUG_ON(sizeof(bpfa.map.max_entries) != 4); + emit(hppa_ldw(off, lo(arr_reg), HPPA_REG_T1), ctx); + + /* + * if (index >= max_entries) + * goto out; + */ + emit(hppa_bltu(lo(idx_reg), HPPA_REG_T1, 2 - HPPA_BRANCH_DISPLACEMENT), ctx); + emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx); + + /* + * if (--tcc < 0) + * goto out; + */ + REG_FORCE_SEEN(ctx, HPPA_REG_TCC); + emit(hppa_ldo(-1, HPPA_REG_TCC, HPPA_REG_TCC), ctx); + emit(hppa_bge(HPPA_REG_TCC, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx); + emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx); + + /* + * prog = array->ptrs[index]; + * if (!prog) + * goto out; + */ + BUILD_BUG_ON(sizeof(bpfa.ptrs[0]) != 4); + emit(hppa_sh2add(lo(idx_reg), lo(arr_reg), HPPA_REG_T0), ctx); + off = offsetof(struct bpf_array, ptrs); + BUILD_BUG_ON(!relative_bits_ok(off, 11)); + emit(hppa_ldw(off, HPPA_REG_T0, HPPA_REG_T0), ctx); + emit(hppa_bne(HPPA_REG_T0, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx); + emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx); + + /* + * tcc = temp_tcc; + * goto *(prog->bpf_func + 4); + */ + off = offsetof(struct bpf_prog, bpf_func); + BUILD_BUG_ON(!relative_bits_ok(off, 11)); + BUILD_BUG_ON(sizeof(bpfp.bpf_func) != 4); + emit(hppa_ldw(off, HPPA_REG_T0, HPPA_REG_T0), ctx); + /* Epilogue jumps to *(t0 + 4). */ + __build_epilogue(true, ctx); + return 0; +} + +static int emit_load_r64(const s8 *dst, const s8 *src, s16 off, + struct hppa_jit_context *ctx, const u8 size) +{ + const s8 *tmp1 = regmap[TMP_REG_1]; + const s8 *tmp2 = regmap[TMP_REG_2]; + const s8 *rd = bpf_get_reg64_ref(dst, tmp1, ctx->prog->aux->verifier_zext, ctx); + const s8 *rs = bpf_get_reg64(src, tmp2, ctx); + s8 srcreg; + + /* need to calculate address since offset does not fit in 14 bits? 
*/ + if (relative_bits_ok(off, 14)) + srcreg = lo(rs); + else { + /* need to use R1 here, since addil puts result into R1 */ + srcreg = HPPA_REG_R1; + emit(hppa_addil(off, lo(rs)), ctx); + off = im11(off); + } + + /* LDX: dst = *(size *)(src + off) */ + switch (size) { + case BPF_B: + emit(hppa_ldb(off + 0, srcreg, lo(rd)), ctx); + if (!ctx->prog->aux->verifier_zext) + emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx); + break; + case BPF_H: + emit(hppa_ldh(off + 0, srcreg, lo(rd)), ctx); + if (!ctx->prog->aux->verifier_zext) + emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx); + break; + case BPF_W: + emit(hppa_ldw(off + 0, srcreg, lo(rd)), ctx); + if (!ctx->prog->aux->verifier_zext) + emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx); + break; + case BPF_DW: + emit(hppa_ldw(off + 0, srcreg, hi(rd)), ctx); + emit(hppa_ldw(off + 4, srcreg, lo(rd)), ctx); + break; + } + + bpf_put_reg64(dst, rd, ctx); + return 0; +} + +static int emit_store_r64(const s8 *dst, const s8 *src, s16 off, + struct hppa_jit_context *ctx, const u8 size, + const u8 mode) +{ + const s8 *tmp1 = regmap[TMP_REG_1]; + const s8 *tmp2 = regmap[TMP_REG_2]; + const s8 *rd = bpf_get_reg64(dst, tmp1, ctx); + const s8 *rs = bpf_get_reg64(src, tmp2, ctx); + s8 dstreg; + + /* need to calculate address since offset does not fit in 14 bits? */ + if (relative_bits_ok(off, 14)) + dstreg = lo(rd); + else { + /* need to use R1 here, since addil puts result into R1 */ + dstreg = HPPA_REG_R1; + emit(hppa_addil(off, lo(rd)), ctx); + off = im11(off); + } + + /* ST: *(size *)(dst + off) = imm */ + switch (size) { + case BPF_B: + emit(hppa_stb(lo(rs), off + 0, dstreg), ctx); + break; + case BPF_H: + emit(hppa_sth(lo(rs), off + 0, dstreg), ctx); + break; + case BPF_W: + emit(hppa_stw(lo(rs), off + 0, dstreg), ctx); + break; + case BPF_DW: + emit(hppa_stw(hi(rs), off + 0, dstreg), ctx); + emit(hppa_stw(lo(rs), off + 4, dstreg), ctx); + break; + } + + return 0; +} + +static void emit_rev16(const s8 rd, struct hppa_jit_context *ctx) +{ + emit(hppa_extru(rd, 23, 8, HPPA_REG_T1), ctx); + emit(hppa_depwz(rd, 23, 8, HPPA_REG_T1), ctx); + emit(hppa_extru(HPPA_REG_T1, 31, 16, rd), ctx); +} + +static void emit_rev32(const s8 rs, const s8 rd, struct hppa_jit_context *ctx) +{ + emit(hppa_shrpw(rs, rs, 16, HPPA_REG_T1), ctx); + emit(hppa_depwz(HPPA_REG_T1, 15, 8, HPPA_REG_T1), ctx); + emit(hppa_shrpw(rs, HPPA_REG_T1, 8, rd), ctx); +} + +static void emit_zext64(const s8 *dst, struct hppa_jit_context *ctx) +{ + const s8 *rd; + const s8 *tmp1 = regmap[TMP_REG_1]; + + rd = bpf_get_reg64(dst, tmp1, ctx); + emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx); + bpf_put_reg64(dst, rd, ctx); +} + +int bpf_jit_emit_insn(const struct bpf_insn *insn, struct hppa_jit_context *ctx, + bool extra_pass) +{ + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 || + BPF_CLASS(insn->code) == BPF_JMP; + int s, e, paoff, i = insn - ctx->prog->insnsi; + u8 code = insn->code; + s16 off = insn->off; + s32 imm = insn->imm; + + const s8 *dst = regmap[insn->dst_reg]; + const s8 *src = regmap[insn->src_reg]; + const s8 *tmp1 = regmap[TMP_REG_1]; + const s8 *tmp2 = regmap[TMP_REG_2]; + + if (0) printk("CLASS %03d CODE %#02x ALU64:%d BPF_SIZE %#02x " + "BPF_CODE %#02x src_reg %d dst_reg %d\n", + BPF_CLASS(code), code, (code & BPF_ALU64) ? 
1:0, BPF_SIZE(code), + BPF_OP(code), insn->src_reg, insn->dst_reg); + + switch (code) { + /* dst = src */ + case BPF_ALU64 | BPF_MOV | BPF_X: + + case BPF_ALU64 | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_K: + + case BPF_ALU64 | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_K: + + case BPF_ALU64 | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + + case BPF_ALU64 | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_K: + + case BPF_ALU64 | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_K: + + case BPF_ALU64 | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_K: + + case BPF_ALU64 | BPF_LSH | BPF_X: + case BPF_ALU64 | BPF_RSH | BPF_X: + case BPF_ALU64 | BPF_ARSH | BPF_X: + if (BPF_SRC(code) == BPF_K) { + emit_imm32(tmp2, imm, ctx); + src = tmp2; + } + emit_alu_r64(dst, src, ctx, BPF_OP(code)); + break; + + /* dst = -dst */ + case BPF_ALU64 | BPF_NEG: + emit_alu_r64(dst, tmp2, ctx, BPF_OP(code)); + break; + + case BPF_ALU64 | BPF_MOV | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: + emit_alu_i64(dst, imm, ctx, BPF_OP(code)); + break; + + case BPF_ALU | BPF_MOV | BPF_X: + if (imm == 1) { + /* Special mov32 for zext. */ + emit_zext64(dst, ctx); + break; + } + fallthrough; + /* dst = dst OP src */ + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU | BPF_XOR | BPF_X: + + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU | BPF_MUL | BPF_K: + + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU | BPF_DIV | BPF_K: + + case BPF_ALU | BPF_MOD | BPF_X: + case BPF_ALU | BPF_MOD | BPF_K: + + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU | BPF_ARSH | BPF_X: + if (BPF_SRC(code) == BPF_K) { + emit_imm32(tmp2, imm, ctx); + src = tmp2; + } + emit_alu_r32(dst, src, ctx, BPF_OP(code)); + break; + + /* dst = dst OP imm */ + case BPF_ALU | BPF_MOV | BPF_K: + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU | BPF_ARSH | BPF_K: + /* + * mul,div,mod are handled in the BPF_X case. + */ + emit_alu_i32(dst, imm, ctx, BPF_OP(code)); + break; + + /* dst = -dst */ + case BPF_ALU | BPF_NEG: + /* + * src is ignored---choose tmp2 as a dummy register since it + * is not on the stack. + */ + emit_alu_r32(dst, tmp2, ctx, BPF_OP(code)); + break; + + /* dst = BSWAP##imm(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_BE: + { + const s8 *rd = bpf_get_reg64(dst, tmp1, ctx); + + switch (imm) { + case 16: + /* zero-extend 16 bits into 64 bits */ + emit(hppa_extru(lo(rd), 31, 16, lo(rd)), ctx); + fallthrough; + case 32: + /* zero-extend 32 bits into 64 bits */ + if (!ctx->prog->aux->verifier_zext) + emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx); + break; + case 64: + /* Do nothing. 
*/ + break; + default: + pr_err("bpf-jit: BPF_END imm %d invalid\n", imm); + return -1; + } + + bpf_put_reg64(dst, rd, ctx); + break; + } + + case BPF_ALU | BPF_END | BPF_FROM_LE: + { + const s8 *rd = bpf_get_reg64(dst, tmp1, ctx); + + switch (imm) { + case 16: + emit_rev16(lo(rd), ctx); + if (!ctx->prog->aux->verifier_zext) + emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx); + break; + case 32: + emit_rev32(lo(rd), lo(rd), ctx); + if (!ctx->prog->aux->verifier_zext) + emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx); + break; + case 64: + /* Swap upper and lower halves, then each half. */ + emit_hppa_copy(hi(rd), HPPA_REG_T0, ctx); + emit_rev32(lo(rd), hi(rd), ctx); + emit_rev32(HPPA_REG_T0, lo(rd), ctx); + break; + default: + pr_err("bpf-jit: BPF_END imm %d invalid\n", imm); + return -1; + } + + bpf_put_reg64(dst, rd, ctx); + break; + } + /* JUMP off */ + case BPF_JMP | BPF_JA: + paoff = hppa_offset(i, off, ctx); + emit_jump(paoff, false, ctx); + break; + /* function call */ + case BPF_JMP | BPF_CALL: + { + bool fixed; + int ret; + u64 addr; + + ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr, + &fixed); + if (ret < 0) + return ret; + emit_call(fixed, addr, ctx); + break; + } + /* tail call */ + case BPF_JMP | BPF_TAIL_CALL: + REG_SET_SEEN_ALL(ctx); + if (emit_bpf_tail_call(i, ctx)) + return -1; + break; + /* IF (dst COND imm) JUMP off */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_K: + + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_K: + + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_K: + + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_K: + + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_K: + + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_K: + + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_K: + + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_K: + + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_K: + + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_K: + + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_K: + paoff = hppa_offset(i, off, ctx); + if (BPF_SRC(code) == BPF_K) { + s = ctx->ninsns; + emit_imm32(tmp2, imm, ctx); + src = tmp2; + e = ctx->ninsns; + paoff -= (e - s); + } + if (is64) + emit_branch_r64(dst, src, paoff, ctx, BPF_OP(code)); + else + emit_branch_r32(dst, src, paoff, ctx, BPF_OP(code)); + break; + /* function return */ + case BPF_JMP | BPF_EXIT: + if (i == ctx->prog->len - 1) + break; + /* load epilogue function pointer and jump to it. 
*/ + emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx); + emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx); + break; + + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + { + struct bpf_insn insn1 = insn[1]; + u32 upper = insn1.imm; + u32 lower = imm; + const s8 *rd = bpf_get_reg64_ref(dst, tmp1, false, ctx); + + if (0 && bpf_pseudo_func(insn)) { + WARN_ON(upper); /* we are 32-bit! */ + upper = 0; + lower = (uintptr_t) dereference_function_descriptor(lower); + } + + emit_imm64(rd, upper, lower, ctx); + bpf_put_reg64(dst, rd, ctx); + return 1; + } + + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_DW: + if (emit_load_r64(dst, src, off, ctx, BPF_SIZE(code))) + return -1; + break; + + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; + + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_DW: + + case BPF_STX | BPF_MEM | BPF_B: + case BPF_STX | BPF_MEM | BPF_H: + case BPF_STX | BPF_MEM | BPF_W: + case BPF_STX | BPF_MEM | BPF_DW: + if (BPF_CLASS(code) == BPF_ST) { + emit_imm32(tmp2, imm, ctx); + src = tmp2; + } + + if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code), + BPF_MODE(code))) + return -1; + break; + + case BPF_STX | BPF_ATOMIC | BPF_W: + case BPF_STX | BPF_ATOMIC | BPF_DW: + pr_info_once( + "bpf-jit: not supported: atomic operation %02x ***\n", + insn->imm); + return -EFAULT; + + default: + pr_err("bpf-jit: unknown opcode %02x\n", code); + return -EINVAL; + } + + return 0; +} + +void bpf_jit_build_prologue(struct hppa_jit_context *ctx) +{ + const s8 *tmp = regmap[TMP_REG_1]; + const s8 *dst, *reg; + int stack_adjust = 0; + int i; + unsigned long addr; + int bpf_stack_adjust; + + /* + * stack on hppa grows up, so if tail calls are used we need to + * allocate the maximum stack size + */ + if (REG_ALL_SEEN(ctx)) + bpf_stack_adjust = MAX_BPF_STACK; + else + bpf_stack_adjust = ctx->prog->aux->stack_depth; + bpf_stack_adjust = round_up(bpf_stack_adjust, STACK_ALIGN); + + /* make space for callee-saved registers. */ + stack_adjust += NR_SAVED_REGISTERS * REG_SIZE; + /* make space for BPF registers on stack. */ + stack_adjust += BPF_JIT_SCRATCH_REGS * REG_SIZE; + /* make space for BPF stack. */ + stack_adjust += bpf_stack_adjust; + /* round up for stack alignment. */ + stack_adjust = round_up(stack_adjust, STACK_ALIGN); + + /* + * The first instruction sets the tail-call-counter (TCC) register. + * This instruction is skipped by tail calls. + * Use a temporary register instead of a caller-saved register initially. + */ + emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC_IN_INIT), ctx); + + /* + * skip all initializations when called as BPF TAIL call. + */ + emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_R1), ctx); + emit(hppa_bne(HPPA_REG_TCC_IN_INIT, HPPA_REG_R1, ctx->prologue_len - 2 - HPPA_BRANCH_DISPLACEMENT), ctx); + + /* set up hppa stack frame. 
*/ + emit_hppa_copy(HPPA_REG_SP, HPPA_REG_R1, ctx); // copy sp,r1 (=prev_sp) + emit(hppa_ldo(stack_adjust, HPPA_REG_SP, HPPA_REG_SP), ctx); // ldo stack_adjust(sp),sp (increase stack) + emit(hppa_stw(HPPA_REG_R1, -REG_SIZE, HPPA_REG_SP), ctx); // stw prev_sp,-0x04(sp) + emit(hppa_stw(HPPA_REG_RP, -0x14, HPPA_REG_SP), ctx); // stw rp,-0x14(sp) + + REG_FORCE_SEEN(ctx, HPPA_REG_T0); + REG_FORCE_SEEN(ctx, HPPA_REG_T1); + REG_FORCE_SEEN(ctx, HPPA_REG_T2); + REG_FORCE_SEEN(ctx, HPPA_REG_T3); + REG_FORCE_SEEN(ctx, HPPA_REG_T4); + REG_FORCE_SEEN(ctx, HPPA_REG_T5); + + /* save callee-save registers. */ + for (i = 3; i <= 18; i++) { + if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i))) + continue; + emit(hppa_stw(HPPA_R(i), -REG_SIZE * (8 + (i-3)), HPPA_REG_SP), ctx); // stw ri,-save_area(sp) + } + + /* + * now really set the tail call counter (TCC) register. + */ + if (REG_WAS_SEEN(ctx, HPPA_REG_TCC)) + emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC), ctx); + + /* + * save epilogue function pointer for outer TCC call chain. + * The main TCC call stores the final RP on stack. + */ + addr = (uintptr_t) &ctx->insns[ctx->epilogue_offset]; + /* skip first two instructions of exit function, which jump to exit */ + addr += 2 * HPPA_INSN_SIZE; + emit(hppa_ldil(addr, HPPA_REG_T2), ctx); + emit(hppa_ldo(im11(addr), HPPA_REG_T2, HPPA_REG_T2), ctx); + emit(EXIT_PTR_STORE(HPPA_REG_T2), ctx); + + /* load R1 & R2 from registers, R3-R5 from stack. */ + /* use HPPA_REG_R1 which holds the old stack value */ + dst = regmap[BPF_REG_5]; + reg = bpf_get_reg64_ref(dst, tmp, false, ctx); + if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) { + if (REG_WAS_SEEN(ctx, hi(reg))) + emit(hppa_ldw(-0x48, HPPA_REG_R1, hi(reg)), ctx); + if (REG_WAS_SEEN(ctx, lo(reg))) + emit(hppa_ldw(-0x44, HPPA_REG_R1, lo(reg)), ctx); + bpf_put_reg64(dst, tmp, ctx); + } + + dst = regmap[BPF_REG_4]; + reg = bpf_get_reg64_ref(dst, tmp, false, ctx); + if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) { + if (REG_WAS_SEEN(ctx, hi(reg))) + emit(hppa_ldw(-0x40, HPPA_REG_R1, hi(reg)), ctx); + if (REG_WAS_SEEN(ctx, lo(reg))) + emit(hppa_ldw(-0x3c, HPPA_REG_R1, lo(reg)), ctx); + bpf_put_reg64(dst, tmp, ctx); + } + + dst = regmap[BPF_REG_3]; + reg = bpf_get_reg64_ref(dst, tmp, false, ctx); + if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) { + if (REG_WAS_SEEN(ctx, hi(reg))) + emit(hppa_ldw(-0x38, HPPA_REG_R1, hi(reg)), ctx); + if (REG_WAS_SEEN(ctx, lo(reg))) + emit(hppa_ldw(-0x34, HPPA_REG_R1, lo(reg)), ctx); + bpf_put_reg64(dst, tmp, ctx); + } + + dst = regmap[BPF_REG_2]; + reg = bpf_get_reg64_ref(dst, tmp, false, ctx); + if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) { + if (REG_WAS_SEEN(ctx, hi(reg))) + emit_hppa_copy(HPPA_REG_ARG3, hi(reg), ctx); + if (REG_WAS_SEEN(ctx, lo(reg))) + emit_hppa_copy(HPPA_REG_ARG2, lo(reg), ctx); + bpf_put_reg64(dst, tmp, ctx); + } + + dst = regmap[BPF_REG_1]; + reg = bpf_get_reg64_ref(dst, tmp, false, ctx); + if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) { + if (REG_WAS_SEEN(ctx, hi(reg))) + emit_hppa_copy(HPPA_REG_ARG1, hi(reg), ctx); + if (REG_WAS_SEEN(ctx, lo(reg))) + emit_hppa_copy(HPPA_REG_ARG0, lo(reg), ctx); + bpf_put_reg64(dst, tmp, ctx); + } + + /* Set up BPF frame pointer. 
*/ + dst = regmap[BPF_REG_FP]; + reg = bpf_get_reg64_ref(dst, tmp, false, ctx); + if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) { + if (REG_WAS_SEEN(ctx, lo(reg))) + emit(hppa_ldo(-REG_SIZE * (NR_SAVED_REGISTERS + BPF_JIT_SCRATCH_REGS), + HPPA_REG_SP, lo(reg)), ctx); + if (REG_WAS_SEEN(ctx, hi(reg))) + emit_hppa_copy(HPPA_REG_ZERO, hi(reg), ctx); + bpf_put_reg64(dst, tmp, ctx); + } + + emit(hppa_nop(), ctx); +} + +void bpf_jit_build_epilogue(struct hppa_jit_context *ctx) +{ + __build_epilogue(false, ctx); +} diff --git a/arch/parisc/net/bpf_jit_comp64.c b/arch/parisc/net/bpf_jit_comp64.c new file mode 100644 index 000000000000..54b0d5e25e02 --- /dev/null +++ b/arch/parisc/net/bpf_jit_comp64.c @@ -0,0 +1,1209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BPF JIT compiler for PA-RISC (64-bit) + * + * Copyright(c) 2023 Helge Deller <deller@gmx.de> + * + * The code is based on the BPF JIT compiler for RV64 by Björn Töpel. + * + * TODO: + * - check if bpf_jit_needs_zext() is needed (currently enabled) + * - implement arch_prepare_bpf_trampoline(), poke(), ... + */ + +#include <linux/bitfield.h> +#include <linux/bpf.h> +#include <linux/filter.h> +#include <linux/libgcc.h> +#include "bpf_jit.h" + +static const int regmap[] = { + [BPF_REG_0] = HPPA_REG_RET0, + [BPF_REG_1] = HPPA_R(5), + [BPF_REG_2] = HPPA_R(6), + [BPF_REG_3] = HPPA_R(7), + [BPF_REG_4] = HPPA_R(8), + [BPF_REG_5] = HPPA_R(9), + [BPF_REG_6] = HPPA_R(10), + [BPF_REG_7] = HPPA_R(11), + [BPF_REG_8] = HPPA_R(12), + [BPF_REG_9] = HPPA_R(13), + [BPF_REG_FP] = HPPA_R(14), + [BPF_REG_AX] = HPPA_R(15), +}; + +/* + * Stack layout during BPF program execution (note: stack grows up): + * + * high + * HPPA64 sp => +----------+ <= HPPA64 fp + * | saved sp | + * | saved rp | + * | ... | HPPA64 callee-saved registers + * | curr args| + * | local var| + * +----------+ <= (BPF FP) + * | | + * | ... | BPF program stack + * | | + * | ... | Function call stack + * | | + * +----------+ + * low + */ + +/* Offset from fp for BPF registers stored on stack. */ +#define STACK_ALIGN FRAME_SIZE + +#define EXIT_PTR_LOAD(reg) hppa64_ldd_im16(-FRAME_SIZE, HPPA_REG_SP, reg) +#define EXIT_PTR_STORE(reg) hppa64_std_im16(reg, -FRAME_SIZE, HPPA_REG_SP) +#define EXIT_PTR_JUMP(reg, nop) hppa_bv(HPPA_REG_ZERO, reg, nop) + +static u8 bpf_to_hppa_reg(int bpf_reg, struct hppa_jit_context *ctx) +{ + u8 reg = regmap[bpf_reg]; + + REG_SET_SEEN(ctx, reg); + return reg; +}; + +static void emit_hppa_copy(const s8 rs, const s8 rd, struct hppa_jit_context *ctx) +{ + REG_SET_SEEN(ctx, rd); + if (OPTIMIZE_HPPA && (rs == rd)) + return; + REG_SET_SEEN(ctx, rs); + emit(hppa_copy(rs, rd), ctx); +} + +static void emit_hppa64_depd(u8 src, u8 pos, u8 len, u8 target, bool no_zero, struct hppa_jit_context *ctx) +{ + int c; + + pos &= (BITS_PER_LONG - 1); + pos = 63 - pos; + len = 64 - len; + c = (len < 32) ? 0x4 : 0; + c |= (pos >= 32) ? 0x2 : 0; + c |= (no_zero) ? 0x1 : 0; + emit(hppa_t10_insn(0x3c, target, src, 0, c, pos & 0x1f, len & 0x1f), ctx); +} + +static void emit_hppa64_shld(u8 src, int num, u8 target, struct hppa_jit_context *ctx) +{ + emit_hppa64_depd(src, 63-num, 64-num, target, 0, ctx); +} + +static void emit_hppa64_extrd(u8 src, u8 pos, u8 len, u8 target, bool signed_op, struct hppa_jit_context *ctx) +{ + int c; + + pos &= (BITS_PER_LONG - 1); + len = 64 - len; + c = (len < 32) ? 0x4 : 0; + c |= (pos >= 32) ? 0x2 : 0; + c |= signed_op ? 
0x1 : 0; + emit(hppa_t10_insn(0x36, src, target, 0, c, pos & 0x1f, len & 0x1f), ctx); +} + +static void emit_hppa64_extrw(u8 src, u8 pos, u8 len, u8 target, bool signed_op, struct hppa_jit_context *ctx) +{ + int c; + + pos &= (32 - 1); + len = 32 - len; + c = 0x06 | (signed_op ? 1 : 0); + emit(hppa_t10_insn(0x34, src, target, 0, c, pos, len), ctx); +} + +#define emit_hppa64_zext32(r, target, ctx) \ + emit_hppa64_extrd(r, 63, 32, target, false, ctx) +#define emit_hppa64_sext32(r, target, ctx) \ + emit_hppa64_extrd(r, 63, 32, target, true, ctx) + +static void emit_hppa64_shrd(u8 src, int num, u8 target, bool signed_op, struct hppa_jit_context *ctx) +{ + emit_hppa64_extrd(src, 63-num, 64-num, target, signed_op, ctx); +} + +static void emit_hppa64_shrw(u8 src, int num, u8 target, bool signed_op, struct hppa_jit_context *ctx) +{ + emit_hppa64_extrw(src, 31-num, 32-num, target, signed_op, ctx); +} + +/* Emit variable-length instructions for 32-bit imm */ +static void emit_imm32(u8 rd, s32 imm, struct hppa_jit_context *ctx) +{ + u32 lower = im11(imm); + + REG_SET_SEEN(ctx, rd); + if (OPTIMIZE_HPPA && relative_bits_ok(imm, 14)) { + emit(hppa_ldi(imm, rd), ctx); + return; + } + if (OPTIMIZE_HPPA && lower == imm) { + emit(hppa_ldo(lower, HPPA_REG_ZERO, rd), ctx); + return; + } + emit(hppa_ldil(imm, rd), ctx); + if (OPTIMIZE_HPPA && (lower == 0)) + return; + emit(hppa_ldo(lower, rd, rd), ctx); +} + +static bool is_32b_int(s64 val) +{ + return val == (s32) val; +} + +/* Emit variable-length instructions for 64-bit imm */ +static void emit_imm(u8 rd, s64 imm, u8 tmpreg, struct hppa_jit_context *ctx) +{ + u32 upper32; + + /* get lower 32-bits into rd, sign extended */ + emit_imm32(rd, imm, ctx); + + /* do we have upper 32-bits too ? */ + if (OPTIMIZE_HPPA && is_32b_int(imm)) + return; + + /* load upper 32-bits into lower tmpreg and deposit into rd */ + upper32 = imm >> 32; + if (upper32 || !OPTIMIZE_HPPA) { + emit_imm32(tmpreg, upper32, ctx); + emit_hppa64_depd(tmpreg, 31, 32, rd, 1, ctx); + } else + emit_hppa64_depd(HPPA_REG_ZERO, 31, 32, rd, 1, ctx); + +} + +static int emit_jump(signed long paoff, bool force_far, + struct hppa_jit_context *ctx) +{ + unsigned long pc, addr; + + /* Note: Use 2 instructions for jumps if force_far is set. */ + if (relative_bits_ok(paoff - HPPA_BRANCH_DISPLACEMENT, 22)) { + /* use BL,long branch followed by nop() */ + emit(hppa64_bl_long(paoff - HPPA_BRANCH_DISPLACEMENT), ctx); + if (force_far) + emit(hppa_nop(), ctx); + return 0; + } + + pc = (uintptr_t) &ctx->insns[ctx->ninsns]; + addr = pc + (paoff * HPPA_INSN_SIZE); + /* even the 64-bit kernel runs in memory below 4GB */ + if (WARN_ON_ONCE(addr >> 32)) + return -E2BIG; + emit(hppa_ldil(addr, HPPA_REG_R31), ctx); + emit(hppa_be_l(im11(addr) >> 2, HPPA_REG_R31, NOP_NEXT_INSTR), ctx); + return 0; +} + +static void __build_epilogue(bool is_tail_call, struct hppa_jit_context *ctx) +{ + int i; + + if (is_tail_call) { + /* + * goto *(t0 + 4); + * Skips first instruction of prologue which initializes tail + * call counter. Assumes t0 contains address of target program, + * see emit_bpf_tail_call. + */ + emit(hppa_ldo(1 * HPPA_INSN_SIZE, HPPA_REG_T0, HPPA_REG_T0), ctx); + emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_T0, EXEC_NEXT_INSTR), ctx); + /* in delay slot: */ + emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_IN_INIT), ctx); + + return; + } + + /* load epilogue function pointer and jump to it. 
 */ + /* exit point is either at next instruction, or the outermost TCC exit function */ + emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx); + emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx); + + /* NOTE: we are 64-bit and big-endian, so return lower sign-extended 32-bit value */ + emit_hppa64_sext32(regmap[BPF_REG_0], HPPA_REG_RET0, ctx); + + /* Restore callee-saved registers. */ + for (i = 3; i <= 15; i++) { + if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i))) + continue; + emit(hppa64_ldd_im16(-REG_SIZE * i, HPPA_REG_SP, HPPA_R(i)), ctx); + } + + /* load original return pointer (stored by outermost TCC function) */ + emit(hppa64_ldd_im16(-2*REG_SIZE, HPPA_REG_SP, HPPA_REG_RP), ctx); + emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_RP, EXEC_NEXT_INSTR), ctx); + /* in delay slot: */ + emit(hppa64_ldd_im5(-REG_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx); + + emit(hppa_nop(), ctx); // XXX why one too few ?? +} + +static int emit_branch(u8 op, u8 rd, u8 rs, signed long paoff, + struct hppa_jit_context *ctx) +{ + int e, s; + bool far = false; + int off; + + if (op == BPF_JSET) { + /* + * BPF_JSET is a special case: it has no inverse, so translate + * it to an and() and compare against zero + */ + emit(hppa_and(rd, rs, HPPA_REG_T0), ctx); + paoff -= 1; /* reduce offset due to hppa_and() above */ + rd = HPPA_REG_T0; + rs = HPPA_REG_ZERO; + op = BPF_JNE; + } + + /* set start after BPF_JSET */ + s = ctx->ninsns; + + if (!relative_branch_ok(paoff - HPPA_BRANCH_DISPLACEMENT + 1, 12)) { + op = invert_bpf_cond(op); + far = true; + } + + /* + * For a far branch, the condition is negated and we jump over the + * branch itself, and the two instructions from emit_jump. + * For a near branch, just use paoff. + */ + off = far ? (2 - HPPA_BRANCH_DISPLACEMENT) : paoff - HPPA_BRANCH_DISPLACEMENT; + + switch (op) { + /* IF (dst COND src) JUMP off */ + case BPF_JEQ: + emit(hppa_beq(rd, rs, off), ctx); + break; + case BPF_JGT: + emit(hppa_bgtu(rd, rs, off), ctx); + break; + case BPF_JLT: + emit(hppa_bltu(rd, rs, off), ctx); + break; + case BPF_JGE: + emit(hppa_bgeu(rd, rs, off), ctx); + break; + case BPF_JLE: + emit(hppa_bleu(rd, rs, off), ctx); + break; + case BPF_JNE: + emit(hppa_bne(rd, rs, off), ctx); + break; + case BPF_JSGT: + emit(hppa_bgt(rd, rs, off), ctx); + break; + case BPF_JSLT: + emit(hppa_blt(rd, rs, off), ctx); + break; + case BPF_JSGE: + emit(hppa_bge(rd, rs, off), ctx); + break; + case BPF_JSLE: + emit(hppa_ble(rd, rs, off), ctx); + break; + default: + WARN_ON(1); + } + + if (far) { + int ret; + e = ctx->ninsns; + /* Adjust for extra insns. 
*/ + paoff -= (e - s); + ret = emit_jump(paoff, true, ctx); + if (ret) + return ret; + } else { + /* + * always allocate 2 nops instead of the far branch to + * reduce translation loops + */ + emit(hppa_nop(), ctx); + emit(hppa_nop(), ctx); + } + return 0; +} + +static void emit_zext_32(u8 reg, struct hppa_jit_context *ctx) +{ + emit_hppa64_zext32(reg, reg, ctx); +} + +static void emit_bpf_tail_call(int insn, struct hppa_jit_context *ctx) +{ + /* + * R1 -> &ctx + * R2 -> &array + * R3 -> index + */ + int off; + const s8 arr_reg = regmap[BPF_REG_2]; + const s8 idx_reg = regmap[BPF_REG_3]; + struct bpf_array bpfa; + struct bpf_prog bpfp; + + /* if there is any tail call, we need to save & restore all registers */ + REG_SET_SEEN_ALL(ctx); + + /* get address of TCC main exit function for error case into rp */ + emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx); + + /* max_entries = array->map.max_entries; */ + off = offsetof(struct bpf_array, map.max_entries); + BUILD_BUG_ON(sizeof(bpfa.map.max_entries) != 4); + emit(hppa_ldw(off, arr_reg, HPPA_REG_T1), ctx); + + /* + * if (index >= max_entries) + * goto out; + */ + emit(hppa_bltu(idx_reg, HPPA_REG_T1, 2 - HPPA_BRANCH_DISPLACEMENT), ctx); + emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx); + + /* + * if (--tcc < 0) + * goto out; + */ + REG_FORCE_SEEN(ctx, HPPA_REG_TCC); + emit(hppa_ldo(-1, HPPA_REG_TCC, HPPA_REG_TCC), ctx); + emit(hppa_bge(HPPA_REG_TCC, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx); + emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx); + + /* + * prog = array->ptrs[index]; + * if (!prog) + * goto out; + */ + BUILD_BUG_ON(sizeof(bpfa.ptrs[0]) != 8); + emit(hppa64_shladd(idx_reg, 3, arr_reg, HPPA_REG_T0), ctx); + off = offsetof(struct bpf_array, ptrs); + BUILD_BUG_ON(off < 16); + emit(hppa64_ldd_im16(off, HPPA_REG_T0, HPPA_REG_T0), ctx); + emit(hppa_bne(HPPA_REG_T0, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx); + emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx); + + /* + * tcc = temp_tcc; + * goto *(prog->bpf_func + 4); + */ + off = offsetof(struct bpf_prog, bpf_func); + BUILD_BUG_ON(off < 16); + BUILD_BUG_ON(sizeof(bpfp.bpf_func) != 8); + emit(hppa64_ldd_im16(off, HPPA_REG_T0, HPPA_REG_T0), ctx); + /* Epilogue jumps to *(t0 + 4). 
*/ + __build_epilogue(true, ctx); +} + +static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn, + struct hppa_jit_context *ctx) +{ + u8 code = insn->code; + + switch (code) { + case BPF_JMP | BPF_JA: + case BPF_JMP | BPF_CALL: + case BPF_JMP | BPF_EXIT: + case BPF_JMP | BPF_TAIL_CALL: + break; + default: + *rd = bpf_to_hppa_reg(insn->dst_reg, ctx); + } + + if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) || + code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) || + code & BPF_LDX || code & BPF_STX) + *rs = bpf_to_hppa_reg(insn->src_reg, ctx); +} + +static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct hppa_jit_context *ctx) +{ + emit_hppa64_zext32(*rd, HPPA_REG_T2, ctx); + *rd = HPPA_REG_T2; + emit_hppa64_zext32(*rs, HPPA_REG_T1, ctx); + *rs = HPPA_REG_T1; +} + +static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct hppa_jit_context *ctx) +{ + emit_hppa64_sext32(*rd, HPPA_REG_T2, ctx); + *rd = HPPA_REG_T2; + emit_hppa64_sext32(*rs, HPPA_REG_T1, ctx); + *rs = HPPA_REG_T1; +} + +static void emit_zext_32_rd_t1(u8 *rd, struct hppa_jit_context *ctx) +{ + emit_hppa64_zext32(*rd, HPPA_REG_T2, ctx); + *rd = HPPA_REG_T2; + emit_zext_32(HPPA_REG_T1, ctx); +} + +static void emit_sext_32_rd(u8 *rd, struct hppa_jit_context *ctx) +{ + emit_hppa64_sext32(*rd, HPPA_REG_T2, ctx); + *rd = HPPA_REG_T2; +} + +static bool is_signed_bpf_cond(u8 cond) +{ + return cond == BPF_JSGT || cond == BPF_JSLT || + cond == BPF_JSGE || cond == BPF_JSLE; +} + +static void emit_call(u64 addr, bool fixed, struct hppa_jit_context *ctx) +{ + const int offset_sp = 2*FRAME_SIZE; + + emit(hppa_ldo(offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx); + + emit_hppa_copy(regmap[BPF_REG_1], HPPA_REG_ARG0, ctx); + emit_hppa_copy(regmap[BPF_REG_2], HPPA_REG_ARG1, ctx); + emit_hppa_copy(regmap[BPF_REG_3], HPPA_REG_ARG2, ctx); + emit_hppa_copy(regmap[BPF_REG_4], HPPA_REG_ARG3, ctx); + emit_hppa_copy(regmap[BPF_REG_5], HPPA_REG_ARG4, ctx); + + /* Backup TCC. */ + REG_FORCE_SEEN(ctx, HPPA_REG_TCC_SAVED); + if (REG_WAS_SEEN(ctx, HPPA_REG_TCC)) + emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_SAVED), ctx); + + /* + * Use ldil() to load absolute address. Don't use emit_imm as the + * number of emitted instructions should not depend on the value of + * addr. + */ + WARN_ON(addr >> 32); + /* load function address and gp from Elf64_Fdesc descriptor */ + emit(hppa_ldil(addr, HPPA_REG_R31), ctx); + emit(hppa_ldo(im11(addr), HPPA_REG_R31, HPPA_REG_R31), ctx); + emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, addr), + HPPA_REG_R31, HPPA_REG_RP), ctx); + emit(hppa64_bve_l_rp(HPPA_REG_RP), ctx); + emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, gp), + HPPA_REG_R31, HPPA_REG_GP), ctx); + + /* Restore TCC. */ + if (REG_WAS_SEEN(ctx, HPPA_REG_TCC)) + emit(hppa_copy(HPPA_REG_TCC_SAVED, HPPA_REG_TCC), ctx); + + emit(hppa_ldo(-offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx); + + /* Set return value. 
 */ + emit_hppa_copy(HPPA_REG_RET0, regmap[BPF_REG_0], ctx); +} + +static void emit_call_libgcc_ll(void *func, const s8 arg0, + const s8 arg1, u8 opcode, struct hppa_jit_context *ctx) +{ + u64 func_addr; + + if (BPF_CLASS(opcode) == BPF_ALU) { + emit_hppa64_zext32(arg0, HPPA_REG_ARG0, ctx); + emit_hppa64_zext32(arg1, HPPA_REG_ARG1, ctx); + } else { + emit_hppa_copy(arg0, HPPA_REG_ARG0, ctx); + emit_hppa_copy(arg1, HPPA_REG_ARG1, ctx); + } + + /* libgcc overwrites HPPA_REG_RET0, so keep a copy in HPPA_REG_TCC_SAVED */ + if (arg0 != HPPA_REG_RET0) { + REG_SET_SEEN(ctx, HPPA_REG_TCC_SAVED); + emit(hppa_copy(HPPA_REG_RET0, HPPA_REG_TCC_SAVED), ctx); + } + + /* set up stack */ + emit(hppa_ldo(FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx); + + func_addr = (uintptr_t) func; + /* load function address and gp from Elf64_Fdesc descriptor */ + emit_imm(HPPA_REG_R31, func_addr, arg0, ctx); + emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, addr), + HPPA_REG_R31, HPPA_REG_RP), ctx); + /* skip the following bve_l instruction if divisor is 0. */ + if (BPF_OP(opcode) == BPF_DIV || BPF_OP(opcode) == BPF_MOD) { + if (BPF_OP(opcode) == BPF_DIV) + emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET0, ctx); + else { + emit_hppa_copy(HPPA_REG_ARG0, HPPA_REG_RET0, ctx); + } + emit(hppa_beq(HPPA_REG_ARG1, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx); + } + emit(hppa64_bve_l_rp(HPPA_REG_RP), ctx); + emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, gp), + HPPA_REG_R31, HPPA_REG_GP), ctx); + + emit(hppa_ldo(-FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx); + + emit_hppa_copy(HPPA_REG_RET0, arg0, ctx); + + /* restore HPPA_REG_RET0 */ + if (arg0 != HPPA_REG_RET0) + emit(hppa_copy(HPPA_REG_TCC_SAVED, HPPA_REG_RET0), ctx); +} + +static void emit_store(const s8 rd, const s8 rs, s16 off, + struct hppa_jit_context *ctx, const u8 size, + const u8 mode) +{ + s8 dstreg; + + /* need to calculate address since offset does not fit in 14 bits? 
*/ + if (relative_bits_ok(off, 14)) + dstreg = rd; + else { + /* need to use R1 here, since addil puts result into R1 */ + dstreg = HPPA_REG_R1; + emit(hppa_addil(off, rd), ctx); + off = im11(off); + } + + switch (size) { + case BPF_B: + emit(hppa_stb(rs, off, dstreg), ctx); + break; + case BPF_H: + emit(hppa_sth(rs, off, dstreg), ctx); + break; + case BPF_W: + emit(hppa_stw(rs, off, dstreg), ctx); + break; + case BPF_DW: + if (off & 7) { + emit(hppa_ldo(off, dstreg, HPPA_REG_R1), ctx); + emit(hppa64_std_im5(rs, 0, HPPA_REG_R1), ctx); + } else if (off >= -16 && off <= 15) + emit(hppa64_std_im5(rs, off, dstreg), ctx); + else + emit(hppa64_std_im16(rs, off, dstreg), ctx); + break; + } +} + +int bpf_jit_emit_insn(const struct bpf_insn *insn, struct hppa_jit_context *ctx, + bool extra_pass) +{ + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 || + BPF_CLASS(insn->code) == BPF_JMP; + int s, e, ret, i = insn - ctx->prog->insnsi; + s64 paoff; + struct bpf_prog_aux *aux = ctx->prog->aux; + u8 rd = -1, rs = -1, code = insn->code; + s16 off = insn->off; + s32 imm = insn->imm; + + init_regs(&rd, &rs, insn, ctx); + + switch (code) { + /* dst = src */ + case BPF_ALU | BPF_MOV | BPF_X: + case BPF_ALU64 | BPF_MOV | BPF_X: + if (imm == 1) { + /* Special mov32 for zext */ + emit_zext_32(rd, ctx); + break; + } + if (!is64 && !aux->verifier_zext) + emit_hppa64_zext32(rs, rd, ctx); + else + emit_hppa_copy(rs, rd, ctx); + break; + + /* dst = dst OP src */ + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: + emit(hppa_add(rd, rs, rd), ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + emit(hppa_sub(rd, rs, rd), ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_X: + emit(hppa_and(rd, rs, rd), ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + emit(hppa_or(rd, rs, rd), ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + emit(hppa_xor(rd, rs, rd), ctx); + if (!is64 && !aux->verifier_zext && rs != rd) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_K: + emit_imm(HPPA_REG_T1, is64 ? (s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx); + rs = HPPA_REG_T1; + fallthrough; + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_X: + emit_call_libgcc_ll(__muldi3, rd, rs, code, ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: + emit_imm(HPPA_REG_T1, is64 ? (s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx); + rs = HPPA_REG_T1; + fallthrough; + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_X: + emit_call_libgcc_ll(&hppa_div64, rd, rs, code, ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_K: + emit_imm(HPPA_REG_T1, is64 ? 
(s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx); + rs = HPPA_REG_T1; + fallthrough; + case BPF_ALU | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_X: + emit_call_libgcc_ll(&hppa_div64_rem, rd, rs, code, ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU64 | BPF_LSH | BPF_X: + emit_hppa64_sext32(rs, HPPA_REG_T0, ctx); + emit(hppa64_mtsarcm(HPPA_REG_T0), ctx); + if (is64) + emit(hppa64_depdz_sar(rd, rd), ctx); + else + emit(hppa_depwz_sar(rd, rd), ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU64 | BPF_RSH | BPF_X: + emit(hppa_mtsar(rs), ctx); + if (is64) + emit(hppa64_shrpd_sar(rd, rd), ctx); + else + emit(hppa_shrpw_sar(rd, rd), ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_X: + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit_hppa64_sext32(rs, HPPA_REG_T0, ctx); + emit(hppa64_mtsarcm(HPPA_REG_T0), ctx); + if (is64) + emit(hppa_extrd_sar(rd, rd, 1), ctx); + else + emit(hppa_extrws_sar(rd, rd), ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + + /* dst = -dst */ + case BPF_ALU | BPF_NEG: + case BPF_ALU64 | BPF_NEG: + emit(hppa_sub(HPPA_REG_ZERO, rd, rd), ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + + /* dst = BSWAP##imm(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_BE: + switch (imm) { + case 16: + /* zero-extend 16 bits into 64 bits */ + emit_hppa64_depd(HPPA_REG_ZERO, 63-16, 64-16, rd, 1, ctx); + break; + case 32: + if (!aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case 64: + /* Do nothing */ + break; + } + break; + + case BPF_ALU | BPF_END | BPF_FROM_LE: + switch (imm) { + case 16: + emit(hppa_extru(rd, 31 - 8, 8, HPPA_REG_T1), ctx); + emit(hppa_depwz(rd, 23, 8, HPPA_REG_T1), ctx); + emit(hppa_extru(HPPA_REG_T1, 31, 16, rd), ctx); + emit_hppa64_extrd(HPPA_REG_T1, 63, 16, rd, 0, ctx); + break; + case 32: + emit(hppa_shrpw(rd, rd, 16, HPPA_REG_T1), ctx); + emit_hppa64_depd(HPPA_REG_T1, 63-16, 8, HPPA_REG_T1, 1, ctx); + emit(hppa_shrpw(rd, HPPA_REG_T1, 8, HPPA_REG_T1), ctx); + emit_hppa64_extrd(HPPA_REG_T1, 63, 32, rd, 0, ctx); + break; + case 64: + emit(hppa64_permh_3210(rd, HPPA_REG_T1), ctx); + emit(hppa64_hshl(HPPA_REG_T1, 8, HPPA_REG_T2), ctx); + emit(hppa64_hshr_u(HPPA_REG_T1, 8, HPPA_REG_T1), ctx); + emit(hppa_or(HPPA_REG_T2, HPPA_REG_T1, rd), ctx); + break; + default: + pr_err("bpf-jit: BPF_END imm %d invalid\n", imm); + return -1; + } + break; + + /* dst = imm */ + case BPF_ALU | BPF_MOV | BPF_K: + case BPF_ALU64 | BPF_MOV | BPF_K: + emit_imm(rd, imm, HPPA_REG_T2, ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + + /* dst = dst OP imm */ + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + if (relative_bits_ok(imm, 14)) { + emit(hppa_ldo(imm, rd, rd), ctx); + } else { + emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx); + emit(hppa_add(rd, HPPA_REG_T1, rd), ctx); + } + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + if (relative_bits_ok(-imm, 14)) { + emit(hppa_ldo(-imm, rd, rd), ctx); + } else { + emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx); + emit(hppa_sub(rd, HPPA_REG_T1, rd), ctx); + } + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx); + 
emit(hppa_and(rd, HPPA_REG_T1, rd), ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx); + emit(hppa_or(rd, HPPA_REG_T1, rd), ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx); + emit(hppa_xor(rd, HPPA_REG_T1, rd), ctx); + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_LSH | BPF_K: + if (imm != 0) { + emit_hppa64_shld(rd, imm, rd, ctx); + } + + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: + if (imm != 0) { + if (is64) + emit_hppa64_shrd(rd, imm, rd, false, ctx); + else + emit_hppa64_shrw(rd, imm, rd, false, ctx); + } + + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: + if (imm != 0) { + if (is64) + emit_hppa64_shrd(rd, imm, rd, true, ctx); + else + emit_hppa64_shrw(rd, imm, rd, true, ctx); + } + + if (!is64 && !aux->verifier_zext) + emit_zext_32(rd, ctx); + break; + + /* JUMP off */ + case BPF_JMP | BPF_JA: + paoff = hppa_offset(i, off, ctx); + ret = emit_jump(paoff, false, ctx); + if (ret) + return ret; + break; + + /* IF (dst COND src) JUMP off */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + paoff = hppa_offset(i, off, ctx); + if (!is64) { + s = ctx->ninsns; + if (is_signed_bpf_cond(BPF_OP(code))) + emit_sext_32_rd_rs(&rd, &rs, ctx); + else + emit_zext_32_rd_rs(&rd, &rs, ctx); + e = ctx->ninsns; + + /* Adjust for extra insns */ + paoff -= (e - s); + } + if (BPF_OP(code) == BPF_JSET) { + /* Adjust for and */ + paoff -= 1; + emit(hppa_and(rs, rd, HPPA_REG_T1), ctx); + emit_branch(BPF_JNE, HPPA_REG_T1, HPPA_REG_ZERO, paoff, + ctx); + } else { + emit_branch(BPF_OP(code), rd, rs, paoff, ctx); + } + break; + + /* IF (dst COND imm) JUMP off */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLE | 
BPF_K: + paoff = hppa_offset(i, off, ctx); + s = ctx->ninsns; + if (imm) { + emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx); + rs = HPPA_REG_T1; + } else { + rs = HPPA_REG_ZERO; + } + if (!is64) { + if (is_signed_bpf_cond(BPF_OP(code))) + emit_sext_32_rd(&rd, ctx); + else + emit_zext_32_rd_t1(&rd, ctx); + } + e = ctx->ninsns; + + /* Adjust for extra insns */ + paoff -= (e - s); + emit_branch(BPF_OP(code), rd, rs, paoff, ctx); + break; + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + paoff = hppa_offset(i, off, ctx); + s = ctx->ninsns; + emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx); + emit(hppa_and(HPPA_REG_T1, rd, HPPA_REG_T1), ctx); + /* For jset32, we should clear the upper 32 bits of t1, but + * sign-extension is sufficient here and saves one instruction, + * as t1 is used only in comparison against zero. + */ + if (!is64 && imm < 0) + emit_hppa64_sext32(HPPA_REG_T1, HPPA_REG_T1, ctx); + e = ctx->ninsns; + paoff -= (e - s); + emit_branch(BPF_JNE, HPPA_REG_T1, HPPA_REG_ZERO, paoff, ctx); + break; + /* function call */ + case BPF_JMP | BPF_CALL: + { + bool fixed_addr; + u64 addr; + + ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, + &addr, &fixed_addr); + if (ret < 0) + return ret; + + REG_SET_SEEN_ALL(ctx); + emit_call(addr, fixed_addr, ctx); + break; + } + /* tail call */ + case BPF_JMP | BPF_TAIL_CALL: + emit_bpf_tail_call(i, ctx); + break; + + /* function return */ + case BPF_JMP | BPF_EXIT: + if (i == ctx->prog->len - 1) + break; + + paoff = epilogue_offset(ctx); + ret = emit_jump(paoff, false, ctx); + if (ret) + return ret; + break; + + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + { + struct bpf_insn insn1 = insn[1]; + u64 imm64 = (u64)insn1.imm << 32 | (u32)imm; + if (bpf_pseudo_func(insn)) + imm64 = (uintptr_t)dereference_function_descriptor((void*)imm64); + emit_imm(rd, imm64, HPPA_REG_T2, ctx); + + return 1; + } + + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_B: + case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + { + u8 srcreg; + + /* need to calculate address since offset does not fit in 14 bits? 
*/ + if (relative_bits_ok(off, 14)) + srcreg = rs; + else { + /* need to use R1 here, since addil puts result into R1 */ + srcreg = HPPA_REG_R1; + BUG_ON(rs == HPPA_REG_R1); + BUG_ON(rd == HPPA_REG_R1); + emit(hppa_addil(off, rs), ctx); + off = im11(off); + } + + switch (BPF_SIZE(code)) { + case BPF_B: + emit(hppa_ldb(off, srcreg, rd), ctx); + if (insn_is_zext(&insn[1])) + return 1; + break; + case BPF_H: + emit(hppa_ldh(off, srcreg, rd), ctx); + if (insn_is_zext(&insn[1])) + return 1; + break; + case BPF_W: + emit(hppa_ldw(off, srcreg, rd), ctx); + if (insn_is_zext(&insn[1])) + return 1; + break; + case BPF_DW: + if (off & 7) { + emit(hppa_ldo(off, srcreg, HPPA_REG_R1), ctx); + emit(hppa64_ldd_reg(HPPA_REG_ZERO, HPPA_REG_R1, rd), ctx); + } else if (off >= -16 && off <= 15) + emit(hppa64_ldd_im5(off, srcreg, rd), ctx); + else + emit(hppa64_ldd_im16(off, srcreg, rd), ctx); + break; + } + break; + } + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; + + /* ST: *(size *)(dst + off) = imm */ + /* STX: *(size *)(dst + off) = src */ + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_DW: + + case BPF_STX | BPF_MEM | BPF_B: + case BPF_STX | BPF_MEM | BPF_H: + case BPF_STX | BPF_MEM | BPF_W: + case BPF_STX | BPF_MEM | BPF_DW: + if (BPF_CLASS(code) == BPF_ST) { + emit_imm(HPPA_REG_T2, imm, HPPA_REG_T1, ctx); + rs = HPPA_REG_T2; + } + + emit_store(rd, rs, off, ctx, BPF_SIZE(code), BPF_MODE(code)); + break; + + case BPF_STX | BPF_ATOMIC | BPF_W: + case BPF_STX | BPF_ATOMIC | BPF_DW: + pr_info_once( + "bpf-jit: not supported: atomic operation %02x ***\n", + insn->imm); + return -EFAULT; + + default: + pr_err("bpf-jit: unknown opcode %02x\n", code); + return -EINVAL; + } + + return 0; +} + +void bpf_jit_build_prologue(struct hppa_jit_context *ctx) +{ + int bpf_stack_adjust, stack_adjust, i; + unsigned long addr; + s8 reg; + + /* + * stack on hppa grows up, so if tail calls are used we need to + * allocate the maximum stack size + */ + if (REG_ALL_SEEN(ctx)) + bpf_stack_adjust = MAX_BPF_STACK; + else + bpf_stack_adjust = ctx->prog->aux->stack_depth; + bpf_stack_adjust = round_up(bpf_stack_adjust, STACK_ALIGN); + + stack_adjust = FRAME_SIZE + bpf_stack_adjust; + stack_adjust = round_up(stack_adjust, STACK_ALIGN); + + /* + * NOTE: We construct an Elf64_Fdesc descriptor here. + * The first 4 words initialize the TCC and compares them. + * Then follows the virtual address of the eBPF function, + * and the gp for this function. + * + * The first instruction sets the tail-call-counter (TCC) register. + * This instruction is skipped by tail calls. + * Use a temporary register instead of a caller-saved register initially. + */ + REG_FORCE_SEEN(ctx, HPPA_REG_TCC_IN_INIT); + emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC_IN_INIT), ctx); + + /* + * Skip all initializations when called as BPF TAIL call. + */ + emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_R1), ctx); + emit(hppa_beq(HPPA_REG_TCC_IN_INIT, HPPA_REG_R1, 6 - HPPA_BRANCH_DISPLACEMENT), ctx); + emit(hppa64_bl_long(ctx->prologue_len - 3 - HPPA_BRANCH_DISPLACEMENT), ctx); + + /* store entry address of this eBPF function */ + addr = (uintptr_t) &ctx->insns[0]; + emit(addr >> 32, ctx); + emit(addr & 0xffffffff, ctx); + + /* store gp of this eBPF function */ + asm("copy %%r27,%0" : "=r" (addr) ); + emit(addr >> 32, ctx); + emit(addr & 0xffffffff, ctx); + + /* Set up hppa stack frame. 
*/ + emit_hppa_copy(HPPA_REG_SP, HPPA_REG_R1, ctx); + emit(hppa_ldo(stack_adjust, HPPA_REG_SP, HPPA_REG_SP), ctx); + emit(hppa64_std_im5 (HPPA_REG_R1, -REG_SIZE, HPPA_REG_SP), ctx); + emit(hppa64_std_im16(HPPA_REG_RP, -2*REG_SIZE, HPPA_REG_SP), ctx); + + /* Save callee-save registers. */ + for (i = 3; i <= 15; i++) { + if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i))) + continue; + emit(hppa64_std_im16(HPPA_R(i), -REG_SIZE * i, HPPA_REG_SP), ctx); + } + + /* load function parameters; load all if we use tail functions */ + #define LOAD_PARAM(arg, dst) \ + if (REG_WAS_SEEN(ctx, regmap[dst]) || \ + REG_WAS_SEEN(ctx, HPPA_REG_TCC)) \ + emit_hppa_copy(arg, regmap[dst], ctx) + LOAD_PARAM(HPPA_REG_ARG0, BPF_REG_1); + LOAD_PARAM(HPPA_REG_ARG1, BPF_REG_2); + LOAD_PARAM(HPPA_REG_ARG2, BPF_REG_3); + LOAD_PARAM(HPPA_REG_ARG3, BPF_REG_4); + LOAD_PARAM(HPPA_REG_ARG4, BPF_REG_5); + #undef LOAD_PARAM + + REG_FORCE_SEEN(ctx, HPPA_REG_T0); + REG_FORCE_SEEN(ctx, HPPA_REG_T1); + REG_FORCE_SEEN(ctx, HPPA_REG_T2); + + /* + * Now really set the tail call counter (TCC) register. + */ + if (REG_WAS_SEEN(ctx, HPPA_REG_TCC)) + emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC), ctx); + + /* + * Save epilogue function pointer for outer TCC call chain. + * The main TCC call stores the final RP on stack. + */ + addr = (uintptr_t) &ctx->insns[ctx->epilogue_offset]; + /* skip first two instructions which jump to exit */ + addr += 2 * HPPA_INSN_SIZE; + emit_imm(HPPA_REG_T2, addr, HPPA_REG_T1, ctx); + emit(EXIT_PTR_STORE(HPPA_REG_T2), ctx); + + /* Set up BPF frame pointer. */ + reg = regmap[BPF_REG_FP]; /* -> HPPA_REG_FP */ + if (REG_WAS_SEEN(ctx, reg)) { + emit(hppa_ldo(-FRAME_SIZE, HPPA_REG_SP, reg), ctx); + } +} + +void bpf_jit_build_epilogue(struct hppa_jit_context *ctx) +{ + __build_epilogue(false, ctx); +} + +bool bpf_jit_supports_kfunc_call(void) +{ + return true; +} diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c new file mode 100644 index 000000000000..06cbcd6fe87b --- /dev/null +++ b/arch/parisc/net/bpf_jit_core.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Common functionality for HPPA32 and HPPA64 BPF JIT compilers + * + * Copyright (c) 2023 Helge Deller <deller@gmx.de> + * + */ + +#include <linux/bpf.h> +#include <linux/filter.h> +#include "bpf_jit.h" + +/* Number of iterations to try until offsets converge. */ +#define NR_JIT_ITERATIONS 35 + +static int build_body(struct hppa_jit_context *ctx, bool extra_pass, int *offset) +{ + const struct bpf_prog *prog = ctx->prog; + int i; + + ctx->reg_seen_collect = true; + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + int ret; + + ret = bpf_jit_emit_insn(insn, ctx, extra_pass); + /* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. 
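+ * A 64-bit immediate load occupies two struct bpf_insn slots and the
+ * emitter returns 1 for it, so advance i past the second half here.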
*/ + if (ret > 0) + i++; + if (offset) + offset[i] = ctx->ninsns; + if (ret < 0) + return ret; + } + ctx->reg_seen_collect = false; + return 0; +} + +bool bpf_jit_needs_zext(void) +{ + return true; +} + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) +{ + unsigned int prog_size = 0, extable_size = 0; + bool tmp_blinded = false, extra_pass = false; + struct bpf_prog *tmp, *orig_prog = prog; + int pass = 0, prev_ninsns = 0, prologue_len, i; + struct hppa_jit_data *jit_data; + struct hppa_jit_context *ctx; + + if (!prog->jit_requested) + return orig_prog; + + tmp = bpf_jit_blind_constants(prog); + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } + + jit_data = prog->aux->jit_data; + if (!jit_data) { + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); + if (!jit_data) { + prog = orig_prog; + goto out; + } + prog->aux->jit_data = jit_data; + } + + ctx = &jit_data->ctx; + + if (ctx->offset) { + extra_pass = true; + prog_size = sizeof(*ctx->insns) * ctx->ninsns; + goto skip_init_ctx; + } + + ctx->prog = prog; + ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); + if (!ctx->offset) { + prog = orig_prog; + goto out_offset; + } + for (i = 0; i < prog->len; i++) { + prev_ninsns += 20; + ctx->offset[i] = prev_ninsns; + } + + for (i = 0; i < NR_JIT_ITERATIONS; i++) { + pass++; + ctx->ninsns = 0; + if (build_body(ctx, extra_pass, ctx->offset)) { + prog = orig_prog; + goto out_offset; + } + ctx->body_len = ctx->ninsns; + bpf_jit_build_prologue(ctx); + ctx->prologue_len = ctx->ninsns - ctx->body_len; + ctx->epilogue_offset = ctx->ninsns; + bpf_jit_build_epilogue(ctx); + + if (ctx->ninsns == prev_ninsns) { + if (jit_data->header) + break; + /* obtain the actual image size */ + extable_size = prog->aux->num_exentries * + sizeof(struct exception_table_entry); + prog_size = sizeof(*ctx->insns) * ctx->ninsns; + + jit_data->header = + bpf_jit_binary_alloc(prog_size + extable_size, + &jit_data->image, + sizeof(long), + bpf_fill_ill_insns); + if (!jit_data->header) { + prog = orig_prog; + goto out_offset; + } + + ctx->insns = (u32 *)jit_data->image; + /* + * Now, when the image is allocated, the image can + * potentially shrink more (auipc/jalr -> jal). 
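+ * (auipc/jalr and jal are RISC-V mnemonics; the wording appears to be
+ * inherited from the RISC-V JIT this convergence loop mirrors. On hppa
+ * the shrinking would come from long branch/call sequences collapsing
+ * into shorter encodings once the final addresses are known.)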
+ */ + } + prev_ninsns = ctx->ninsns; + } + + if (i == NR_JIT_ITERATIONS) { + pr_err("bpf-jit: image did not converge in <%d passes!\n", i); + if (jit_data->header) + bpf_jit_binary_free(jit_data->header); + prog = orig_prog; + goto out_offset; + } + + if (extable_size) + prog->aux->extable = (void *)ctx->insns + prog_size; + +skip_init_ctx: + pass++; + ctx->ninsns = 0; + + bpf_jit_build_prologue(ctx); + if (build_body(ctx, extra_pass, NULL)) { + bpf_jit_binary_free(jit_data->header); + prog = orig_prog; + goto out_offset; + } + bpf_jit_build_epilogue(ctx); + + if (HPPA_JIT_DEBUG || bpf_jit_enable > 1) { + if (HPPA_JIT_DUMP) + bpf_jit_dump(prog->len, prog_size, pass, ctx->insns); + if (HPPA_JIT_REBOOT) + { extern int machine_restart(char *); machine_restart(""); } + } + + prog->bpf_func = (void *)ctx->insns; + prog->jited = 1; + prog->jited_len = prog_size; + + bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns); + + if (!prog->is_func || extra_pass) { + if (bpf_jit_binary_lock_ro(jit_data->header)) { + bpf_jit_binary_free(jit_data->header); + prog->bpf_func = NULL; + prog->jited = 0; + prog->jited_len = 0; + goto out_offset; + } + prologue_len = ctx->epilogue_offset - ctx->body_len; + for (i = 0; i < prog->len; i++) + ctx->offset[i] += prologue_len; + bpf_prog_fill_jited_linfo(prog, ctx->offset); +out_offset: + kfree(ctx->offset); + kfree(jit_data); + prog->aux->jit_data = NULL; + } +out: + if (HPPA_JIT_REBOOT) + { extern int machine_restart(char *); machine_restart(""); } + + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? + tmp : orig_prog); + return prog; +} + +u64 hppa_div64(u64 div, u64 divisor) +{ + div = div64_u64(div, divisor); + return div; +} + +u64 hppa_div64_rem(u64 div, u64 divisor) +{ + u64 rem; + div64_u64_rem(div, divisor, &rem); + return rem; +} diff --git a/arch/parisc/nm b/arch/parisc/nm deleted file mode 100644 index c788308de33f..000000000000 --- a/arch/parisc/nm +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -## -# Hack to have an nm which removes the local symbols. 
We also rely -# on this nm being hidden out of the ordinarily executable path -## -${CROSS_COMPILE}nm $* | grep -v '.LC*[0-9]*$' diff --git a/arch/parisc/oprofile/Makefile b/arch/parisc/oprofile/Makefile deleted file mode 100644 index 86a1ccc328eb..000000000000 --- a/arch/parisc/oprofile/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_OPROFILE) += oprofile.o - -DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ - oprof.o cpu_buffer.o buffer_sync.o \ - event_buffer.o oprofile_files.o \ - oprofilefs.o oprofile_stats.o \ - timer_int.o ) - -oprofile-y := $(DRIVER_OBJS) init.o diff --git a/arch/parisc/oprofile/init.c b/arch/parisc/oprofile/init.c deleted file mode 100644 index 026cba2af07a..000000000000 --- a/arch/parisc/oprofile/init.c +++ /dev/null @@ -1,23 +0,0 @@ -/** - * @file init.c - * - * @remark Copyright 2002 OProfile authors - * @remark Read the file COPYING - * - * @author John Levon <levon@movementarian.org> - */ - -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/oprofile.h> - -int __init oprofile_arch_init(struct oprofile_operations *ops) -{ - return -ENODEV; -} - - -void oprofile_arch_exit(void) -{ -} diff --git a/arch/parisc/video/Makefile b/arch/parisc/video/Makefile new file mode 100644 index 000000000000..b5db5b42880f --- /dev/null +++ b/arch/parisc/video/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_STI_CORE) += video-sti.o diff --git a/arch/parisc/video/video-sti.c b/arch/parisc/video/video-sti.c new file mode 100644 index 000000000000..564661e87093 --- /dev/null +++ b/arch/parisc/video/video-sti.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> + * Copyright (C) 2001-2020 Helge Deller <deller@gmx.de> + * Copyright (C) 2001-2002 Thomas Bogendoerfer <tsbogend@alpha.franken.de> + */ + +#include <linux/module.h> + +#include <video/sticore.h> + +#include <asm/video.h> + +bool video_is_primary_device(struct device *dev) +{ + struct sti_struct *sti; + + sti = sti_get_rom(0); + + /* if no built-in graphics card found, allow any fb driver as default */ + if (!sti) + return true; + + /* return true if it's the default built-in framebuffer driver */ + return (sti->dev == dev); +} +EXPORT_SYMBOL(video_is_primary_device); |
