Diffstat (limited to 'arch/openrisc')
75 files changed, 1637 insertions, 1416 deletions
diff --git a/arch/openrisc/Kbuild b/arch/openrisc/Kbuild new file mode 100644 index 000000000000..70bdb24ff204 --- /dev/null +++ b/arch/openrisc/Kbuild @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y += lib/ kernel/ mm/ + +# for cleaning +subdir- += boot diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 1928e061ff96..b38fee299bc4 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -7,36 +7,43 @@ config OPENRISC def_bool y select ARCH_32BIT_OFF_T + select ARCH_HAS_DMA_SET_UNCACHED + select ARCH_HAS_DMA_CLEAR_UNCACHED select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select GENERIC_BUILTIN_DTB + select COMMON_CLK select OF select OF_EARLY_FLATTREE select IRQ_DOMAIN - select HANDLE_DOMAIN_IRQ select GPIOLIB select HAVE_ARCH_TRACEHOOK select SPARSE_IRQ select GENERIC_IRQ_CHIP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW - select GENERIC_IOMAP + select GENERIC_PCI_IOMAP + select GENERIC_IOREMAP select GENERIC_CPU_DEVICES + select HAVE_PCI select HAVE_UID16 + select HAVE_PAGE_SIZE_8KB + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RSEQ select GENERIC_ATOMIC64 - select GENERIC_CLOCKEVENTS select GENERIC_CLOCKEVENTS_BROADCAST - select GENERIC_STRNCPY_FROM_USER - select GENERIC_STRNLEN_USER select GENERIC_SMP_IDLE_THREAD select MODULES_USE_ELF_RELA select HAVE_DEBUG_STACKOVERFLOW select OR1K_PIC select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1 - select ARCH_USE_QUEUED_SPINLOCKS select ARCH_USE_QUEUED_RWLOCKS select OMPIC if SMP + select PCI_DOMAINS_GENERIC if PCI + select PCI_MSI if PCI select ARCH_WANT_FRAME_POINTERS select GENERIC_IRQ_MULTI_HANDLER select MMU_GATHER_NO_RANGE if MMU + select TRACE_IRQFLAGS_SUPPORT config CPU_BIG_ENDIAN def_bool y @@ -50,9 +57,6 @@ config GENERIC_HWEIGHT config NO_IOPORT_MAP def_bool y -config TRACE_IRQFLAGS_SUPPORT - def_bool y - # For now, use generic checksum functions #These can be reimplemented in assembly later if so inclined config GENERIC_CSUM @@ -64,6 +68,9 @@ config STACKTRACE_SUPPORT config LOCKDEP_SUPPORT def_bool y +config FIX_EARLYCON_MEM + def_bool y + menu "Processor type and features" choice @@ -88,7 +95,7 @@ config DCACHE_WRITETHROUGH If unsure say N here -config OPENRISC_BUILTIN_DTB +config BUILTIN_DTB_NAME string "Builtin DTB" default "" @@ -117,6 +124,59 @@ config OPENRISC_HAVE_INST_DIV default y help Select this if your implementation has a hardware divide instruction + +config OPENRISC_HAVE_INST_CMOV + bool "Have instruction l.cmov for conditional move" + default n + help + This config enables gcc to generate l.cmov instructions when compiling + the kernel which in general will improve performance and reduce the + binary size. + + Select this if your implementation has support for the Class II + l.cmov conistional move instruction. + + Say N if you are unsure. + +config OPENRISC_HAVE_INST_ROR + bool "Have instruction l.ror for rotate right" + default n + help + This config enables gcc to generate l.ror instructions when compiling + the kernel which in general will improve performance and reduce the + binary size. + + Select this if your implementation has support for the Class II + l.ror rotate right instruction. + + Say N if you are unsure. + +config OPENRISC_HAVE_INST_RORI + bool "Have instruction l.rori for rotate right with immediate" + default n + help + This config enables gcc to generate l.rori instructions when compiling + the kernel which in general will improve performance and reduce the + binary size. 
+ + Select this if your implementation has support for the Class II + l.rori rotate right with immediate instruction. + + Say N if you are unsure. + +config OPENRISC_HAVE_INST_SEXT + bool "Have instructions l.ext* for sign extension" + default n + help + This config enables gcc to generate l.ext* instructions when compiling + the kernel which in general will improve performance and reduce the + binary size. + + Select this if your implementation has support for the Class II + l.exths, l.extbs, l.exthz and l.extbz size extend instructions. + + Say N if you are unsure. + endmenu config NR_CPUS @@ -134,6 +194,15 @@ config SMP If you don't know what to do here, say N. +config FPU + bool "FPU support" + default y + help + Say N here if you want to disable all floating-point related procedures + in the kernel and reduce binary size. + + If you don't know what to do here, say Y. + source "kernel/Kconfig.hz" config OPENRISC_NO_SPR_SR_DSX diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile index bf10141c7426..68249521db5a 100644 --- a/arch/openrisc/Makefile +++ b/arch/openrisc/Makefile @@ -1,9 +1,7 @@ # BK Id: %F% %I% %G% %U% %#% # # This file is included by the global makefile so that you can add your own -# architecture-specific flags and dependencies. Remember to do have actions -# for "archclean" and "archdep" for cleaning up and making dependencies for -# this architecture +# architecture-specific flags and dependencies. # # This file is subject to the terms and conditions of the GNU General Public # License. See the file "COPYING" in the main directory of this archive @@ -23,6 +21,11 @@ OBJCOPYFLAGS := -O binary -R .note -R .comment -S LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ +KBUILD_CFLAGS += -msfimm -mshftimm + +all: vmlinux.bin + +boot := arch/$(ARCH)/boot ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y) KBUILD_CFLAGS += $(call cc-option,-mhard-mul) @@ -36,16 +39,25 @@ else KBUILD_CFLAGS += $(call cc-option,-msoft-div) endif -head-y := arch/openrisc/kernel/head.o +ifeq ($(CONFIG_OPENRISC_HAVE_INST_CMOV),y) + KBUILD_CFLAGS += $(call cc-option,-mcmov) +endif -core-y += arch/openrisc/lib/ \ - arch/openrisc/kernel/ \ - arch/openrisc/mm/ -libs-y += $(LIBGCC) +ifeq ($(CONFIG_OPENRISC_HAVE_INST_ROR),y) + KBUILD_CFLAGS += $(call cc-option,-mror) +endif -ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""' -BUILTIN_DTB := y -else -BUILTIN_DTB := n +ifeq ($(CONFIG_OPENRISC_HAVE_INST_RORI),y) + KBUILD_CFLAGS += $(call cc-option,-mrori) endif -core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ + +ifeq ($(CONFIG_OPENRISC_HAVE_INST_SEXT),y) + KBUILD_CFLAGS += $(call cc-option,-msext) +endif + +libs-y += $(LIBGCC) + +PHONY += vmlinux.bin + +vmlinux.bin: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ diff --git a/arch/openrisc/boot/.gitignore b/arch/openrisc/boot/.gitignore new file mode 100644 index 000000000000..007d6fea3145 --- /dev/null +++ b/arch/openrisc/boot/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +vmlinux.bin diff --git a/arch/openrisc/boot/Makefile b/arch/openrisc/boot/Makefile new file mode 100644 index 000000000000..5b28538f4dd1 --- /dev/null +++ b/arch/openrisc/boot/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for bootable kernel images +# + +targets += vmlinux.bin + +OBJCOPYFLAGS_vmlinux.bin := -O binary +$(obj)/vmlinux.bin: vmlinux FORCE + $(call if_changed,objcopy) diff --git a/arch/openrisc/boot/dts/Makefile b/arch/openrisc/boot/dts/Makefile index 
17dd791a833f..3a66e0ef3985 100644 --- a/arch/openrisc/boot/dts/Makefile +++ b/arch/openrisc/boot/dts/Makefile @@ -1,9 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""' -BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_OPENRISC_BUILTIN_DTB)).dtb.o -else -BUILTIN_DTB := -endif -obj-y += $(BUILTIN_DTB) +dtb-y += $(addsuffix .dtb, $(CONFIG_BUILTIN_DTB_NAME)) #DTC_FLAGS ?= -p 1024 diff --git a/arch/openrisc/boot/dts/or1klitex.dts b/arch/openrisc/boot/dts/or1klitex.dts new file mode 100644 index 000000000000..91c7173c50e6 --- /dev/null +++ b/arch/openrisc/boot/dts/or1klitex.dts @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * LiteX-based System on Chip + * + * Copyright (C) 2019 Antmicro <www.antmicro.com> + */ + +/dts-v1/; +/ { + compatible = "opencores,or1ksim"; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&pic>; + + aliases { + serial0 = &serial0; + }; + + chosen { + bootargs = "console=liteuart"; + }; + + memory@0 { + device_type = "memory"; + reg = <0x00000000 0x10000000>; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + cpu@0 { + compatible = "opencores,or1200-rtlsvn481"; + reg = <0>; + clock-frequency = <100000000>; + }; + }; + + pic: pic { + compatible = "opencores,or1k-pic"; + #interrupt-cells = <1>; + interrupt-controller; + }; + + serial0: serial@e0006800 { + device_type = "serial"; + compatible = "litex,liteuart"; + reg = <0xe0006800 0x100>; + }; + + soc_ctrl0: soc_controller@e0000000 { + compatible = "litex,soc-controller"; + reg = <0xe0000000 0xc>; + status = "okay"; + }; + + ethernet@e0001000 { + compatible = "litex,liteeth"; + reg = <0xe0001000 0x7c>, + <0xe0001800 0x0a>, + <0x80000000 0x2000>; + reg-names = "mac", "mdio", "buffer"; + interrupts = <2>; + }; +}; diff --git a/arch/openrisc/configs/or1klitex_defconfig b/arch/openrisc/configs/or1klitex_defconfig new file mode 100644 index 000000000000..3e849d25838a --- /dev/null +++ b/arch/openrisc/configs/or1klitex_defconfig @@ -0,0 +1,56 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_CGROUPS=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_EXPERT=y +CONFIG_BUILTIN_DTB_NAME="or1klitex" +CONFIG_HZ_100=y +CONFIG_OPENRISC_HAVE_SHADOW_GPRS=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_INET_UDP_DIAG=y +CONFIG_INET_RAW_DIAG=y +# CONFIG_WIRELESS is not set +# CONFIG_ETHTOOL_NETLINK is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_OF_OVERLAY=y +CONFIG_NETDEVICES=y +CONFIG_LITEX_LITEETH=y +# CONFIG_WLAN is not set +CONFIG_SERIAL_LITEUART=y +CONFIG_SERIAL_LITEUART_CONSOLE=y +CONFIG_TTY_PRINTK=y +# CONFIG_GPIO_CDEV is not set +CONFIG_MMC=y +CONFIG_MMC_LITEX=y +# CONFIG_VHOST_MENU is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_LITEX_SOC_CONTROLLER=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_EXFAT_FS=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_PRINTK_TIME=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y +CONFIG_BUG_ON_DATA_CORRUPTION=y diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig index d8ff4f8ffb88..59fe33cefba2 100644 --- a/arch/openrisc/configs/or1ksim_defconfig +++ 
b/arch/openrisc/configs/or1ksim_defconfig @@ -1,4 +1,3 @@ -CONFIG_CROSS_COMPILE="or1k-linux-" CONFIG_NO_HZ=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y @@ -11,10 +10,11 @@ CONFIG_EXPERT=y # CONFIG_AIO is not set # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLOB=y +CONFIG_SLUB=y +CONFIG_SLUB_TINY=y CONFIG_MODULES=y # CONFIG_BLOCK is not set -CONFIG_OPENRISC_BUILTIN_DTB="or1ksim" +CONFIG_BUILTIN_DTB_NAME="or1ksim" CONFIG_HZ_100=y CONFIG_NET=y CONFIG_PACKET=y @@ -44,7 +44,6 @@ CONFIG_MICREL_PHY=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y diff --git a/arch/openrisc/configs/simple_smp_defconfig b/arch/openrisc/configs/simple_smp_defconfig index 64278992df9c..6008e824d31c 100644 --- a/arch/openrisc/configs/simple_smp_defconfig +++ b/arch/openrisc/configs/simple_smp_defconfig @@ -1,4 +1,3 @@ -CONFIG_CROSS_COMPILE="or1k-linux-" CONFIG_LOCALVERSION="-simple-smp" CONFIG_NO_HZ=y CONFIG_LOG_BUF_SHIFT=14 @@ -17,10 +16,11 @@ CONFIG_EXPERT=y # CONFIG_AIO is not set # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLOB=y +CONFIG_SLUB=y +CONFIG_SLUB_TINY=y CONFIG_MODULES=y # CONFIG_BLOCK is not set -CONFIG_OPENRISC_BUILTIN_DTB="simple_smp" +CONFIG_BUILTIN_DTB_NAME="simple_smp" CONFIG_SMP=y CONFIG_HZ_100=y CONFIG_OPENRISC_HAVE_SHADOW_GPRS=y diff --git a/arch/openrisc/configs/virt_defconfig b/arch/openrisc/configs/virt_defconfig new file mode 100644 index 000000000000..c1b69166c500 --- /dev/null +++ b/arch/openrisc/configs/virt_defconfig @@ -0,0 +1,108 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ_IDLE=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_OPENRISC_HAVE_INST_CMOV=y +CONFIG_OPENRISC_HAVE_INST_ROR=y +CONFIG_OPENRISC_HAVE_INST_RORI=y +CONFIG_OPENRISC_HAVE_INST_SEXT=y +CONFIG_NR_CPUS=8 +CONFIG_SMP=y +CONFIG_HZ_100=y +# CONFIG_OPENRISC_NO_SPR_SR_DSX is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +# CONFIG_TCP_CONG_CUBIC is not set +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=y +CONFIG_VIRTIO_BLK=y +CONFIG_NETDEVICES=y +CONFIG_VIRTIO_NET=y +CONFIG_ETHOC=y +CONFIG_MICREL_PHY=y +# CONFIG_WLAN is not set +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_POWER_RESET_SYSCON_POWEROFF=y +CONFIG_SYSCON_REBOOT_MODE=y +# CONFIG_HWMON is not set +CONFIG_DRM=y +# CONFIG_DRM_FBDEV_EMULATION is not set +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_LOGO=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_INTEL8X0=y +CONFIG_SND_INTEL8X0M=y +CONFIG_SND_SOC=y +CONFIG_SND_VIRTIO=y +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_EZKEY is 
not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_REDRAGON is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +CONFIG_USB=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_PLATFORM=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_SERIAL=y +CONFIG_USB_GADGET=y +CONFIG_TYPEC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_GOLDFISH=y +CONFIG_VIRT_DRIVERS=y +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_PCI_LEGACY is not set +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +# CONFIG_DNOTIFY is not set +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_EXFAT_FS=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_UNICODE=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_FTRACE=y diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index e12d6c1735a0..2b1a6b00cdac 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -1,45 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 -generic-y += barrier.h -generic-y += bug.h -generic-y += bugs.h -generic-y += checksum.h -generic-y += compat.h -generic-y += current.h -generic-y += device.h -generic-y += div64.h -generic-y += dma.h -generic-y += dma-mapping.h -generic-y += emergency-restart.h -generic-y += exec.h +syscall-y += syscall_table_32.h + generic-y += extable.h -generic-y += fb.h -generic-y += ftrace.h -generic-y += hardirq.h -generic-y += hw_irq.h -generic-y += irq_regs.h -generic-y += irq_work.h -generic-y += kdebug.h -generic-y += kmap_types.h -generic-y += kprobes.h generic-y += kvm_para.h -generic-y += local.h -generic-y += mcs_spinlock.h -generic-y += mm-arch-hooks.h -generic-y += mmiowb.h -generic-y += module.h -generic-y += pci.h -generic-y += percpu.h -generic-y += preempt.h -generic-y += qspinlock_types.h -generic-y += qspinlock.h +generic-y += parport.h +generic-y += spinlock_types.h +generic-y += spinlock.h generic-y += qrwlock_types.h generic-y += qrwlock.h -generic-y += sections.h -generic-y += shmparam.h -generic-y += switch_to.h -generic-y += topology.h -generic-y += trace_clock.h generic-y += user.h -generic-y += vga.h -generic-y += word-at-a-time.h -generic-y += xor.h +generic-y += text-patching.h diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h index b589fac39b92..8ce67ec7c9a3 100644 --- a/arch/openrisc/include/asm/atomic.h +++ b/arch/openrisc/include/asm/atomic.h @@ -13,7 +13,7 @@ /* Atomically perform op with v->counter and i */ #define ATOMIC_OP(op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ { \ int tmp; \ \ @@ -30,7 +30,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ /* Atomically perform op with v->counter and i, return the result */ #define ATOMIC_OP_RETURN(op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ +static inline int arch_atomic_##op##_return(int i, atomic_t *v) \ { \ int tmp; \ \ @@ -49,7 +49,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ /* Atomically perform op with v->counter and i, return orig v->counter */ #define ATOMIC_FETCH_OP(op) \ -static inline int atomic_fetch_##op(int i, atomic_t *v) \ +static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \ { \ int tmp, old; \ \ @@ -75,6 +75,8 @@ ATOMIC_FETCH_OP(and) ATOMIC_FETCH_OP(or) ATOMIC_FETCH_OP(xor) +ATOMIC_OP(add) +ATOMIC_OP(sub) ATOMIC_OP(and) ATOMIC_OP(or) 
ATOMIC_OP(xor) @@ -83,16 +85,18 @@ ATOMIC_OP(xor) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define atomic_add_return atomic_add_return -#define atomic_sub_return atomic_sub_return -#define atomic_fetch_add atomic_fetch_add -#define atomic_fetch_sub atomic_fetch_sub -#define atomic_fetch_and atomic_fetch_and -#define atomic_fetch_or atomic_fetch_or -#define atomic_fetch_xor atomic_fetch_xor -#define atomic_and atomic_and -#define atomic_or atomic_or -#define atomic_xor atomic_xor +#define arch_atomic_add_return arch_atomic_add_return +#define arch_atomic_sub_return arch_atomic_sub_return +#define arch_atomic_fetch_add arch_atomic_fetch_add +#define arch_atomic_fetch_sub arch_atomic_fetch_sub +#define arch_atomic_fetch_and arch_atomic_fetch_and +#define arch_atomic_fetch_or arch_atomic_fetch_or +#define arch_atomic_fetch_xor arch_atomic_fetch_xor +#define arch_atomic_add arch_atomic_add +#define arch_atomic_sub arch_atomic_sub +#define arch_atomic_and arch_atomic_and +#define arch_atomic_or arch_atomic_or +#define arch_atomic_xor arch_atomic_xor /* * Atomically add a to v->counter as long as v is not already u. @@ -100,7 +104,7 @@ ATOMIC_OP(xor) * * This is often used through atomic_inc_not_zero() */ -static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) { int old, tmp; @@ -119,8 +123,11 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) return old; } -#define atomic_fetch_add_unless atomic_fetch_add_unless +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless -#include <asm-generic/atomic.h> +#define arch_atomic_read(v) READ_ONCE((v)->counter) +#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i)) + +#include <asm/cmpxchg.h> #endif /* __ASM_OPENRISC_ATOMIC_H */ diff --git a/arch/openrisc/include/asm/barrier.h b/arch/openrisc/include/asm/barrier.h new file mode 100644 index 000000000000..7538294721be --- /dev/null +++ b/arch/openrisc/include/asm/barrier.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#define mb() asm volatile ("l.msync" ::: "memory") + +#include <asm-generic/barrier.h> + +#endif /* __ASM_BARRIER_H */ diff --git a/arch/openrisc/include/asm/bitops.h b/arch/openrisc/include/asm/bitops.h index 7f1ca35213d8..d773ed938acb 100644 --- a/arch/openrisc/include/asm/bitops.h +++ b/arch/openrisc/include/asm/bitops.h @@ -30,7 +30,6 @@ #include <asm/bitops/fls.h> #include <asm/bitops/__fls.h> #include <asm-generic/bitops/fls64.h> -#include <asm-generic/bitops/find.h> #ifndef _LINUX_BITOPS_H #error only <linux/bitops.h> can be included directly diff --git a/arch/openrisc/include/asm/bug.h b/arch/openrisc/include/asm/bug.h new file mode 100644 index 000000000000..6d04776eaf10 --- /dev/null +++ b/arch/openrisc/include/asm/bug.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_OPENRISC_BUG_H +#define __ASM_OPENRISC_BUG_H + +#include <asm-generic/bug.h> + +struct pt_regs; + +void __noreturn die(const char *str, struct pt_regs *regs, long err); + +#endif /* __ASM_OPENRISC_BUG_H */ diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h index 79d5d7753fe4..984c331ff5f4 100644 --- a/arch/openrisc/include/asm/cacheflush.h +++ b/arch/openrisc/include/asm/cacheflush.h @@ -56,37 +56,24 @@ static inline void sync_icache_dcache(struct page *page) */ #define PG_dc_clean PG_arch_1 +static inline void flush_dcache_folio(struct folio *folio) +{ + 
clear_bit(PG_dc_clean, &folio->flags); +} +#define flush_dcache_folio flush_dcache_folio + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 static inline void flush_dcache_page(struct page *page) { - clear_bit(PG_dc_clean, &page->flags); + flush_dcache_folio(page_folio(page)); } -/* - * Other interfaces are not required since we do not have virtually - * indexed or tagged caches. So we can use the default here. - */ -#define flush_cache_all() do { } while (0) -#define flush_cache_mm(mm) do { } while (0) -#define flush_cache_dup_mm(mm) do { } while (0) -#define flush_cache_range(vma, start, end) do { } while (0) -#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) -#define flush_dcache_mmap_lock(mapping) do { } while (0) -#define flush_dcache_mmap_unlock(mapping) do { } while (0) -#define flush_icache_range(start, end) do { } while (0) -#define flush_icache_page(vma, pg) do { } while (0) -#define flush_icache_user_range(vma, pg, adr, len) do { } while (0) -#define flush_cache_vmap(start, end) do { } while (0) -#define flush_cache_vunmap(start, end) do { } while (0) - -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - memcpy(dst, src, len); \ - if (vma->vm_flags & VM_EXEC) \ - sync_icache_dcache(page); \ - } while (0) +#define flush_icache_user_page(vma, page, addr, len) \ +do { \ + if (vma->vm_flags & VM_EXEC) \ + sync_icache_dcache(page); \ +} while (0) -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) +#include <asm-generic/cacheflush.h> #endif /* __ASM_CACHEFLUSH_H */ diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h index f9cd43a39d72..8ee151c072e4 100644 --- a/arch/openrisc/include/asm/cmpxchg.h +++ b/arch/openrisc/include/asm/cmpxchg.h @@ -132,7 +132,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, } } -#define cmpxchg(ptr, o, n) \ +#define arch_cmpxchg(ptr, o, n) \ ({ \ (__typeof__(*(ptr))) __cmpxchg((ptr), \ (unsigned long)(o), \ @@ -147,8 +147,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, extern unsigned long __xchg_called_with_bad_pointer(void) __compiletime_error("Bad argument size for xchg"); -static inline unsigned long __xchg(volatile void *ptr, unsigned long with, - int size) +static inline unsigned long +__arch_xchg(volatile void *ptr, unsigned long with, int size) { switch (size) { case 1: @@ -161,11 +161,11 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with, } } -#define xchg(ptr, with) \ +#define arch_xchg(ptr, with) \ ({ \ - (__typeof__(*(ptr))) __xchg((ptr), \ - (unsigned long)(with), \ - sizeof(*(ptr))); \ + (__typeof__(*(ptr))) __arch_xchg((ptr), \ + (unsigned long)(with), \ + sizeof(*(ptr))); \ }) #endif /* __ASM_OPENRISC_CMPXCHG_H */ diff --git a/arch/openrisc/include/asm/fixmap.h b/arch/openrisc/include/asm/fixmap.h index ad78e50b7ba3..aaa6a26a3e92 100644 --- a/arch/openrisc/include/asm/fixmap.h +++ b/arch/openrisc/include/asm/fixmap.h @@ -26,59 +26,19 @@ #include <linux/bug.h> #include <asm/page.h> -/* - * On OpenRISC we use these special fixed_addresses for doing ioremap - * early in the boot process before memory initialization is complete. - * This is used, in particular, by the early serial console code. - * - * It's not really 'fixmap', per se, but fits loosely into the same - * paradigm. - */ enum fixed_addresses { - /* - * FIX_IOREMAP entries are useful for mapping physical address - * space before ioremap() is useable, e.g. 
really early in boot - * before kmalloc() is working. - */ -#define FIX_N_IOREMAPS 32 - FIX_IOREMAP_BEGIN, - FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS - 1, + FIX_EARLYCON_MEM_BASE, __end_of_fixed_addresses }; #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) /* FIXADDR_BOTTOM might be a better name here... */ #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXMAP_PAGE_IO PAGE_KERNEL_NOCACHE -#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) -#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) - -/* - * 'index to address' translation. If anyone tries to use the idx - * directly without tranlation, we catch the bug with a NULL-deference - * kernel oops. Illegal ranges of incoming indices are caught too. - */ -static __always_inline unsigned long fix_to_virt(const unsigned int idx) -{ - /* - * this branch gets completely eliminated after inlining, - * except when someone tries to use fixaddr indices in an - * illegal way. (such as mixing up address types or using - * out-of-range indices). - * - * If it doesn't get removed, the linker will complain - * loudly with a reasonably clear error message.. - */ - if (idx >= __end_of_fixed_addresses) - BUG(); - - return __fix_to_virt(idx); -} +extern void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags); -static inline unsigned long virt_to_fix(const unsigned long vaddr) -{ - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); - return __virt_to_fix(vaddr); -} +#include <asm-generic/fixmap.h> #endif diff --git a/arch/openrisc/include/asm/fpu.h b/arch/openrisc/include/asm/fpu.h new file mode 100644 index 000000000000..57bc44d80d53 --- /dev/null +++ b/arch/openrisc/include/asm/fpu.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_OPENRISC_FPU_H +#define __ASM_OPENRISC_FPU_H + +struct task_struct; + +#ifdef CONFIG_FPU +static inline void save_fpu(struct task_struct *task) +{ + task->thread.fpcsr = mfspr(SPR_FPCSR); +} + +static inline void restore_fpu(struct task_struct *task) +{ + mtspr(SPR_FPCSR, task->thread.fpcsr); +} +#else +#define save_fpu(tsk) do { } while (0) +#define restore_fpu(tsk) do { } while (0) +#endif + +#endif /* __ASM_OPENRISC_FPU_H */ diff --git a/arch/openrisc/include/asm/futex.h b/arch/openrisc/include/asm/futex.h index fe894e6331ae..865e9cd0d97b 100644 --- a/arch/openrisc/include/asm/futex.h +++ b/arch/openrisc/include/asm/futex.h @@ -35,7 +35,8 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) { int oldval = 0, ret; - pagefault_disable(); + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; switch (op) { case FUTEX_OP_SET: @@ -57,8 +58,6 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) ret = -ENOSYS; } - pagefault_enable(); - if (!ret) *oval = oldval; diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h index e18f038b2a6d..5a6f0f16a5ce 100644 --- a/arch/openrisc/include/asm/io.h +++ b/arch/openrisc/include/asm/io.h @@ -14,8 +14,12 @@ #ifndef __ASM_OPENRISC_IO_H #define __ASM_OPENRISC_IO_H +#include <linux/types.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> + /* - * PCI: can we really do 0 here if we have no port IO? + * PCI: We do not use IO ports in OpenRISC */ #define IO_SPACE_LIMIT 0 @@ -25,10 +29,11 @@ #define PIO_OFFSET 0 #define PIO_MASK 0 -#include <asm-generic/io.h> -#include <asm/pgtable.h> +/* + * I/O memory mapping functions. 
+ */ +#define _PAGE_IOREMAP (pgprot_val(PAGE_KERNEL) | _PAGE_CI) -void __iomem *ioremap(phys_addr_t offset, unsigned long size); -extern void iounmap(void *addr); +#include <asm-generic/io.h> #endif diff --git a/arch/openrisc/include/asm/mmu_context.h b/arch/openrisc/include/asm/mmu_context.h index ced577542e29..a6702384c77d 100644 --- a/arch/openrisc/include/asm/mmu_context.h +++ b/arch/openrisc/include/asm/mmu_context.h @@ -17,13 +17,13 @@ #include <asm-generic/mm_hooks.h> +#define init_new_context init_new_context extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); +#define destroy_context destroy_context extern void destroy_context(struct mm_struct *mm); extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk); -#define deactivate_mm(tsk, mm) do { } while (0) - #define activate_mm(prev, next) switch_mm((prev), (next), NULL) /* current active pgd - this is similar to other processors pgd @@ -32,8 +32,6 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, extern volatile pgd_t *current_pgd[]; /* defined in arch/openrisc/mm/fault.c */ -static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -} +#include <asm-generic/mmu_context.h> #endif diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h index 01069db59454..c589e96035e1 100644 --- a/arch/openrisc/include/asm/page.h +++ b/arch/openrisc/include/asm/page.h @@ -15,16 +15,7 @@ #ifndef __ASM_OPENRISC_PAGE_H #define __ASM_OPENRISC_PAGE_H - -/* PAGE_SHIFT determines the page size */ - -#define PAGE_SHIFT 13 -#ifdef __ASSEMBLY__ -#define PAGE_SIZE (1 << PAGE_SHIFT) -#else -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#endif -#define PAGE_MASK (~(PAGE_SIZE-1)) +#include <vdso/page.h> #define PAGE_OFFSET 0xc0000000 #define KERNELBASE PAGE_OFFSET @@ -72,25 +63,18 @@ typedef struct page *pgtable_t; #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET)) #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) -#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) -#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) +static inline unsigned long virt_to_pfn(const void *kaddr) +{ + return __pa(kaddr) >> PAGE_SHIFT; +} #define virt_to_page(addr) \ (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) -#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) - -#define pfn_valid(pfn) ((pfn) < max_mapnr) - #define virt_addr_valid(kaddr) (pfn_valid(virt_to_pfn(kaddr))) #endif /* __ASSEMBLY__ */ - -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - - #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h index da12a4c38c4b..3f110931d8f6 100644 --- a/arch/openrisc/include/asm/pgalloc.h +++ b/arch/openrisc/include/asm/pgalloc.h @@ -20,6 +20,9 @@ #include <linux/mm.h> #include <linux/memblock.h> +#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL +#include <asm-generic/pgalloc.h> + extern int mem_init_done; #define pmd_populate_kernel(mm, pmd, pte) \ @@ -38,15 +41,13 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, */ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL); + pgd_t *ret = __pgd_alloc(mm, 0); - if (ret) { - memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); + if (ret) memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * 
sizeof(pgd_t)); - } return ret; } @@ -61,44 +62,9 @@ extern inline pgd_t *pgd_alloc(struct mm_struct *mm) } #endif -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_page((unsigned long)pgd); -} - extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); -static inline struct page *pte_alloc_one(struct mm_struct *mm) -{ - struct page *pte; - pte = alloc_pages(GFP_KERNEL, 0); - if (!pte) - return NULL; - clear_page(page_address(pte)); - if (!pgtable_pte_page_ctor(pte)) { - __free_page(pte); - return NULL; - } - return pte; -} - -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) -{ - free_page((unsigned long)pte); -} - -static inline void pte_free(struct mm_struct *mm, struct page *pte) -{ - pgtable_pte_page_dtor(pte); - __free_page(pte); -} - #define __pte_free_tlb(tlb, pte, addr) \ -do { \ - pgtable_pte_page_dtor(pte); \ - tlb_remove_page((tlb), (pte)); \ -} while (0) - -#define pmd_pgtable(pmd) pmd_page(pmd) + tlb_remove_ptdesc((tlb), page_ptdesc(pte)) #endif diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h index 248d22d8faa7..60c6ce7ff2dc 100644 --- a/arch/openrisc/include/asm/pgtable.h +++ b/arch/openrisc/include/asm/pgtable.h @@ -12,7 +12,7 @@ * et al. */ -/* or32 pgtable.h - macros and functions to manipulate page tables +/* or1k pgtable.h - macros and functions to manipulate page tables * * Based on: * include/asm-cris/pgtable.h @@ -21,7 +21,6 @@ #ifndef __ASM_OPENRISC_PGTABLE_H #define __ASM_OPENRISC_PGTABLE_H -#define __ARCH_USE_5LEVEL_HACK #include <asm-generic/pgtable-nopmd.h> #ifndef __ASSEMBLY__ @@ -30,14 +29,14 @@ /* * The Linux memory management assumes a three-level page table setup. On - * or32, we use that, but "fold" the mid level into the top-level page + * or1k, we use that, but "fold" the mid level into the top-level page * table. Since the MMU TLB is software loaded through an interrupt, it * supports any page table structure, so we could have used a three-level * setup, but for the amounts of memory we normally use, a two-level is * probably more efficient. * * This file contains the functions and defines necessary to modify and use - * the or32 page table tree. + * the or1k page table tree. */ extern void paging_init(void); @@ -47,7 +46,7 @@ extern void paging_init(void); * hook is made available. */ #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) -#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) + /* * (pmds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) @@ -74,7 +73,6 @@ extern void paging_init(void); */ #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) -#define FIRST_USER_ADDRESS 0UL /* * Kernels own virtual memory area. @@ -156,6 +154,9 @@ extern void paging_init(void); #define _KERNPG_TABLE \ (_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY) +/* We borrow bit 11 to store the exclusive marker in swap PTEs. 
*/ +#define _PAGE_SWP_EXCLUSIVE _PAGE_U_SHARED + #define PAGE_NONE __pgprot(_PAGE_ALL) #define PAGE_READONLY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE) #define PAGE_READONLY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC) @@ -178,24 +179,6 @@ extern void paging_init(void); __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \ | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI) -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY_X -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY_X -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY_X - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY_X -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED_X -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED_X - /* zero page used for uninitialized stuff */ extern unsigned long empty_zero_page[2048]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) @@ -236,8 +219,6 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -static inline int pte_special(pte_t pte) { return 0; } -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline pte_t pte_wrprotect(pte_t pte) { @@ -269,7 +250,7 @@ static inline pte_t pte_mkold(pte_t pte) return pte; } -static inline pte_t pte_mkwrite(pte_t pte) +static inline pte_t pte_mkwrite_novma(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; @@ -365,39 +346,18 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep; } +#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) -#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) - -/* to find an entry in a page-table-directory. 
*/ -#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) -#define __pgd_offset(address) pgd_index(address) - -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) - -/* to find an entry in a kernel page-table-directory */ -#define pgd_offset_k(address) pgd_offset(&init_mm, address) +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)); +} #define __pmd_offset(address) \ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) -/* - * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] - * - * this macro returns the index of the entry in the pte page which would - * control the given virtual address - */ -#define __pte_offset(address) \ - (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) -#define pte_offset_kernel(dir, address) \ - ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address)) -#define pte_offset_map(dir, address) \ - ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) -#define pte_offset_map_nested(dir, address) \ - pte_offset_map(dir, address) - -#define pte_unmap(pte) do { } while (0) -#define pte_unmap_nested(pte) do { } while (0) +#define PFN_PTE_SHIFT PAGE_SHIFT #define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT) #define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot)) @@ -420,28 +380,55 @@ static inline void update_tlb(struct vm_area_struct *vma, extern void update_cache(struct vm_area_struct *vma, unsigned long address, pte_t *pte); -static inline void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t *pte) +static inline void update_mmu_cache_range(struct vm_fault *vmf, + struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, unsigned int nr) { - update_tlb(vma, address, pte); - update_cache(vma, address, pte); + update_tlb(vma, address, ptep); + update_cache(vma, address, ptep); } -/* __PHX__ FIXME, SWAP, this probably doesn't work */ +#define update_mmu_cache(vma, addr, ptep) \ + update_mmu_cache_range(NULL, vma, addr, ptep, 1) -/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */ -/* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */ +/* __PHX__ FIXME, SWAP, this probably doesn't work */ -#define __swp_type(x) (((x).val >> 5) & 0x7f) +/* + * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that + * are !pte_none() && !pte_present(). + * + * Format of swap PTEs: + * + * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * <-------------- offset ---------------> E <- type --> 0 0 0 0 0 + * + * E is the exclusive marker that is not stored in swap entries. + * The zero'ed bits include _PAGE_PRESENT. 
+ */ +#define __swp_type(x) (((x).val >> 5) & 0x3f) #define __swp_offset(x) ((x).val >> 12) #define __swp_entry(type, offset) \ - ((swp_entry_t) { ((type) << 5) | ((offset) << 12) }) + ((swp_entry_t) { (((type) & 0x3f) << 5) | ((offset) << 12) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define kern_addr_valid(addr) (1) +static inline int pte_swp_exclusive(pte_t pte) +{ + return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + pte_val(pte) |= _PAGE_SWP_EXCLUSIVE; + return pte; +} -#include <asm-generic/pgtable.h> +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE; + return pte; +} typedef pte_t *pte_addr_t; diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h index ad53b3184885..e05d1b59e24e 100644 --- a/arch/openrisc/include/asm/processor.h +++ b/arch/openrisc/include/asm/processor.h @@ -44,6 +44,7 @@ struct task_struct; struct thread_struct { + long fpcsr; /* Floating point control status register. */ }; /* @@ -72,8 +73,8 @@ struct thread_struct { void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp); -void release_thread(struct task_struct *); -unsigned long get_wchan(struct task_struct *p); +unsigned long __get_wchan(struct task_struct *p); +void show_registers(struct pt_regs *regs); #define cpu_relax() barrier() diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h index 01f81d4e97dc..e5a282b67075 100644 --- a/arch/openrisc/include/asm/ptrace.h +++ b/arch/openrisc/include/asm/ptrace.h @@ -17,6 +17,7 @@ #include <asm/spr_defs.h> #include <uapi/asm/ptrace.h> +#include <linux/compiler.h> /* * Make kernel PTrace/register structures opaque to userspace... userspace can @@ -42,6 +43,36 @@ struct pt_regs { /* Named registers */ long sr; /* Stored in place of r0 */ long sp; /* r1 */ + long gpr2; + long gpr3; + long gpr4; + long gpr5; + long gpr6; + long gpr7; + long gpr8; + long gpr9; + long gpr10; + long gpr11; + long gpr12; + long gpr13; + long gpr14; + long gpr15; + long gpr16; + long gpr17; + long gpr18; + long gpr19; + long gpr20; + long gpr21; + long gpr22; + long gpr23; + long gpr24; + long gpr25; + long gpr26; + long gpr27; + long gpr28; + long gpr29; + long gpr30; + long gpr31; }; struct { /* Old style */ @@ -66,16 +97,56 @@ struct pt_regs { /* TODO: Rename this to REDZONE because that's what it is */ #define STACK_FRAME_OVERHEAD 128 /* size of minimum stack frame */ -#define instruction_pointer(regs) ((regs)->pc) +#define MAX_REG_OFFSET offsetof(struct pt_regs, orig_gpr11) + +/* Helpers for working with the instruction pointer */ +static inline unsigned long instruction_pointer(struct pt_regs *regs) +{ + return (unsigned long)regs->pc; +} +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->pc = val; +} + #define user_mode(regs) (((regs)->sr & SPR_SR_SM) == 0) #define user_stack_pointer(regs) ((unsigned long)(regs)->sp) #define profile_pc(regs) instruction_pointer(regs) +/* Valid only for Kernel mode traps. 
*/ +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return (unsigned long)regs->sp; +} + static inline long regs_return_value(struct pt_regs *regs) { return regs->gpr[11]; } +extern int regs_query_register_offset(const char *name); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten + * @offset: offset of the register. + * + * regs_get_register returns the value of a register whose offset from @regs. + * The @offset is the offset of the register in struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. + */ +static inline unsigned long regs_get_register(struct pt_regs *regs, + unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} + #endif /* __ASSEMBLY__ */ /* @@ -115,6 +186,5 @@ static inline long regs_return_value(struct pt_regs *regs) #define PT_GPR31 124 #define PT_PC 128 #define PT_ORIG_GPR11 132 -#define PT_SYSCALLNO 136 #endif /* __ASM_OPENRISC_PTRACE_H */ diff --git a/arch/openrisc/include/asm/setup.h b/arch/openrisc/include/asm/setup.h new file mode 100644 index 000000000000..9acbc5deda69 --- /dev/null +++ b/arch/openrisc/include/asm/setup.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Stafford Horne + */ +#ifndef _ASM_OR1K_SETUP_H +#define _ASM_OR1K_SETUP_H + +#include <linux/init.h> +#include <asm-generic/setup.h> + +#ifndef __ASSEMBLY__ +void __init or1k_early_setup(void *fdt); +#endif + +#endif /* _ASM_OR1K_SETUP_H */ diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h deleted file mode 100644 index a8940bdfcb7e..000000000000 --- a/arch/openrisc/include/asm/spinlock.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * OpenRISC Linux - * - * Linux architectural port borrowing liberally from similar works of - * others. All original copyrights apply as per the original source - * declaration. - * - * OpenRISC implementation: - * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> - * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> - * et al. 
- */ - -#ifndef __ASM_OPENRISC_SPINLOCK_H -#define __ASM_OPENRISC_SPINLOCK_H - -#include <asm/qspinlock.h> - -#include <asm/qrwlock.h> - -#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) -#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) - -#define arch_spin_relax(lock) cpu_relax() -#define arch_read_relax(lock) cpu_relax() -#define arch_write_relax(lock) cpu_relax() - - -#endif diff --git a/arch/openrisc/include/asm/spinlock_types.h b/arch/openrisc/include/asm/spinlock_types.h deleted file mode 100644 index 7c6fb1208c88..000000000000 --- a/arch/openrisc/include/asm/spinlock_types.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _ASM_OPENRISC_SPINLOCK_TYPES_H -#define _ASM_OPENRISC_SPINLOCK_TYPES_H - -#include <asm/qspinlock_types.h> -#include <asm/qrwlock_types.h> - -#endif /* _ASM_OPENRISC_SPINLOCK_TYPES_H */ diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h index e6383be2a195..903ed882bdec 100644 --- a/arch/openrisc/include/asm/syscall.h +++ b/arch/openrisc/include/asm/syscall.h @@ -57,13 +57,6 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, memcpy(args, ®s->gpr[3], 6 * sizeof(args[0])); } -static inline void -syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - const unsigned long *args) -{ - memcpy(®s->gpr[3], args, 6 * sizeof(args[0])); -} - static inline int syscall_get_arch(struct task_struct *task) { return AUDIT_ARCH_OPENRISC; diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h index 3a7eeae6f56a..9f4c47961bea 100644 --- a/arch/openrisc/include/asm/syscalls.h +++ b/arch/openrisc/include/asm/syscalls.h @@ -22,9 +22,7 @@ asmlinkage long sys_or1k_atomic(unsigned long type, unsigned long *v1, asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, int tls); +asmlinkage long __sys_clone3(struct clone_args __user *uargs, size_t size); asmlinkage long __sys_fork(void); -#define sys_clone __sys_clone -#define sys_fork __sys_fork - #endif /* __ASM_OPENRISC_SYSCALLS_H */ diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h index 9afe68bc423b..4af3049c34c2 100644 --- a/arch/openrisc/include/asm/thread_info.h +++ b/arch/openrisc/include/asm/thread_info.h @@ -25,7 +25,7 @@ /* THREAD_SIZE is the size of the task_struct/kernel_stack combo. 
* normally, the stack is found by doing something like p + THREAD_SIZE - * in or32, a page is 8192 bytes, which seems like a sane size + * in or1k, a page is 8192 bytes, which seems like a sane size */ #define THREAD_SIZE_ORDER 0 @@ -40,18 +40,12 @@ */ #ifndef __ASSEMBLY__ -typedef unsigned long mm_segment_t; - struct thread_info { struct task_struct *task; /* main task structure */ unsigned long flags; /* low level flags */ __u32 cpu; /* current CPU */ __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ - mm_segment_t addr_limit; /* thread address space: - 0-0x7FFFFFFF for user-thead - 0-0xFFFFFFFF for kernel-thread - */ __u8 supervisor_stack[0]; /* saved context data */ @@ -71,7 +65,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ .ksp = 0, \ } @@ -98,6 +91,7 @@ register struct thread_info *current_thread_info_reg asm("r10"); #define TIF_SINGLESTEP 4 /* restore singlestep on return to user * mode */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ #define TIF_RESTORE_SIGMASK 9 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling * TIF_NEED_RESCHED @@ -109,6 +103,7 @@ register struct thread_info *current_thread_info_reg asm("r10"); #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) +#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) diff --git a/arch/openrisc/include/asm/timex.h b/arch/openrisc/include/asm/timex.h index d52b4e536e3f..5487fa93dd9b 100644 --- a/arch/openrisc/include/asm/timex.h +++ b/arch/openrisc/include/asm/timex.h @@ -23,6 +23,7 @@ static inline cycles_t get_cycles(void) { return mfspr(SPR_TTCR); } +#define get_cycles get_cycles /* This isn't really used any more */ #define CLOCK_TICK_RATE 1000 diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h index e9a7f0b35a15..dbf030365ab4 100644 --- a/arch/openrisc/include/asm/tlbflush.h +++ b/arch/openrisc/include/asm/tlbflush.h @@ -17,8 +17,6 @@ #include <linux/mm.h> #include <asm/processor.h> -#include <asm/pgtable.h> -#include <asm/pgalloc.h> #include <asm/current.h> #include <linux/sched.h> @@ -27,7 +25,7 @@ * - flush_tlb_all() flushes all processes TLBs * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page - * - flush_tlb_range(mm, start, end) flushes a range of pages + * - flush_tlb_range(vma, start, end) flushes a range of pages */ extern void local_flush_tlb_all(void); extern void local_flush_tlb_mm(struct mm_struct *mm); diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 17c24f14615f..d6500a374e18 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -22,43 +22,7 @@ #include <linux/string.h> #include <asm/page.h> #include <asm/extable.h> - -/* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * For historical reasons, these macros are grossly misnamed. - */ - -/* addr_limit is the maximum accessible address for the task. we misuse - * the KERNEL_DS and USER_DS values to both assign and compare the - * addr_limit values through the equally misnamed get/set_fs macros. 
- * (see above) - */ - -#define KERNEL_DS (~0UL) - -#define USER_DS (TASK_SIZE) -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - -#define segment_eq(a, b) ((a) == (b)) - -/* Ensure that the range from addr to addr+size is all within the process' - * address space - */ -#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs()-size)) - -/* Ensure that addr is below task's addr_limit */ -#define __addr_ok(addr) ((unsigned long) addr < get_fs()) - -#define access_ok(addr, size) \ -({ \ - unsigned long __ao_addr = (unsigned long)(addr); \ - unsigned long __ao_size = (unsigned long)(size); \ - __range_ok(__ao_addr, __ao_size); \ -}) +#include <asm-generic/access_ok.h> /* * These are the main single-value transfer routines. They automatically @@ -100,7 +64,7 @@ extern long __put_user_bad(void); #define __put_user_check(x, ptr, size) \ ({ \ long __pu_err = -EFAULT; \ - __typeof__(*(ptr)) *__pu_addr = (ptr); \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ if (access_ok(__pu_addr, size)) \ __put_user_size((x), __pu_addr, (size), __pu_err); \ __pu_err; \ @@ -164,19 +128,19 @@ struct __large_struct { #define __get_user_nocheck(x, ptr, size) \ ({ \ - long __gu_err, __gu_val; \ - __get_user_size(__gu_val, (ptr), (size), __gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + long __gu_err; \ + __get_user_size((x), (ptr), (size), __gu_err); \ __gu_err; \ }) #define __get_user_check(x, ptr, size) \ ({ \ - long __gu_err = -EFAULT, __gu_val = 0; \ - const __typeof__(*(ptr)) * __gu_addr = (ptr); \ - if (access_ok(__gu_addr, size)) \ - __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + long __gu_err = -EFAULT; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + if (access_ok(__gu_addr, size)) \ + __get_user_size((x), __gu_addr, (size), __gu_err); \ + else \ + (x) = (__typeof__(*(ptr))) 0; \ __gu_err; \ }) @@ -190,11 +154,13 @@ do { \ case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \ case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \ case 8: __get_user_asm2(x, ptr, retval); break; \ - default: (x) = __get_user_bad(); \ + default: (x) = (__typeof__(*(ptr)))__get_user_bad(); \ } \ } while (0) #define __get_user_asm(x, addr, err, op) \ +{ \ + unsigned long __gu_tmp; \ __asm__ __volatile__( \ "1: "op" %1,0(%2)\n" \ "2:\n" \ @@ -208,10 +174,14 @@ do { \ " .align 2\n" \ " .long 1b,3b\n" \ ".previous" \ - : "=r"(err), "=r"(x) \ - : "r"(addr), "i"(-EFAULT), "0"(err)) + : "=r"(err), "=r"(__gu_tmp) \ + : "r"(addr), "i"(-EFAULT), "0"(err)); \ + (x) = (__typeof__(*(addr)))__gu_tmp; \ +} #define __get_user_asm2(x, addr, err) \ +{ \ + unsigned long long __gu_tmp; \ __asm__ __volatile__( \ "1: l.lwz %1,0(%2)\n" \ "2: l.lwz %H1,4(%2)\n" \ @@ -228,8 +198,11 @@ do { \ " .long 1b,4b\n" \ " .long 2b,4b\n" \ ".previous" \ - : "=r"(err), "=&r"(x) \ - : "r"(addr), "i"(-EFAULT), "0"(err)) + : "=r"(err), "=&r"(__gu_tmp) \ + : "r"(addr), "i"(-EFAULT), "0"(err)); \ + (x) = (__typeof__(*(addr)))( \ + (__typeof__((x)-(x)))__gu_tmp); \ +} /* more complex routines */ @@ -241,26 +214,23 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long size) return __copy_tofrom_user(to, (__force const void *)from, size); } static inline unsigned long -raw_copy_to_user(void *to, const void __user *from, unsigned long size) +raw_copy_to_user(void __user *to, const void *from, unsigned long size) { return __copy_tofrom_user((__force void *)to, from, size); } #define 
INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER -extern unsigned long __clear_user(void *addr, unsigned long size); +extern unsigned long __clear_user(void __user *addr, unsigned long size); static inline __must_check unsigned long -clear_user(void *addr, unsigned long size) +clear_user(void __user *addr, unsigned long size) { if (likely(access_ok(addr, size))) size = __clear_user(addr, size); return size; } -#define user_addr_max() \ - (uaccess_kernel() ? ~0UL : TASK_SIZE) - extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strnlen_user(const char __user *str, long n); diff --git a/arch/openrisc/include/asm/unaligned.h b/arch/openrisc/include/asm/unaligned.h deleted file mode 100644 index 14353f2101f2..000000000000 --- a/arch/openrisc/include/asm/unaligned.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * OpenRISC Linux - * - * Linux architectural port borrowing liberally from similar works of - * others. All original copyrights apply as per the original source - * declaration. - * - * OpenRISC implementation: - * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> - * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> - * et al. - */ - -#ifndef __ASM_OPENRISC_UNALIGNED_H -#define __ASM_OPENRISC_UNALIGNED_H - -/* - * This is copied from the generic implementation and the C-struct - * variant replaced with the memmove variant. The GCC compiler - * for the OR32 arch optimizes too aggressively for the C-struct - * variant to work, so use the memmove variant instead. - * - * It may be worth considering implementing the unaligned access - * exception handler and allowing unaligned accesses (access_ok.h)... - * not sure if it would be much of a performance win without further - * investigation. 
- */ -#include <asm/byteorder.h> - -#if defined(__LITTLE_ENDIAN) -# include <linux/unaligned/le_memmove.h> -# include <linux/unaligned/be_byteshift.h> -# include <linux/unaligned/generic.h> -# define get_unaligned __get_unaligned_le -# define put_unaligned __put_unaligned_le -#elif defined(__BIG_ENDIAN) -# include <linux/unaligned/be_memmove.h> -# include <linux/unaligned/le_byteshift.h> -# include <linux/unaligned/generic.h> -# define get_unaligned __get_unaligned_be -# define put_unaligned __put_unaligned_be -#else -# error need to define endianess -#endif - -#endif /* __ASM_OPENRISC_UNALIGNED_H */ diff --git a/arch/openrisc/include/asm/unistd.h b/arch/openrisc/include/asm/unistd.h new file mode 100644 index 000000000000..c73f65e18d3b --- /dev/null +++ b/arch/openrisc/include/asm/unistd.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ + +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_TIME32_SYSCALLS + +#include <uapi/asm/unistd.h> diff --git a/arch/openrisc/include/asm/vmalloc.h b/arch/openrisc/include/asm/vmalloc.h new file mode 100644 index 000000000000..75435eceec32 --- /dev/null +++ b/arch/openrisc/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_OPENRISC_VMALLOC_H +#define _ASM_OPENRISC_VMALLOC_H + +#endif /* _ASM_OPENRISC_VMALLOC_H */ diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild index e78470141932..2501e82a1a0a 100644 --- a/arch/openrisc/include/uapi/asm/Kbuild +++ b/arch/openrisc/include/uapi/asm/Kbuild @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 +syscall-y += unistd_32.h + generic-y += ucontext.h diff --git a/arch/openrisc/include/uapi/asm/elf.h b/arch/openrisc/include/uapi/asm/elf.h index e892d5061685..441e343f8268 100644 --- a/arch/openrisc/include/uapi/asm/elf.h +++ b/arch/openrisc/include/uapi/asm/elf.h @@ -34,15 +34,72 @@ #include <asm/ptrace.h> /* The OR1K relocation types... 
not all relevant for module loader */ -#define R_OR32_NONE 0 -#define R_OR32_32 1 -#define R_OR32_16 2 -#define R_OR32_8 3 -#define R_OR32_CONST 4 -#define R_OR32_CONSTH 5 -#define R_OR32_JUMPTARG 6 -#define R_OR32_VTINHERIT 7 -#define R_OR32_VTENTRY 8 +#define R_OR1K_NONE 0 +#define R_OR1K_32 1 +#define R_OR1K_16 2 +#define R_OR1K_8 3 +#define R_OR1K_LO_16_IN_INSN 4 +#define R_OR1K_HI_16_IN_INSN 5 +#define R_OR1K_INSN_REL_26 6 +#define R_OR1K_GNU_VTENTRY 7 +#define R_OR1K_GNU_VTINHERIT 8 +#define R_OR1K_32_PCREL 9 +#define R_OR1K_16_PCREL 10 +#define R_OR1K_8_PCREL 11 +#define R_OR1K_GOTPC_HI16 12 +#define R_OR1K_GOTPC_LO16 13 +#define R_OR1K_GOT16 14 +#define R_OR1K_PLT26 15 +#define R_OR1K_GOTOFF_HI16 16 +#define R_OR1K_GOTOFF_LO16 17 +#define R_OR1K_COPY 18 +#define R_OR1K_GLOB_DAT 19 +#define R_OR1K_JMP_SLOT 20 +#define R_OR1K_RELATIVE 21 +#define R_OR1K_TLS_GD_HI16 22 +#define R_OR1K_TLS_GD_LO16 23 +#define R_OR1K_TLS_LDM_HI16 24 +#define R_OR1K_TLS_LDM_LO16 25 +#define R_OR1K_TLS_LDO_HI16 26 +#define R_OR1K_TLS_LDO_LO16 27 +#define R_OR1K_TLS_IE_HI16 28 +#define R_OR1K_TLS_IE_LO16 29 +#define R_OR1K_TLS_LE_HI16 30 +#define R_OR1K_TLS_LE_LO16 31 +#define R_OR1K_TLS_TPOFF 32 +#define R_OR1K_TLS_DTPOFF 33 +#define R_OR1K_TLS_DTPMOD 34 +#define R_OR1K_AHI16 35 +#define R_OR1K_GOTOFF_AHI16 36 +#define R_OR1K_TLS_IE_AHI16 37 +#define R_OR1K_TLS_LE_AHI16 38 +#define R_OR1K_SLO16 39 +#define R_OR1K_GOTOFF_SLO16 40 +#define R_OR1K_TLS_LE_SLO16 41 +#define R_OR1K_PCREL_PG21 42 +#define R_OR1K_GOT_PG21 43 +#define R_OR1K_TLS_GD_PG21 44 +#define R_OR1K_TLS_LDM_PG21 45 +#define R_OR1K_TLS_IE_PG21 46 +#define R_OR1K_LO13 47 +#define R_OR1K_GOT_LO13 48 +#define R_OR1K_TLS_GD_LO13 49 +#define R_OR1K_TLS_LDM_LO13 50 +#define R_OR1K_TLS_IE_LO13 51 +#define R_OR1K_SLO13 52 +#define R_OR1K_PLTA26 53 +#define R_OR1K_GOT_AHI16 54 + +/* Old relocation names */ +#define R_OR32_NONE R_OR1K_NONE +#define R_OR32_32 R_OR1K_32 +#define R_OR32_16 R_OR1K_16 +#define R_OR32_8 R_OR1K_8 +#define R_OR32_CONST R_OR1K_LO_16_IN_INSN +#define R_OR32_CONSTH R_OR1K_HI_16_IN_INSN +#define R_OR32_JUMPTARG R_OR1K_INSN_REL_26 +#define R_OR32_VTENTRY R_OR1K_GNU_VTENTRY +#define R_OR32_VTINHERIT R_OR1K_GNU_VTINHERIT typedef unsigned long elf_greg_t; @@ -53,8 +110,7 @@ typedef unsigned long elf_greg_t; #define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; -/* A placeholder; OR32 does not have fp support yes, so no fp regs for now. 
*/ -typedef unsigned long elf_fpregset_t; +typedef struct __or1k_fpu_state elf_fpregset_t; /* EM_OPENRISC is defined in linux/elf-em.h */ #define EM_OR32 0x8472 diff --git a/arch/openrisc/include/uapi/asm/ptrace.h b/arch/openrisc/include/uapi/asm/ptrace.h index d4fab268f6aa..a77cc9915ca8 100644 --- a/arch/openrisc/include/uapi/asm/ptrace.h +++ b/arch/openrisc/include/uapi/asm/ptrace.h @@ -30,6 +30,10 @@ struct user_regs_struct { unsigned long pc; unsigned long sr; }; + +struct __or1k_fpu_state { + unsigned long fpcsr; +}; #endif diff --git a/arch/openrisc/include/uapi/asm/sigcontext.h b/arch/openrisc/include/uapi/asm/sigcontext.h index 8ab775fc3450..e7ffb58ff58f 100644 --- a/arch/openrisc/include/uapi/asm/sigcontext.h +++ b/arch/openrisc/include/uapi/asm/sigcontext.h @@ -28,7 +28,10 @@ struct sigcontext { struct user_regs_struct regs; /* needs to be first */ - unsigned long oldmask; + union { + unsigned long fpcsr; + unsigned long oldmask; /* unused */ + }; }; #endif /* __ASM_OPENRISC_SIGCONTEXT_H */ diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h index 566f8c4f8047..46b94d454efd 100644 --- a/arch/openrisc/include/uapi/asm/unistd.h +++ b/arch/openrisc/include/uapi/asm/unistd.h @@ -17,16 +17,4 @@ * (at your option) any later version. */ -#define sys_mmap2 sys_mmap_pgoff - -#define __ARCH_WANT_RENAMEAT -#define __ARCH_WANT_STAT64 -#define __ARCH_WANT_SET_GET_RLIMIT -#define __ARCH_WANT_SYS_FORK -#define __ARCH_WANT_SYS_CLONE -#define __ARCH_WANT_TIME32_SYSCALLS - -#include <asm-generic/unistd.h> - -#define __NR_or1k_atomic __NR_arch_specific_syscall -__SYSCALL(__NR_or1k_atomic, sys_or1k_atomic) +#include <asm/unistd_32.h> diff --git a/arch/openrisc/kernel/.gitignore b/arch/openrisc/kernel/.gitignore index c5f676c3c224..bbb90f92d051 100644 --- a/arch/openrisc/kernel/.gitignore +++ b/arch/openrisc/kernel/.gitignore @@ -1 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only vmlinux.lds diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile index 2d172e79f58d..79129161f3e0 100644 --- a/arch/openrisc/kernel/Makefile +++ b/arch/openrisc/kernel/Makefile @@ -3,9 +3,9 @@ # Makefile for the linux kernel. 
# -extra-y := head.o vmlinux.lds +extra-y := vmlinux.lds -obj-y := setup.o or32_ksyms.o process.o dma.o \ +obj-y := head.o setup.o or32_ksyms.o process.o dma.o \ traps.o time.o irq.o entry.o ptrace.o signal.o \ sys_call_table.o unwinder.o diff --git a/arch/openrisc/kernel/Makefile.syscalls b/arch/openrisc/kernel/Makefile.syscalls new file mode 100644 index 000000000000..525a1e7e7fc9 --- /dev/null +++ b/arch/openrisc/kernel/Makefile.syscalls @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +syscall_abis_32 += or1k time32 stat64 rlimit renameat diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c index e435ae01c600..710651d5aaae 100644 --- a/arch/openrisc/kernel/asm-offsets.c +++ b/arch/openrisc/kernel/asm-offsets.c @@ -32,13 +32,11 @@ #include <linux/thread_info.h> #include <linux/kbuild.h> #include <asm/page.h> -#include <asm/pgtable.h> #include <asm/processor.h> int main(void) { /* offsets into the task_struct */ - DEFINE(TASK_STATE, offsetof(struct task_struct, state)); DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index adec711ad39d..b3edbb33b621 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -11,11 +11,9 @@ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> * * DMA mapping callbacks... - * As alloc_coherent is the only DMA callback being used currently, that's - * the only thing implemented properly. The rest need looking into... */ -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/pagewalk.h> #include <asm/cpuinfo.h> @@ -35,7 +33,7 @@ page_set_nocache(pte_t *pte, unsigned long addr, * Flush the page out of the TLB so that the new page flags get * picked up next time there's an access */ - flush_tlb_page(NULL, addr); + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); /* Flush page out of dcache */ for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size) @@ -58,7 +56,7 @@ page_clear_nocache(pte_t *pte, unsigned long addr, * Flush the page out of the TLB so that the new page flags get * picked up next time there's an access */ - flush_tlb_page(NULL, addr); + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); return 0; } @@ -67,62 +65,34 @@ static const struct mm_walk_ops clear_nocache_walk_ops = { .pte_entry = page_clear_nocache, }; -/* - * Alloc "coherent" memory, which for OpenRISC means simply uncached. - * - * This function effectively just calls __get_free_pages, sets the - * cache-inhibit bit on those pages, and makes sure that the pages are - * flushed out of the cache before they are used. - * - * If the NON_CONSISTENT attribute is set, then this function just - * returns "normal", cachable memory. - * - * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take - * into consideration here, too. All current known implementations of - * the OR1K support only strongly ordered memory accesses, so that flag - * is being ignored for now; uncached but write-combined memory is a - * missing feature of the OR1K. - */ -void * -arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) +void *arch_dma_set_uncached(void *cpu_addr, size_t size) { - unsigned long va; - void *page; - - page = alloc_pages_exact(size, gfp | __GFP_ZERO); - if (!page) - return NULL; - - /* This gives us the real physical address of the first page. 
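Note: the dma.c hunk that continues below replaces the open-coded arch_dma_alloc()/arch_dma_free() pair with the ARCH_HAS_DMA_SET_UNCACHED hooks arch_dma_set_uncached()/arch_dma_clear_uncached(), which only flip the cache-inhibit bit on pages the generic DMA code already allocated. Driver-facing usage stays the same; a minimal, hypothetical consumer sketch (device pointer and buffer size are placeholders):

#include <linux/dma-mapping.h>

/*
 * Hypothetical consumer of the coherent-DMA path: on OpenRISC the buffer
 * returned here ends up uncached via arch_dma_set_uncached(). `dev` must be
 * a probed struct device; PAGE_SIZE is an arbitrary example size.
 */
static void *alloc_desc_ring(struct device *dev, dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);
}

static void free_desc_ring(struct device *dev, void *cpu_addr, dma_addr_t dma)
{
	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma);
}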
*/ - *dma_handle = __pa(page); - - va = (unsigned long)page; + unsigned long va = (unsigned long)cpu_addr; + int error; /* * We need to iterate through the pages, clearing the dcache for * them and setting the cache-inhibit bit. */ - if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops, - NULL)) { - free_pages_exact(page, size); - return NULL; - } - - return (void *)va; + mmap_write_lock(&init_mm); + error = walk_page_range_novma(&init_mm, va, va + size, + &set_nocache_walk_ops, NULL, NULL); + mmap_write_unlock(&init_mm); + + if (error) + return ERR_PTR(error); + return cpu_addr; } -void -arch_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) +void arch_dma_clear_uncached(void *cpu_addr, size_t size) { - unsigned long va = (unsigned long)vaddr; + unsigned long va = (unsigned long)cpu_addr; + mmap_write_lock(&init_mm); /* walk_page_range shouldn't be able to fail here */ - WARN_ON(walk_page_range(&init_mm, va, va + size, - &clear_nocache_walk_ops, NULL)); - - free_pages_exact(vaddr, size); + WARN_ON(walk_page_range_novma(&init_mm, va, va + size, + &clear_nocache_walk_ops, NULL, NULL)); + mmap_write_unlock(&init_mm); } void arch_sync_dma_for_device(phys_addr_t addr, size_t size, diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index e4a78571f883..c7e90b09645e 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -13,6 +13,7 @@ */ #include <linux/linkage.h> +#include <linux/pgtable.h> #include <asm/processor.h> #include <asm/unistd.h> @@ -21,7 +22,6 @@ #include <asm/spr_defs.h> #include <asm/page.h> #include <asm/mmu.h> -#include <asm/pgtable.h> #include <asm/asm-offsets.h> #define DISABLE_INTERRUPTS(t1,t2) \ @@ -173,7 +173,6 @@ handler: ;\ l.sw PT_GPR28(r1),r28 ;\ l.sw PT_GPR29(r1),r29 ;\ /* r30 already save */ ;\ -/* l.sw PT_GPR30(r1),r30*/ ;\ l.sw PT_GPR31(r1),r31 ;\ TRACE_IRQS_OFF_ENTRY ;\ /* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\ @@ -211,9 +210,8 @@ handler: ;\ l.sw PT_GPR27(r1),r27 ;\ l.sw PT_GPR28(r1),r28 ;\ l.sw PT_GPR29(r1),r29 ;\ - /* r31 already saved */ ;\ - l.sw PT_GPR30(r1),r30 ;\ -/* l.sw PT_GPR31(r1),r31 */ ;\ + /* r30 already saved */ ;\ + l.sw PT_GPR31(r1),r31 ;\ /* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\ l.addi r30,r0,-1 ;\ l.sw PT_ORIG_GPR11(r1),r30 ;\ @@ -241,6 +239,8 @@ handler: ;\ /* =====================================================[ exceptions] === */ + __REF + /* ---[ 0x100: RESET exception ]----------------------------------------- */ EXCEPTION_ENTRY(_tng_kernel_start) @@ -326,7 +326,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler) 1: l.ori r6,r0,0x0 // !write access 2: - /* call fault.c handler in or32/mm/fault.c */ + /* call fault.c handler in openrisc/mm/fault.c */ l.jal do_page_fault l.nop l.j _ret_from_exception @@ -348,7 +348,7 @@ EXCEPTION_ENTRY(_insn_page_fault_handler) /* r4 set be EXCEPTION_HANDLE */ // effective address of fault l.ori r6,r0,0x0 // !write access - /* call fault.c handler in or32/mm/fault.c */ + /* call fault.c handler in openrisc/mm/fault.c */ l.jal do_page_fault l.nop l.j _ret_from_exception @@ -547,11 +547,12 @@ EXCEPTION_ENTRY(_external_irq_handler) l.bnf 1f // ext irq enabled, all ok. 
l.nop +#ifdef CONFIG_PRINTK l.addi r1,r1,-0x8 l.movhi r3,hi(42f) l.ori r3,r3,lo(42f) l.sw 0x0(r1),r3 - l.jal printk + l.jal _printk l.sw 0x4(r1),r4 l.addi r1,r1,0x8 @@ -560,6 +561,7 @@ EXCEPTION_ENTRY(_external_irq_handler) .string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r" .align 4 .previous +#endif l.ori r4,r4,SPR_SR_IEE // fix the bug // l.sw PT_SR(r1),r4 @@ -567,8 +569,8 @@ EXCEPTION_ENTRY(_external_irq_handler) #endif CLEAR_LWA_FLAG(r3) l.addi r3,r1,0 - l.movhi r8,hi(do_IRQ) - l.ori r8,r8,lo(do_IRQ) + l.movhi r8,hi(generic_handle_arch_irq) + l.ori r8,r8,lo(generic_handle_arch_irq) l.jalr r8 l.nop l.j _ret_from_intr @@ -599,7 +601,7 @@ UNHANDLED_EXCEPTION(_vector_0xb00,0xb00) */ _string_syscall_return: - .string "syscall return %ld \n\r\0" + .string "syscall r9:0x%08x -> syscall(%ld) return %ld\0" .align 4 ENTRY(_sys_call_handler) @@ -677,15 +679,25 @@ _syscall_return: _syscall_debug: l.movhi r3,hi(_string_syscall_return) l.ori r3,r3,lo(_string_syscall_return) - l.ori r27,r0,1 + l.ori r27,r0,2 l.sw -4(r1),r27 l.sw -8(r1),r11 - l.addi r1,r1,-8 - l.movhi r27,hi(printk) - l.ori r27,r27,lo(printk) + l.lwz r29,PT_ORIG_GPR11(r1) + l.sw -12(r1),r29 + l.lwz r29,PT_GPR9(r1) + l.sw -16(r1),r29 + l.movhi r27,hi(_printk) + l.ori r27,r27,lo(_printk) l.jalr r27 - l.nop - l.addi r1,r1,8 + l.addi r1,r1,-16 + l.addi r1,r1,16 +#endif +#if 0 +_syscall_show_regs: + l.movhi r27,hi(show_registers) + l.ori r27,r27,lo(show_registers) + l.jalr r27 + l.or r3,r1,r1 #endif _syscall_check_trace_leave: @@ -702,6 +714,10 @@ _syscall_check_trace_leave: * interrupts that set NEED_RESCHED or SIGNALPENDING... really true? */ _syscall_check_work: +#ifdef CONFIG_DEBUG_RSEQ + l.jal rseq_syscall + l.ori r3,r1,0 +#endif /* Here we need to disable interrupts */ DISABLE_INTERRUPTS(r27,r29) TRACE_IRQS_OFF @@ -832,9 +848,17 @@ _syscall_badsys: /******* END SYSCALL HANDLING *******/ -/* ---[ 0xd00: Trap exception ]------------------------------------------ */ +/* ---[ 0xd00: Floating Point exception ]-------------------------------- */ -UNHANDLED_EXCEPTION(_vector_0xd00,0xd00) +EXCEPTION_ENTRY(_fpe_trap_handler) + CLEAR_LWA_FLAG(r3) + + /* r4: EA of fault (set by EXCEPTION_HANDLE) */ + l.jal do_fpe_trap + l.addi r3,r1,0 /* pt_regs */ + + l.j _ret_from_exception + l.nop /* ---[ 0xe00: Trap exception ]------------------------------------------ */ @@ -999,11 +1023,10 @@ ENTRY(ret_from_fork) l.lwz r11,PT_GPR11(r1) /* The syscall fast path return expects call-saved registers - * r12-r28 to be untouched, so we restore them here as they + * r14-r28 to be untouched, so we restore them here as they * will have been effectively clobbered when arriving here * via the call to switch() */ - l.lwz r12,PT_GPR12(r1) l.lwz r14,PT_GPR14(r1) l.lwz r16,PT_GPR16(r1) l.lwz r18,PT_GPR18(r1) @@ -1035,10 +1058,10 @@ ENTRY(ret_from_fork) /* _switch MUST never lay on page boundry, cause it runs from * effective addresses and beeing interrupted by iTLB miss would kill it. - * dTLB miss seams to never accour in the bad place since data accesses + * dTLB miss seems to never accour in the bad place since data accesses * are from task structures which are always page aligned. * - * The problem happens in RESTORE_ALL_NO_R11 where we first set the EPCR + * The problem happens in RESTORE_ALL where we first set the EPCR * register, then load the previous register values and only at the end call * the l.rfe instruction. If get TLB miss in beetwen the EPCR register gets * garbled and we end up calling l.rfe with the wrong EPCR. 
(same probably @@ -1066,9 +1089,8 @@ ENTRY(_switch) /* No need to store r1/PT_SP as it goes into KSP below */ l.sw PT_GPR2(r1),r2 l.sw PT_GPR9(r1),r9 - /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being - * and expects r12 to be callee-saved... */ - l.sw PT_GPR12(r1),r12 + + /* Save callee-saved registers to the new pt_regs */ l.sw PT_GPR14(r1),r14 l.sw PT_GPR16(r1),r16 l.sw PT_GPR18(r1),r18 @@ -1109,9 +1131,7 @@ ENTRY(_switch) /* No need to restore r10 */ /* ...and do not restore r11 */ - /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being - * and expects r12 to be callee-saved... */ - l.lwz r12,PT_GPR12(r1) + /* Restore callee-saved registers */ l.lwz r14,PT_GPR14(r1) l.lwz r16,PT_GPR16(r1) l.lwz r18,PT_GPR18(r1) @@ -1164,15 +1184,18 @@ _fork_save_extra_regs_and_call: ENTRY(__sys_clone) l.movhi r29,hi(sys_clone) - l.ori r29,r29,lo(sys_clone) l.j _fork_save_extra_regs_and_call - l.addi r7,r1,0 + l.ori r29,r29,lo(sys_clone) + +ENTRY(__sys_clone3) + l.movhi r29,hi(sys_clone3) + l.j _fork_save_extra_regs_and_call + l.ori r29,r29,lo(sys_clone3) ENTRY(__sys_fork) l.movhi r29,hi(sys_fork) - l.ori r29,r29,lo(sys_fork) l.j _fork_save_extra_regs_and_call - l.addi r3,r1,0 + l.ori r29,r29,lo(sys_fork) ENTRY(sys_rt_sigreturn) l.jal _sys_rt_sigreturn diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S index b0dc974f9a74..bd760066f1cd 100644 --- a/arch/openrisc/kernel/head.S +++ b/arch/openrisc/kernel/head.S @@ -16,25 +16,25 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/serial_reg.h> +#include <linux/pgtable.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/mmu.h> -#include <asm/pgtable.h> #include <asm/thread_info.h> #include <asm/cache.h> #include <asm/spr_defs.h> #include <asm/asm-offsets.h> #include <linux/of_fdt.h> -#define tophys(rd,rs) \ - l.movhi rd,hi(-KERNELBASE) ;\ +#define tophys(rd,rs) \ + l.movhi rd,hi(-KERNELBASE) ;\ l.add rd,rd,rs -#define CLEAR_GPR(gpr) \ +#define CLEAR_GPR(gpr) \ l.movhi gpr,0x0 -#define LOAD_SYMBOL_2_GPR(gpr,symbol) \ - l.movhi gpr,hi(symbol) ;\ +#define LOAD_SYMBOL_2_GPR(gpr,symbol) \ + l.movhi gpr,hi(symbol) ;\ l.ori gpr,gpr,lo(symbol) @@ -297,19 +297,23 @@ /* temporary store r3, r9 into r1, r10 */ ;\ l.addi r1,r3,0x0 ;\ l.addi r10,r9,0x0 ;\ - /* the string referenced by r3 must be low enough */ ;\ + LOAD_SYMBOL_2_GPR(r9,_string_unhandled_exception) ;\ + tophys (r3,r9) ;\ l.jal _emergency_print ;\ - l.ori r3,r0,lo(_string_unhandled_exception) ;\ + l.nop ;\ l.mfspr r3,r0,SPR_NPC ;\ l.jal _emergency_print_nr ;\ - l.andi r3,r3,0x1f00 ;\ - /* the string referenced by r3 must be low enough */ ;\ + l.andi r3,r3,0x1f00 ;\ + LOAD_SYMBOL_2_GPR(r9,_string_epc_prefix) ;\ + tophys (r3,r9) ;\ l.jal _emergency_print ;\ - l.ori r3,r0,lo(_string_epc_prefix) ;\ + l.nop ;\ l.jal _emergency_print_nr ;\ - l.mfspr r3,r0,SPR_EPCR_BASE ;\ + l.mfspr r3,r0,SPR_EPCR_BASE ;\ + LOAD_SYMBOL_2_GPR(r9,_string_nl) ;\ + tophys (r3,r9) ;\ l.jal _emergency_print ;\ - l.ori r3,r0,lo(_string_nl) ;\ + l.nop ;\ /* end of printing */ ;\ l.addi r3,r1,0x0 ;\ l.addi r9,r10,0x0 ;\ @@ -322,21 +326,21 @@ l.addi r1,r1,-(INT_FRAME_SIZE) ;\ /* r1 is KSP, r30 is __pa(KSP) */ ;\ tophys (r30,r1) ;\ - l.sw PT_GPR12(r30),r12 ;\ + l.sw PT_GPR12(r30),r12 ;\ l.mfspr r12,r0,SPR_EPCR_BASE ;\ l.sw PT_PC(r30),r12 ;\ l.mfspr r12,r0,SPR_ESR_BASE ;\ l.sw PT_SR(r30),r12 ;\ /* save r31 */ ;\ EXCEPTION_T_LOAD_GPR30(r12) ;\ - l.sw PT_GPR30(r30),r12 ;\ + l.sw PT_GPR30(r30),r12 ;\ /* save r10 as was prior to exception */ 
;\ EXCEPTION_T_LOAD_GPR10(r12) ;\ - l.sw PT_GPR10(r30),r12 ;\ - /* save PT_SP as was prior to exception */ ;\ + l.sw PT_GPR10(r30),r12 ;\ + /* save PT_SP as was prior to exception */ ;\ EXCEPTION_T_LOAD_SP(r12) ;\ l.sw PT_SP(r30),r12 ;\ - l.sw PT_GPR13(r30),r13 ;\ + l.sw PT_GPR13(r30),r13 ;\ /* --> */ ;\ /* save exception r4, set r4 = EA */ ;\ l.sw PT_GPR4(r30),r4 ;\ @@ -353,6 +357,8 @@ /* =====================================================[ exceptions] === */ + __HEAD + /* ---[ 0x100: RESET exception ]----------------------------------------- */ .org 0x100 /* Jump to .init code at _start which lives in the .head section @@ -390,7 +396,7 @@ _dispatch_do_ipage_fault: .org 0x500 EXCEPTION_HANDLE(_timer_handler) -/* ---[ 0x600: Alignment exception ]-------------------------------------- */ +/* ---[ 0x600: Alignment exception ]------------------------------------- */ .org 0x600 EXCEPTION_HANDLE(_alignment_handler) @@ -420,9 +426,9 @@ _dispatch_do_ipage_fault: .org 0xc00 EXCEPTION_HANDLE(_sys_call_handler) -/* ---[ 0xd00: Trap exception ]------------------------------------------ */ +/* ---[ 0xd00: Floating point exception ]-------------------------------- */ .org 0xd00 - UNHANDLED_EXCEPTION(_vector_0xd00) + EXCEPTION_HANDLE(_fpe_trap_handler) /* ---[ 0xe00: Trap exception ]------------------------------------------ */ .org 0xe00 @@ -502,10 +508,10 @@ _dispatch_do_ipage_fault: /* .text*/ -/* This early stuff belongs in HEAD, but some of the functions below definitely +/* This early stuff belongs in the .init.text section, but some of the functions below definitely * don't... */ - __HEAD + __INIT .global _start _start: /* Init r0 to zero as per spec */ @@ -521,6 +527,15 @@ _start: l.ori r3,r0,0x1 l.mtspr r0,r3,SPR_SR + /* + * Start the TTCR as early as possible, so that the RNG can make use of + * measurements of boot time from the earliest opportunity. Especially + * important is that the TTCR does not return zero by the time we reach + * random_init(). 
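Note: the two instructions that follow this comment load SPR_TTMR_CR and write it to SPR_TTMR, putting the tick timer into continuous-run mode. Once that is done, SPR_TTCR acts as a free-running counter; a C-level sketch of sampling it (assuming the usual mfspr() helper and SPR definitions from the OpenRISC headers):

#include <asm/spr.h>
#include <asm/spr_defs.h>

/*
 * Sketch: after head.S starts the tick timer, SPR_TTCR counts continuously,
 * so early entropy collection (random_init()) observes a non-zero,
 * boot-time-dependent value.
 */
static inline unsigned long boot_cycle_counter(void)
{
	return mfspr(SPR_TTCR);
}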
+ */ + l.movhi r3,hi(SPR_TTMR_CR) + l.mtspr r0,r3,SPR_TTMR + CLEAR_GPR(r1) CLEAR_GPR(r2) CLEAR_GPR(r3) @@ -599,7 +614,7 @@ flush_tlb: l.jal _flush_tlb l.nop -/* The MMU needs to be enabled before or32_early_setup is called */ +/* The MMU needs to be enabled before or1k_early_setup is called */ enable_mmu: /* @@ -641,9 +656,9 @@ enable_mmu: /* magic number mismatch, set fdt pointer to null */ l.or r25,r0,r0 _fdt_found: - /* pass fdt pointer to or32_early_setup in r3 */ + /* pass fdt pointer to or1k_early_setup in r3 */ l.or r3,r0,r25 - LOAD_SYMBOL_2_GPR(r24, or32_early_setup) + LOAD_SYMBOL_2_GPR(r24, or1k_early_setup) l.jalr r24 l.nop @@ -803,7 +818,7 @@ secondary_start: #endif -/* ========================================[ cache ]=== */ +/* ==========================================================[ cache ]=== */ /* alignment here so we don't change memory offsets with * memory controller defined @@ -1321,274 +1336,110 @@ i_pte_not_present: /* =================================================[ debugging aids ]=== */ - .align 64 -_immu_trampoline: - .space 64 -_immu_trampoline_top: - -#define TRAMP_SLOT_0 (0x0) -#define TRAMP_SLOT_1 (0x4) -#define TRAMP_SLOT_2 (0x8) -#define TRAMP_SLOT_3 (0xc) -#define TRAMP_SLOT_4 (0x10) -#define TRAMP_SLOT_5 (0x14) -#define TRAMP_FRAME_SIZE (0x18) - -ENTRY(_immu_trampoline_workaround) - // r2 EEA - // r6 is physical EEA - tophys(r6,r2) - - LOAD_SYMBOL_2_GPR(r5,_immu_trampoline) - tophys (r3,r5) // r3 is trampoline (physical) - - LOAD_SYMBOL_2_GPR(r4,0x15000000) - l.sw TRAMP_SLOT_0(r3),r4 - l.sw TRAMP_SLOT_1(r3),r4 - l.sw TRAMP_SLOT_4(r3),r4 - l.sw TRAMP_SLOT_5(r3),r4 - - // EPC = EEA - 0x4 - l.lwz r4,0x0(r6) // load op @ EEA + 0x0 (fc address) - l.sw TRAMP_SLOT_3(r3),r4 // store it to _immu_trampoline_data - l.lwz r4,-0x4(r6) // load op @ EEA - 0x4 (f8 address) - l.sw TRAMP_SLOT_2(r3),r4 // store it to _immu_trampoline_data - - l.srli r5,r4,26 // check opcode for write access - l.sfeqi r5,0 // l.j - l.bf 0f - l.sfeqi r5,0x11 // l.jr - l.bf 1f - l.sfeqi r5,1 // l.jal - l.bf 2f - l.sfeqi r5,0x12 // l.jalr - l.bf 3f - l.sfeqi r5,3 // l.bnf - l.bf 4f - l.sfeqi r5,4 // l.bf - l.bf 5f -99: - l.nop - l.j 99b // should never happen - l.nop 1 - - // r2 is EEA - // r3 is trampoline address (physical) - // r4 is instruction - // r6 is physical(EEA) - // - // r5 - -2: // l.jal - - /* 19 20 aa aa l.movhi r9,0xaaaa - * a9 29 bb bb l.ori r9,0xbbbb - * - * where 0xaaaabbbb is EEA + 0x4 shifted right 2 - */ - - l.addi r6,r2,0x4 // this is 0xaaaabbbb - - // l.movhi r9,0xaaaa - l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9 - l.sh (TRAMP_SLOT_0+0x0)(r3),r5 - l.srli r5,r6,16 - l.sh (TRAMP_SLOT_0+0x2)(r3),r5 - - // l.ori r9,0xbbbb - l.ori r5,r0,0xa929 // 0xa929 == l.ori r9 - l.sh (TRAMP_SLOT_1+0x0)(r3),r5 - l.andi r5,r6,0xffff - l.sh (TRAMP_SLOT_1+0x2)(r3),r5 - - /* falthrough, need to set up new jump offset */ - - -0: // l.j - l.slli r6,r4,6 // original offset shifted left 6 - 2 -// l.srli r6,r6,6 // original offset shifted right 2 - - l.slli r4,r2,4 // old jump position: EEA shifted left 4 -// l.srli r4,r4,6 // old jump position: shifted right 2 - - l.addi r5,r3,0xc // new jump position (physical) - l.slli r5,r5,4 // new jump position: shifted left 4 - - // calculate new jump offset - // new_off = old_off + (old_jump - new_jump) - - l.sub r5,r4,r5 // old_jump - new_jump - l.add r5,r6,r5 // orig_off + (old_jump - new_jump) - l.srli r5,r5,6 // new offset shifted right 2 - - // r5 is new jump offset - // l.j has opcode 0x0... 
- l.sw TRAMP_SLOT_2(r3),r5 // write it back - - l.j trampoline_out - l.nop - -/* ----------------------------- */ - -3: // l.jalr - - /* 19 20 aa aa l.movhi r9,0xaaaa - * a9 29 bb bb l.ori r9,0xbbbb - * - * where 0xaaaabbbb is EEA + 0x4 shifted right 2 - */ - - l.addi r6,r2,0x4 // this is 0xaaaabbbb - - // l.movhi r9,0xaaaa - l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9 - l.sh (TRAMP_SLOT_0+0x0)(r3),r5 - l.srli r5,r6,16 - l.sh (TRAMP_SLOT_0+0x2)(r3),r5 - - // l.ori r9,0xbbbb - l.ori r5,r0,0xa929 // 0xa929 == l.ori r9 - l.sh (TRAMP_SLOT_1+0x0)(r3),r5 - l.andi r5,r6,0xffff - l.sh (TRAMP_SLOT_1+0x2)(r3),r5 - - l.lhz r5,(TRAMP_SLOT_2+0x0)(r3) // load hi part of jump instruction - l.andi r5,r5,0x3ff // clear out opcode part - l.ori r5,r5,0x4400 // opcode changed from l.jalr -> l.jr - l.sh (TRAMP_SLOT_2+0x0)(r3),r5 // write it back - - /* falthrough */ - -1: // l.jr - l.j trampoline_out - l.nop - -/* ----------------------------- */ - -4: // l.bnf -5: // l.bf - l.slli r6,r4,6 // original offset shifted left 6 - 2 -// l.srli r6,r6,6 // original offset shifted right 2 - - l.slli r4,r2,4 // old jump position: EEA shifted left 4 -// l.srli r4,r4,6 // old jump position: shifted right 2 - - l.addi r5,r3,0xc // new jump position (physical) - l.slli r5,r5,4 // new jump position: shifted left 4 - - // calculate new jump offset - // new_off = old_off + (old_jump - new_jump) - - l.add r6,r6,r4 // (orig_off + old_jump) - l.sub r6,r6,r5 // (orig_off + old_jump) - new_jump - l.srli r6,r6,6 // new offset shifted right 2 - - // r6 is new jump offset - l.lwz r4,(TRAMP_SLOT_2+0x0)(r3) // load jump instruction - l.srli r4,r4,16 - l.andi r4,r4,0xfc00 // get opcode part - l.slli r4,r4,16 - l.or r6,r4,r6 // l.b(n)f new offset - l.sw TRAMP_SLOT_2(r3),r6 // write it back - - /* we need to add l.j to EEA + 0x8 */ - tophys (r4,r2) // may not be needed (due to shifts down_ - l.addi r4,r4,(0x8 - 0x8) // jump target = r2 + 0x8 (compensate for 0x8) - // jump position = r5 + 0x8 (0x8 compensated) - l.sub r4,r4,r5 // jump offset = target - new_position + 0x8 - - l.slli r4,r4,4 // the amount of info in imediate of jump - l.srli r4,r4,6 // jump instruction with offset - l.sw TRAMP_SLOT_4(r3),r4 // write it to 4th slot - - /* fallthrough */ - -trampoline_out: - // set up new EPC to point to our trampoline code - LOAD_SYMBOL_2_GPR(r5,_immu_trampoline) - l.mtspr r0,r5,SPR_EPCR_BASE - - // immu_trampoline is (4x) CACHE_LINE aligned - // and only 6 instructions long, - // so we need to invalidate only 2 lines - - /* Establish cache block size - If BS=0, 16; - If BS=1, 32; - r14 contain block size - */ - l.mfspr r21,r0,SPR_ICCFGR - l.andi r21,r21,SPR_ICCFGR_CBS - l.srli r21,r21,7 - l.ori r23,r0,16 - l.sll r14,r23,r21 - - l.mtspr r0,r5,SPR_ICBIR - l.add r5,r5,r14 - l.mtspr r0,r5,SPR_ICBIR - - l.jr r9 - l.nop - - /* - * DSCR: prints a string referenced by r3. + * DESC: Prints ASCII character stored in r7 * - * PRMS: r3 - address of the first character of null - * terminated string to be printed + * PRMS: r7 - a 32-bit value with an ASCII character in the first byte + * position. 
* - * PREQ: UART at UART_BASE_ADD has to be initialized + * PREQ: The UART at UART_BASE_ADD has to be initialized * - * POST: caller should be aware that r3, r9 are changed + * POST: internally used but restores: + * r4 - to store UART_BASE_ADD + * r5 - for loading OFF_TXFULL / THRE,TEMT + * r6 - for storing bitmask (SERIAL_8250) */ -ENTRY(_emergency_print) +ENTRY(_emergency_putc) EMERGENCY_PRINT_STORE_GPR4 EMERGENCY_PRINT_STORE_GPR5 EMERGENCY_PRINT_STORE_GPR6 - EMERGENCY_PRINT_STORE_GPR7 -2: - l.lbz r7,0(r3) - l.sfeq r7,r0 - l.bf 9f - l.nop -// putc: l.movhi r4,hi(UART_BASE_ADD) + l.ori r4,r4,lo(UART_BASE_ADD) +#if defined(CONFIG_SERIAL_LITEUART) + /* Check OFF_TXFULL status */ +1: l.lwz r5,4(r4) + l.andi r5,r5,0xff + l.sfnei r5,0 + l.bf 1b + l.nop + + /* Write character */ + l.andi r7,r7,0xff + l.sw 0(r4),r7 +#elif defined(CONFIG_SERIAL_8250) + /* Check UART LSR THRE (hold) bit */ l.addi r6,r0,0x20 1: l.lbz r5,5(r4) l.andi r5,r5,0x20 l.sfeq r5,r6 l.bnf 1b - l.nop + l.nop + /* Write character */ l.sb 0(r4),r7 + /* Check UART LSR THRE|TEMT (hold, empty) bits */ l.addi r6,r0,0x60 1: l.lbz r5,5(r4) l.andi r5,r5,0x60 l.sfeq r5,r6 l.bnf 1b - l.nop + l.nop +#endif + EMERGENCY_PRINT_LOAD_GPR6 + EMERGENCY_PRINT_LOAD_GPR5 + EMERGENCY_PRINT_LOAD_GPR4 + l.jr r9 + l.nop + +/* + * DSCR: prints a string referenced by r3. + * + * PRMS: r3 - address of the first character of null + * terminated string to be printed + * + * PREQ: UART at UART_BASE_ADD has to be initialized + * + * POST: caller should be aware that r3, r9 are changed + */ +ENTRY(_emergency_print) + EMERGENCY_PRINT_STORE_GPR7 + EMERGENCY_PRINT_STORE_GPR9 + + /* Load character to r7, check for null terminator */ +2: l.lbz r7,0(r3) + l.sfeqi r7,0x0 + l.bf 9f + l.nop + + l.jal _emergency_putc + l.nop /* next character */ l.j 2b - l.addi r3,r3,0x1 + l.addi r3,r3,0x1 9: + EMERGENCY_PRINT_LOAD_GPR9 EMERGENCY_PRINT_LOAD_GPR7 - EMERGENCY_PRINT_LOAD_GPR6 - EMERGENCY_PRINT_LOAD_GPR5 - EMERGENCY_PRINT_LOAD_GPR4 l.jr r9 - l.nop + l.nop +/* + * DSCR: prints a number in r3 in hex. + * + * PRMS: r3 - a 32-bit unsigned integer + * + * PREQ: UART at UART_BASE_ADD has to be initialized + * + * POST: caller should be aware that r3, r9 are changed + */ ENTRY(_emergency_print_nr) - EMERGENCY_PRINT_STORE_GPR4 - EMERGENCY_PRINT_STORE_GPR5 - EMERGENCY_PRINT_STORE_GPR6 EMERGENCY_PRINT_STORE_GPR7 EMERGENCY_PRINT_STORE_GPR8 + EMERGENCY_PRINT_STORE_GPR9 l.addi r8,r0,32 // shift register @@ -1600,58 +1451,39 @@ ENTRY(_emergency_print_nr) /* don't skip the last zero if number == 0x0 */ l.sfeqi r8,0x4 l.bf 2f - l.nop + l.nop l.sfeq r7,r0 l.bf 1b - l.nop + l.nop 2: l.srl r7,r3,r8 l.andi r7,r7,0xf l.sflts r8,r0 - l.bf 9f + l.bf 9f + /* Numbers greater than 9 translate to a-f */ l.sfgtui r7,0x9 l.bnf 8f - l.nop + l.nop l.addi r7,r7,0x27 -8: - l.addi r7,r7,0x30 -// putc: - l.movhi r4,hi(UART_BASE_ADD) - - l.addi r6,r0,0x20 -1: l.lbz r5,5(r4) - l.andi r5,r5,0x20 - l.sfeq r5,r6 - l.bnf 1b - l.nop - - l.sb 0(r4),r7 - - l.addi r6,r0,0x60 -1: l.lbz r5,5(r4) - l.andi r5,r5,0x60 - l.sfeq r5,r6 - l.bnf 1b - l.nop + /* Convert to ascii and output character */ +8: l.jal _emergency_putc + l.addi r7,r7,0x30 /* next character */ l.j 2b l.addi r8,r8,-0x4 9: + EMERGENCY_PRINT_LOAD_GPR9 EMERGENCY_PRINT_LOAD_GPR8 EMERGENCY_PRINT_LOAD_GPR7 - EMERGENCY_PRINT_LOAD_GPR6 - EMERGENCY_PRINT_LOAD_GPR5 - EMERGENCY_PRINT_LOAD_GPR4 l.jr r9 - l.nop - + l.nop /* * This should be used for debugging only. 
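Note: for readers not fluent in OR1K assembly, the 8250 path of _emergency_putc above is the classic polled line-status sequence. A C rendering of the same logic (8250 register offsets; the base pointer stands in for UART_BASE_ADD):

#include <stdint.h>

#define UART_LSR	5	/* line status register offset */
#define UART_LSR_THRE	0x20	/* transmit-holding-register empty */
#define UART_LSR_TEMT	0x40	/* transmitter empty */

/*
 * Busy-wait until the holding register is free, write the byte, then wait
 * for the transmitter to drain completely, mirroring the two polling loops
 * in the assembly above.
 */
static void emergency_putc_sketch(volatile uint8_t *base, char c)
{
	while (!(base[UART_LSR] & UART_LSR_THRE))
		;
	base[0] = c;
	while ((base[UART_LSR] & (UART_LSR_THRE | UART_LSR_TEMT)) !=
	       (UART_LSR_THRE | UART_LSR_TEMT))
		;
}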
@@ -1676,7 +1508,9 @@ ENTRY(_emergency_print_nr) ENTRY(_early_uart_init) l.movhi r3,hi(UART_BASE_ADD) + l.ori r3,r3,lo(UART_BASE_ADD) +#if defined(CONFIG_SERIAL_8250) l.addi r4,r0,0x7 l.sb 0x2(r3),r4 @@ -1694,9 +1528,10 @@ ENTRY(_early_uart_init) l.addi r4,r0,((UART_DIVISOR) & 0x000000ff) l.sb UART_DLL(r3),r4 l.sb 0x3(r3),r5 +#endif l.jr r9 - l.nop + l.nop .align 0x1000 .global _secondary_evbar @@ -1711,13 +1546,13 @@ _secondary_evbar: .section .rodata _string_unhandled_exception: - .string "\n\rRunarunaround: Unhandled exception 0x\0" + .string "\r\nRunarunaround: Unhandled exception 0x\0" _string_epc_prefix: .string ": EPC=0x\0" _string_nl: - .string "\n\r\0" + .string "\r\n\0" /* ========================================[ page aligned structures ]=== */ diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c index c38fa863afa8..f38e10962a84 100644 --- a/arch/openrisc/kernel/irq.c +++ b/arch/openrisc/kernel/irq.c @@ -36,8 +36,3 @@ void __init init_IRQ(void) { irqchip_init(); } - -void __irq_entry do_IRQ(struct pt_regs *regs) -{ - handle_arch_irq(regs); -} diff --git a/arch/openrisc/kernel/module.c b/arch/openrisc/kernel/module.c index 532013f523ac..c9ff4c4a0b29 100644 --- a/arch/openrisc/kernel/module.c +++ b/arch/openrisc/kernel/module.c @@ -39,22 +39,32 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, value = sym->st_value + rel[i].r_addend; switch (ELF32_R_TYPE(rel[i].r_info)) { - case R_OR32_32: + case R_OR1K_32: *location = value; break; - case R_OR32_CONST: + case R_OR1K_LO_16_IN_INSN: *((uint16_t *)location + 1) = value; break; - case R_OR32_CONSTH: + case R_OR1K_HI_16_IN_INSN: *((uint16_t *)location + 1) = value >> 16; break; - case R_OR32_JUMPTARG: + case R_OR1K_INSN_REL_26: value -= (uint32_t)location; value >>= 2; value &= 0x03ffffff; value |= *location & 0xfc000000; *location = value; break; + case R_OR1K_AHI16: + /* Adjust the operand to match with a signed LO16. */ + value += 0x8000; + *((uint16_t *)location + 1) = value >> 16; + break; + case R_OR1K_SLO16: + /* Split value lower 16-bits. 
*/ + value = ((value & 0xf800) << 10) | (value & 0x7ff); + *location = (*location & ~0x3e007ff) | value; + break; default: pr_err("module %s: Unknown relocation: %u\n", me->name, ELF32_R_TYPE(rel[i].r_info)); diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c index 7d6a62eee2ef..212e5f85004c 100644 --- a/arch/openrisc/kernel/or32_ksyms.c +++ b/arch/openrisc/kernel/or32_ksyms.c @@ -18,6 +18,7 @@ #include <linux/interrupt.h> #include <linux/vmalloc.h> #include <linux/semaphore.h> +#include <linux/pgtable.h> #include <asm/processor.h> #include <linux/uaccess.h> @@ -25,8 +26,6 @@ #include <asm/io.h> #include <asm/hardirq.h> #include <asm/delay.h> -#include <asm/pgalloc.h> -#include <asm/pgtable.h> #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index b06f84f6676f..eef99fee2110 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -14,8 +14,7 @@ */ #define __KERNEL_SYSCALLS__ -#include <stdarg.h> - +#include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/debug.h> @@ -34,12 +33,14 @@ #include <linux/init_task.h> #include <linux/mqueue.h> #include <linux/fs.h> +#include <linux/reboot.h> #include <linux/uaccess.h> -#include <asm/pgtable.h> +#include <asm/fpu.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/spr_defs.h> +#include <asm/switch_to.h> #include <linux/smp.h> @@ -50,9 +51,27 @@ */ struct thread_info *current_thread_info_set[NR_CPUS] = { &init_thread_info, }; -void machine_restart(void) +void machine_restart(char *cmd) +{ + do_kernel_restart(cmd); + + __asm__("l.nop 13"); + + /* Give a grace period for failure to restart of 1s */ + mdelay(1000); + + /* Whoops - the platform was unable to reboot. Tell the user! */ + pr_emerg("Reboot failed -- System halted\n"); + while (1); +} + +/* + * This is used if a sys-off handler was not set by a power management + * driver, in this case we can assume we are on a simulator. On + * OpenRISC simulators l.nop 1 will trigger the simulator exit. 
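Note: the module loader hunk above adds two relocation types: R_OR1K_AHI16, a high-16 half adjusted so that a later signed low-16 add reconstructs the full value, and R_OR1K_SLO16, whose low 16 bits are scattered into the split immediate field of a store-form instruction. The same arithmetic as a standalone sketch (halfword indexing assumes the big-endian layout used on OR1K):

#include <stdint.h>

/* AHI16: pre-bias by 0x8000 so the paired signed LO16 restores the value;
 * on big-endian OR1K the second halfword of the word holds the immediate. */
static void apply_ahi16(uint32_t *insn, uint32_t value)
{
	value += 0x8000;
	((uint16_t *)insn)[1] = value >> 16;
}

/* SLO16: split the low 16 bits across the two immediate fields of the
 * instruction, leaving the other bits of the encoding untouched. */
static void apply_slo16(uint32_t *insn, uint32_t value)
{
	value = ((value & 0xf800) << 10) | (value & 0x7ff);
	*insn = (*insn & ~0x3e007ff) | value;
}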
+ */ +static void default_power_off(void) { - printk(KERN_INFO "*** MACHINE RESTART ***\n"); __asm__("l.nop 1"); } @@ -71,7 +90,8 @@ void machine_halt(void) void machine_power_off(void) { printk(KERN_INFO "*** MACHINE POWER OFF ***\n"); - __asm__("l.nop 1"); + do_kernel_power_off(); + default_power_off(); } /* @@ -80,12 +100,13 @@ void machine_power_off(void) */ void arch_cpu_idle(void) { - local_irq_enable(); + raw_local_irq_enable(); if (mfspr(SPR_UPR) & SPR_UPR_PMP) mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME); + raw_local_irq_disable(); } -void (*pm_power_off) (void) = machine_power_off; +void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); /* @@ -99,17 +120,11 @@ void flush_thread(void) void show_regs(struct pt_regs *regs) { - extern void show_registers(struct pt_regs *regs); - show_regs_print_info(KERN_DEFAULT); /* __PHX__ cleanup this mess */ show_registers(regs); } -void release_thread(struct task_struct *dead_task) -{ -} - /* * Copy the thread-specific (arch specific) info from the current * process to the new one p @@ -122,7 +137,7 @@ extern asmlinkage void ret_from_fork(void); * @usp: user stack pointer or fn for kernel thread * @arg: arg to fn for kernel thread; always NULL for userspace thread * @p: the newly created task - * @regs: CPU context to copy for userspace thread; always NULL for kthread + * @tls: the Thread Local Storage pointer for the new process * * At the top of a newly initialized kernel stack are two stacked pt_reg * structures. The first (topmost) is the userspace context of the thread. @@ -148,9 +163,11 @@ extern asmlinkage void ret_from_fork(void); */ int -copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p) +copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { + unsigned long clone_flags = args->flags; + unsigned long usp = args->stack; + unsigned long tls = args->tls; struct pt_regs *userregs; struct pt_regs *kregs; unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; @@ -168,10 +185,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp, sp -= sizeof(struct pt_regs); kregs = (struct pt_regs *)sp; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(args->fn)) { memset(kregs, 0, sizeof(struct pt_regs)); - kregs->gpr[20] = usp; /* fn, kernel thread */ - kregs->gpr[22] = arg; + kregs->gpr[20] = (unsigned long)args->fn; + kregs->gpr[22] = (unsigned long)args->fn_arg; } else { *userregs = *current_pt_regs(); @@ -179,16 +196,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp, userregs->sp = usp; /* - * For CLONE_SETTLS set "tp" (r10) to the TLS pointer passed to sys_clone. - * - * The kernel entry is: - * int clone (long flags, void *child_stack, int *parent_tid, - * int *child_tid, struct void *tls) - * - * This makes the source r7 in the kernel registers. + * For CLONE_SETTLS set "tp" (r10) to the TLS pointer. */ if (clone_flags & CLONE_SETTLS) - userregs->gpr[10] = userregs->gpr[7]; + userregs->gpr[10] = tls; userregs->gpr[11] = 0; /* Result from fork() */ @@ -221,13 +232,6 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) regs->sp = sp; } -/* Fill in the fpu structure for a core dump. 
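Note: machine_restart() in the process.c hunk above now routes through do_kernel_restart(), so board-specific reset logic is expected to come from a registered restart handler rather than from arch code. A sketch of how a platform driver could hook that path (driver name, priority and the register poke are hypothetical):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Invoked by do_kernel_restart(); a real driver would hit its reset or
 * watchdog register here. */
static int example_restart(struct notifier_block *nb, unsigned long mode,
			   void *cmd)
{
	return NOTIFY_DONE;
}

static struct notifier_block example_restart_nb = {
	.notifier_call	= example_restart,
	.priority	= 128,
};

static int __init example_restart_init(void)
{
	return register_restart_handler(&example_restart_nb);
}
device_initcall(example_restart_init);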
*/ -int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu) -{ - /* TODO */ - return 0; -} - extern struct thread_info *_switch(struct thread_info *old_ti, struct thread_info *new_ti); extern int lwa_flag; @@ -241,6 +245,8 @@ struct task_struct *__switch_to(struct task_struct *old, local_irq_save(flags); + save_fpu(current); + /* current_set is an array of saved current pointers * (one for each cpu). we need them at user->kernel transition, * while we save them at kernel->user transition @@ -253,6 +259,8 @@ struct task_struct *__switch_to(struct task_struct *old, current_thread_info_set[smp_processor_id()] = new_ti; last = (_switch(old_ti, new_ti))->task; + restore_fpu(current); + local_irq_restore(flags); return last; @@ -272,7 +280,7 @@ void dump_elf_thread(elf_greg_t *dest, struct pt_regs* regs) dest[35] = 0; } -unsigned long get_wchan(struct task_struct *p) +unsigned long __get_wchan(struct task_struct *p) { /* TODO */ diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c index 19e6008bf114..e424e9bd12a7 100644 --- a/arch/openrisc/kernel/prom.c +++ b/arch/openrisc/kernel/prom.c @@ -22,6 +22,6 @@ void __init early_init_devtree(void *params) { - early_init_dt_scan(params); + early_init_dt_scan(params, __pa(params)); memblock_allow_resize(); } diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c index 6a5a91c76338..8430570d0620 100644 --- a/arch/openrisc/kernel/ptrace.c +++ b/arch/openrisc/kernel/ptrace.c @@ -22,12 +22,14 @@ #include <linux/ptrace.h> #include <linux/audit.h> #include <linux/regset.h> -#include <linux/tracehook.h> #include <linux/elf.h> #include <asm/thread_info.h> #include <asm/page.h> -#include <asm/pgtable.h> + +asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); + +asmlinkage void do_syscall_trace_leave(struct pt_regs *regs); /* * Copy the thread state to a regset that can be interpreted by userspace. 
@@ -45,29 +47,15 @@ */ static int genregs_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user * ubuf) + struct membuf to) { const struct pt_regs *regs = task_pt_regs(target); - int ret; /* r0 */ - ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 0, 4); - - if (!ret) - ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - regs->gpr+1, 4, 4*32); - if (!ret) - ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - ®s->pc, 4*32, 4*33); - if (!ret) - ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - ®s->sr, 4*33, 4*34); - if (!ret) - ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, - 4*34, -1); - - return ret; + membuf_zero(&to, 4); + membuf_write(&to, regs->gpr + 1, 31 * 4); + membuf_store(&to, regs->pc); + return membuf_store(&to, regs->sr); } /* @@ -82,10 +70,9 @@ static int genregs_set(struct task_struct *target, int ret; /* ignore r0 */ - ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4); + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4); /* r1 - r31 */ - if (!ret) - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs->gpr+1, 4, 4*32); /* PC */ if (!ret) @@ -96,17 +83,43 @@ static int genregs_set(struct task_struct *target, * the Supervision register */ if (!ret) - ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - 4*33, -1); + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 4*33, -1); return ret; } +#ifdef CONFIG_FPU +/* + * As OpenRISC shares GPRs and floating point registers we don't need to export + * the floating point registers again. So here we only export the fpcsr special + * purpose register. + */ +static int fpregs_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + return membuf_store(&to, target->thread.fpcsr); +} + +static int fpregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + /* FPCSR */ + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpcsr, 0, 4); +} +#endif + /* * Define the register sets available on OpenRISC under Linux */ enum or1k_regset { REGSET_GENERAL, +#ifdef CONFIG_FPU + REGSET_FPU, +#endif }; static const struct user_regset or1k_regsets[] = { @@ -115,9 +128,19 @@ static const struct user_regset or1k_regsets[] = { .n = ELF_NGREG, .size = sizeof(long), .align = sizeof(long), - .get = genregs_get, + .regset_get = genregs_get, .set = genregs_set, }, +#ifdef CONFIG_FPU + [REGSET_FPU] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct __or1k_fpu_state) / sizeof(long), + .size = sizeof(long), + .align = sizeof(long), + .regset_get = fpregs_get, + .set = fpregs_set, + }, +#endif }; static const struct user_regset_view user_or1k_native_view = { @@ -137,6 +160,102 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) * in exit.c or in signal.c. 
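Note: with the REGSET_FPU entry above, debuggers can fetch the FPCSR through the standard regset interface. A user-space sketch (the tracee is assumed to be ptrace-attached and stopped; error handling is omitted):

#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/*
 * Read the FPCSR-only NT_PRFPREG regset exported by the new fpregs_get().
 * The iovec length is one long, matching struct __or1k_fpu_state.
 */
static long read_fpcsr(pid_t pid, unsigned long *fpcsr)
{
	struct iovec iov = {
		.iov_base = fpcsr,
		.iov_len  = sizeof(*fpcsr),
	};

	return ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PRFPREG, &iov);
}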
*/ +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +static const struct pt_regs_offset regoffset_table[] = { + REG_OFFSET_NAME(sr), + REG_OFFSET_NAME(sp), + REG_OFFSET_NAME(gpr2), + REG_OFFSET_NAME(gpr3), + REG_OFFSET_NAME(gpr4), + REG_OFFSET_NAME(gpr5), + REG_OFFSET_NAME(gpr6), + REG_OFFSET_NAME(gpr7), + REG_OFFSET_NAME(gpr8), + REG_OFFSET_NAME(gpr9), + REG_OFFSET_NAME(gpr10), + REG_OFFSET_NAME(gpr11), + REG_OFFSET_NAME(gpr12), + REG_OFFSET_NAME(gpr13), + REG_OFFSET_NAME(gpr14), + REG_OFFSET_NAME(gpr15), + REG_OFFSET_NAME(gpr16), + REG_OFFSET_NAME(gpr17), + REG_OFFSET_NAME(gpr18), + REG_OFFSET_NAME(gpr19), + REG_OFFSET_NAME(gpr20), + REG_OFFSET_NAME(gpr21), + REG_OFFSET_NAME(gpr22), + REG_OFFSET_NAME(gpr23), + REG_OFFSET_NAME(gpr24), + REG_OFFSET_NAME(gpr25), + REG_OFFSET_NAME(gpr26), + REG_OFFSET_NAME(gpr27), + REG_OFFSET_NAME(gpr28), + REG_OFFSET_NAME(gpr29), + REG_OFFSET_NAME(gpr30), + REG_OFFSET_NAME(gpr31), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(orig_gpr11), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return (addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} /* * Called by kernel/ptrace.c when detaching.. @@ -174,7 +293,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) + ptrace_report_syscall_entry(regs)) /* * Tracing decided this syscall should not happen. 
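Note: regs_query_register_offset() and the kernel-stack accessors above are the building blocks that HAVE_REGS_AND_STACK_ACCESS_API consumers (for example kprobes event argument fetching) rely on. A small sketch of resolving a register by name and reading it from a pt_regs snapshot (helper name is illustrative; it assumes the usual declaration in asm/ptrace.h):

#include <linux/ptrace.h>

/*
 * Resolve a register by the names used in regoffset_table ("gpr3", "sp",
 * "pc", ...) and read it from the given pt_regs. Returns 0 for an unknown
 * name, mirroring how regs_get_kernel_stack_nth() handles bad indexes.
 */
static unsigned long read_named_reg(struct pt_regs *regs, const char *name)
{
	int offset = regs_query_register_offset(name);

	if (offset < 0)
		return 0;
	return *(unsigned long *)((unsigned char *)regs + offset);
}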
* We'll return a bogus call number to get an ENOSYS @@ -196,5 +315,5 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c index d668f5be3a99..be56eaafc8b9 100644 --- a/arch/openrisc/kernel/setup.c +++ b/arch/openrisc/kernel/setup.c @@ -35,7 +35,6 @@ #include <linux/device.h> #include <asm/sections.h> -#include <asm/pgtable.h> #include <asm/types.h> #include <asm/setup.h> #include <asm/io.h> @@ -49,17 +48,12 @@ static void __init setup_memory(void) unsigned long ram_start_pfn; unsigned long ram_end_pfn; phys_addr_t memory_start, memory_end; - struct memblock_region *region; memory_end = memory_start = 0; /* Find main memory where is the kernel, we assume its the only one */ - for_each_memblock(memory, region) { - memory_start = region->base; - memory_end = region->base + region->size; - printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__, - memory_start, memory_end); - } + memory_start = memblock_start_of_DRAM(); + memory_end = memblock_end_of_DRAM(); if (!memory_end) { panic("No memory!"); @@ -81,6 +75,16 @@ static void __init setup_memory(void) */ memblock_reserve(__pa(_stext), _end - _stext); +#ifdef CONFIG_BLK_DEV_INITRD + /* Then reserve the initrd, if any */ + if (initrd_start && (initrd_end > initrd_start)) { + unsigned long aligned_start = ALIGN_DOWN(initrd_start, PAGE_SIZE); + unsigned long aligned_end = ALIGN(initrd_end, PAGE_SIZE); + + memblock_reserve(__pa(aligned_start), aligned_end - aligned_start); + } +#endif /* CONFIG_BLK_DEV_INITRD */ + early_init_fdt_reserve_self(); early_init_fdt_scan_reserved_mem(); @@ -148,21 +152,6 @@ static void print_cpuinfo(void) printk(KERN_INFO "-- custom unit(s)\n"); } -static struct device_node *setup_find_cpu_node(int cpu) -{ - u32 hwid; - struct device_node *cpun; - - for_each_of_cpu_node(cpun) { - if (of_property_read_u32(cpun, "reg", &hwid)) - continue; - if (hwid == cpu) - return cpun; - } - - return NULL; -} - void __init setup_cpuinfo(void) { struct device_node *cpu; @@ -171,7 +160,7 @@ void __init setup_cpuinfo(void) int cpu_id = smp_processor_id(); struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu_id]; - cpu = setup_find_cpu_node(cpu_id); + cpu = of_get_cpu_node(cpu_id, NULL); if (!cpu) panic("Couldn't find CPU%d in device tree...\n", cpu_id); @@ -205,7 +194,8 @@ void __init setup_cpuinfo(void) } /** - * or32_early_setup + * or1k_early_setup + * @fdt: pointer to the start of the device tree in memory or NULL * * Handles the pointer to the device tree that this kernel is to use * for establishing the available platform devices. @@ -213,7 +203,7 @@ void __init setup_cpuinfo(void) * Falls back on built-in device tree in case null pointer is passed. 
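Note: setup_cpuinfo() above (and calibrate_delay() a little further down) now resolve the CPU's device-tree node with the generic of_get_cpu_node() instead of a private walker. The lookup pattern as a standalone sketch:

#include <linux/of.h>

/*
 * Find the device-tree node for a logical CPU id, read one property and
 * drop the reference. Returns 0 if the node or property is missing.
 */
static u32 cpu_clock_frequency(int cpu)
{
	struct device_node *np = of_get_cpu_node(cpu, NULL);
	u32 freq = 0;

	if (np) {
		of_property_read_u32(np, "clock-frequency", &freq);
		of_node_put(np);
	}
	return freq;
}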
*/ -void __init or32_early_setup(void *fdt) +void __init or1k_early_setup(void *fdt) { if (fdt) pr_info("FDT at %p\n", fdt); @@ -239,21 +229,6 @@ static inline unsigned long extract_value(unsigned long reg, unsigned long mask) return mask & reg; } -void __init detect_unit_config(unsigned long upr, unsigned long mask, - char *text, void (*func) (void)) -{ - if (text != NULL) - printk("%s", text); - - if (upr & mask) { - if (func != NULL) - func(); - else - printk("present\n"); - } else - printk("not present\n"); -} - /* * calibrate_delay * @@ -265,7 +240,7 @@ void __init detect_unit_config(unsigned long upr, unsigned long mask, void calibrate_delay(void) { const int *val; - struct device_node *cpu = setup_find_cpu_node(smp_processor_id()); + struct device_node *cpu = of_get_cpu_node(smp_processor_id(), NULL); val = of_get_property(cpu, "clock-frequency", NULL); if (!val) @@ -274,10 +249,15 @@ void calibrate_delay(void) pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", loops_per_jiffy / (500000 / HZ), (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy); + + of_node_put(cpu); } void __init setup_arch(char **cmdline_p) { + /* setup memblock allocator */ + setup_memory(); + unflatten_and_copy_device_tree(); setup_cpuinfo(); @@ -287,32 +267,23 @@ void __init setup_arch(char **cmdline_p) #endif /* process 1's initial memory region is the kernel code/data */ - init_mm.start_code = (unsigned long)_stext; - init_mm.end_code = (unsigned long)_etext; - init_mm.end_data = (unsigned long)_edata; - init_mm.brk = (unsigned long)_end; + setup_initial_init_mm(_stext, _etext, _edata, _end); #ifdef CONFIG_BLK_DEV_INITRD - initrd_start = (unsigned long)&__initrd_start; - initrd_end = (unsigned long)&__initrd_end; if (initrd_start == initrd_end) { + printk(KERN_INFO "Initial ramdisk not found\n"); initrd_start = 0; initrd_end = 0; + } else { + printk(KERN_INFO "Initial ramdisk at: 0x%p (%lu bytes)\n", + (void *)(initrd_start), initrd_end - initrd_start); + initrd_below_start_ok = 1; } - initrd_below_start_ok = 1; #endif - /* setup memblock allocator */ - setup_memory(); - /* paging_init() sets up the MMU and marks all pages as reserved */ paging_init(); -#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE) - if (!conswitchp) - conswitchp = &dummy_con; -#endif - *cmdline_p = boot_command_line; printk(KERN_INFO "OpenRISC Linux -- http://openrisc.io\n"); diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 4f0754874d78..f70a13ee0593 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -21,21 +21,56 @@ #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/stddef.h> -#include <linux/tracehook.h> +#include <linux/resume_user_mode.h> +#include <asm/fpu.h> #include <asm/processor.h> #include <asm/syscall.h> #include <asm/ucontext.h> #include <linux/uaccess.h> -#define DEBUG_SIG 0 - struct rt_sigframe { struct siginfo info; struct ucontext uc; unsigned char retcode[16]; /* trampoline code */ }; +asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs); + +asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, + int syscall); + +#ifdef CONFIG_FPU +static long restore_fp_state(struct sigcontext __user *sc) +{ + long err; + + err = __copy_from_user(¤t->thread.fpcsr, &sc->fpcsr, sizeof(unsigned long)); + if (unlikely(err)) + return err; + + /* Restore the FPU state */ + restore_fpu(current); + + return 0; +} + +static long save_fp_state(struct sigcontext __user *sc) +{ + long err; + + /* Sync the user FPU state so we can copy to 
sigcontext */ + save_fpu(current); + + err = __copy_to_user(&sc->fpcsr, ¤t->thread.fpcsr, sizeof(unsigned long)); + + return err; +} +#else +#define save_fp_state(sc) (0) +#define restore_fp_state(sc) (0) +#endif + static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { @@ -52,6 +87,7 @@ static int restore_sigcontext(struct pt_regs *regs, err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)); err |= __copy_from_user(®s->pc, &sc->regs.pc, sizeof(unsigned long)); err |= __copy_from_user(®s->sr, &sc->regs.sr, sizeof(unsigned long)); + err |= restore_fp_state(sc); /* make sure the SM-bit is cleared so user-mode cannot fool us */ regs->sr &= ~SPR_SR_SM; @@ -68,7 +104,7 @@ static int restore_sigcontext(struct pt_regs *regs, asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) { - struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp; + struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->sp; sigset_t set; /* @@ -76,7 +112,7 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) * then frame should be dword aligned here. If it's * not, then the user is trying to mess with us. */ - if (((long)frame) & 3) + if (((unsigned long)frame) & 3) goto badframe; if (!access_ok(frame, sizeof(*frame))) @@ -114,6 +150,7 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long)); err |= __copy_to_user(&sc->regs.pc, ®s->pc, sizeof(unsigned long)); err |= __copy_to_user(&sc->regs.sr, ®s->sr, sizeof(unsigned long)); + err |= save_fp_state(sc); return err; } @@ -151,7 +188,7 @@ static inline void __user *get_sigframe(struct ksignal *ksig, static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { - struct rt_sigframe *frame; + struct rt_sigframe __user *frame; unsigned long return_ip; int err = 0; @@ -181,10 +218,10 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, l.ori r11,r0,__NR_sigreturn l.sys 1 */ - err |= __put_user(0xa960, (short *)(frame->retcode + 0)); - err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2)); - err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); - err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); + err |= __put_user(0xa960, (short __user *)(frame->retcode + 0)); + err |= __put_user(__NR_rt_sigreturn, (short __user *)(frame->retcode + 2)); + err |= __put_user(0x20000001, (unsigned long __user *)(frame->retcode + 4)); + err |= __put_user(0x15000000, (unsigned long __user *)(frame->retcode + 8)); if (err) return -EFAULT; @@ -207,6 +244,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) { int ret; + rseq_signal_deliver(ksig, regs); + ret = setup_rt_frame(ksig, sigmask_to_save(), regs); signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); @@ -224,7 +263,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) * mode below. 
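Note: because save_fp_state() above stores the FPCSR in the sigcontext word that previously held oldmask, a user-space SIGFPE handler can in principle inspect the exception flags. A heavily hedged sketch, assuming a libc whose mcontext_t matches the updated uapi layout and exposes the field as fpcsr:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

/*
 * Illustration only: whether uc_mcontext.fpcsr is reachable by this name
 * depends on the libc headers in use; the kernel-side layout is what the
 * sigcontext.h hunk above defines.
 */
static void fpe_handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;

	fprintf(stderr, "SIGFPE code=%d fpcsr=%#lx\n", info->si_code,
		(unsigned long)uc->uc_mcontext.fpcsr);
}

static void install_fpe_handler(void)
{
	struct sigaction sa = {
		.sa_sigaction	= fpe_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGFPE, &sa, NULL);
}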
*/ -int do_signal(struct pt_regs *regs, int syscall) +static int do_signal(struct pt_regs *regs, int syscall) { struct ksignal ksig; unsigned long continue_addr = 0; @@ -244,7 +283,7 @@ int do_signal(struct pt_regs *regs, int syscall) switch (retval) { case -ERESTART_RESTARTBLOCK: restart = -2; - /* Fall through */ + fallthrough; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: @@ -299,7 +338,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) if (unlikely(!user_mode(regs))) return 0; local_irq_enable(); - if (thread_flags & _TIF_SIGPENDING) { + if (thread_flags & (_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)) { int restart = do_signal(regs, syscall); if (unlikely(restart)) { /* @@ -311,12 +350,11 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) } syscall = 0; } else { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } } local_irq_disable(); - thread_flags = current_thread_info()->flags; + thread_flags = read_thread_flags(); } while (thread_flags & _TIF_WORK_MASK); return 0; } diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c index 7d518ee8bddc..86da4bc5ee0b 100644 --- a/arch/openrisc/kernel/smp.c +++ b/arch/openrisc/kernel/smp.c @@ -14,13 +14,17 @@ #include <linux/smp.h> #include <linux/cpu.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/irq.h> +#include <linux/of.h> #include <asm/cpuinfo.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/time.h> +asmlinkage __init void secondary_start_kernel(void); + static void (*smp_cross_call)(const struct cpumask *, unsigned int); unsigned long secondary_release = -1; @@ -53,28 +57,30 @@ static void boot_secondary(unsigned int cpu, struct task_struct *idle) spin_unlock(&boot_lock); } -void __init smp_prepare_boot_cpu(void) -{ -} - void __init smp_init_cpus(void) { - int i; + struct device_node *cpu; + u32 cpu_id; - for (i = 0; i < NR_CPUS; i++) - set_cpu_possible(i, true); + for_each_of_cpu_node(cpu) { + cpu_id = of_get_cpu_hwid(cpu, 0); + if (cpu_id < NR_CPUS) + set_cpu_possible(cpu_id, true); + } } void __init smp_prepare_cpus(unsigned int max_cpus) { - int i; + unsigned int cpu; /* * Initialise the present map, which describes the set of CPUs * actually populated at the present time. */ - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); + for_each_possible_cpu(cpu) { + if (cpu < max_cpus) + set_cpu_present(cpu, true); + } } void __init smp_cpus_done(unsigned int max_cpus) @@ -113,7 +119,7 @@ asmlinkage __init void secondary_start_kernel(void) * All kernel threads share the same mm context; grab a * reference and switch to it. 
*/ - atomic_inc(&mm->mm_count); + mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); @@ -133,8 +139,6 @@ asmlinkage __init void secondary_start_kernel(void) set_cpu_online(cpu, true); local_irq_enable(); - - preempt_disable(); /* * OK, it's off to the idle thread for us */ @@ -167,7 +171,7 @@ void handle_IPI(unsigned int ipi_msg) } } -void smp_send_reschedule(int cpu) +void arch_smp_send_reschedule(int cpu) { smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } @@ -191,12 +195,6 @@ void smp_send_stop(void) smp_call_function(stop_this_cpu, NULL, 0); } -/* not supported, yet */ -int setup_profiling_timer(unsigned int multiplier) -{ - return -EINVAL; -} - void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) { smp_cross_call = fn; @@ -218,30 +216,101 @@ static inline void ipi_flush_tlb_all(void *ignored) local_flush_tlb_all(); } +static inline void ipi_flush_tlb_mm(void *info) +{ + struct mm_struct *mm = (struct mm_struct *)info; + + local_flush_tlb_mm(mm); +} + +static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm) +{ + unsigned int cpuid; + + if (cpumask_empty(cmask)) + return; + + cpuid = get_cpu(); + + if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) { + /* local cpu is the only cpu present in cpumask */ + local_flush_tlb_mm(mm); + } else { + on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1); + } + put_cpu(); +} + +struct flush_tlb_data { + unsigned long addr1; + unsigned long addr2; +}; + +static inline void ipi_flush_tlb_page(void *info) +{ + struct flush_tlb_data *fd = (struct flush_tlb_data *)info; + + local_flush_tlb_page(NULL, fd->addr1); +} + +static inline void ipi_flush_tlb_range(void *info) +{ + struct flush_tlb_data *fd = (struct flush_tlb_data *)info; + + local_flush_tlb_range(NULL, fd->addr1, fd->addr2); +} + +static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start, + unsigned long end) +{ + unsigned int cpuid; + + if (cpumask_empty(cmask)) + return; + + cpuid = get_cpu(); + + if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) { + /* local cpu is the only cpu present in cpumask */ + if ((end - start) <= PAGE_SIZE) + local_flush_tlb_page(NULL, start); + else + local_flush_tlb_range(NULL, start, end); + } else { + struct flush_tlb_data fd; + + fd.addr1 = start; + fd.addr2 = end; + + if ((end - start) <= PAGE_SIZE) + on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1); + else + on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1); + } + put_cpu(); +} + void flush_tlb_all(void) { on_each_cpu(ipi_flush_tlb_all, NULL, 1); } -/* - * FIXME: implement proper functionality instead of flush_tlb_all. - * *But*, as things currently stands, the local_tlb_flush_* functions will - * all boil down to local_tlb_flush_all anyway. - */ void flush_tlb_mm(struct mm_struct *mm) { - on_each_cpu(ipi_flush_tlb_all, NULL, 1); + smp_flush_tlb_mm(mm_cpumask(mm), mm); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { - on_each_cpu(ipi_flush_tlb_all, NULL, 1); + smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE); } void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - on_each_cpu(ipi_flush_tlb_all, NULL, 1); + const struct cpumask *cmask = vma ? 
mm_cpumask(vma->vm_mm) + : cpu_online_mask; + smp_flush_tlb_range(cmask, start, end); } /* Instruction cache invalidate - performed on each cpu */ diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c index 43f140a28bc7..54d38809e22c 100644 --- a/arch/openrisc/kernel/stacktrace.c +++ b/arch/openrisc/kernel/stacktrace.c @@ -13,6 +13,7 @@ #include <linux/export.h> #include <linux/sched.h> #include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <asm/processor.h> @@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long *sp = NULL; + if (!try_get_task_stack(tsk)) + return; + if (tsk == current) sp = (unsigned long *) &sp; - else - sp = (unsigned long *) KSTK_ESP(tsk); + else { + unsigned long ksp; + + /* Locate stack from kernel context */ + ksp = task_thread_info(tsk)->ksp; + ksp += STACK_FRAME_OVERHEAD; /* redzone */ + ksp += sizeof(struct pt_regs); + + sp = (unsigned long *) ksp; + } unwind_stack(trace, sp, save_stack_address_nosched); + + put_task_stack(tsk); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/openrisc/kernel/sys_call_table.c b/arch/openrisc/kernel/sys_call_table.c index 3d18008310e4..b2f57e2538f7 100644 --- a/arch/openrisc/kernel/sys_call_table.c +++ b/arch/openrisc/kernel/sys_call_table.c @@ -16,9 +16,14 @@ #include <asm/syscalls.h> -#undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) + +#define sys_mmap2 sys_mmap_pgoff +#define sys_clone __sys_clone +#define sys_clone3 __sys_clone3 +#define sys_fork __sys_fork void *sys_call_table[__NR_syscalls] = { -#include <asm/unistd.h> +#include <asm/syscall_table_32.h> }; diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c index b82866061958..764c7bfb5df3 100644 --- a/arch/openrisc/kernel/time.c +++ b/arch/openrisc/kernel/time.c @@ -20,8 +20,12 @@ #include <linux/clockchips.h> #include <linux/irq.h> #include <linux/io.h> +#include <linux/of_clk.h> #include <asm/cpuinfo.h> +#include <asm/time.h> + +irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs); /* Test the timer ticks to count, used in sync routine */ inline void openrisc_timer_set(unsigned long count) @@ -60,7 +64,7 @@ static int openrisc_timer_set_next_event(unsigned long delta, * timers) we cannot enable the PERIODIC feature. The tick timer can run using * one-shot events, so no problem. */ -DEFINE_PER_CPU(struct clock_event_device, clockevent_openrisc_timer); +static DEFINE_PER_CPU(struct clock_event_device, clockevent_openrisc_timer); void openrisc_clockevent_init(void) { @@ -127,7 +131,7 @@ irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs) return IRQ_HANDLED; } -/** +/* * Clocksource: Based on OpenRISC timer/counter * * This sets up the OpenRISC Tick Timer as a clock source. 
The tick timer @@ -169,4 +173,7 @@ void __init time_init(void) openrisc_timer_init(); openrisc_clockevent_init(); + + of_clk_init(NULL); + timer_probe(); } diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 932a8ec2b520..c195be9cc9fc 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -30,36 +30,48 @@ #include <linux/kallsyms.h> #include <linux/uaccess.h> +#include <asm/bug.h> +#include <asm/fpu.h> #include <asm/io.h> -#include <asm/pgtable.h> +#include <asm/processor.h> #include <asm/unwinder.h> #include <asm/sections.h> -int kstack_depth_to_print = 0x180; int lwa_flag; -unsigned long __user *lwa_addr; +static unsigned long __user *lwa_addr; -void print_trace(void *data, unsigned long addr, int reliable) +asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector); +asmlinkage void do_trap(struct pt_regs *regs, unsigned long address); +asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address); +asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address); +asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address); +asmlinkage void do_illegal_instruction(struct pt_regs *regs, + unsigned long address); + +static void print_trace(void *data, unsigned long addr, int reliable) +{ + const char *loglvl = data; + + pr_info("%s[<%p>] %s%pS\n", loglvl, (void *) addr, reliable ? "" : "? ", + (void *) addr); +} + +static void print_data(unsigned long base_addr, unsigned long word, int i) { - pr_emerg("[<%p>] %s%pS\n", (void *) addr, reliable ? "" : "? ", - (void *) addr); + if (i == 0) + pr_info("(%08lx:)\t%08lx", base_addr + (i * 4), word); + else + pr_info(" %08lx:\t%08lx", base_addr + (i * 4), word); } /* displays a short stack trace */ -void show_stack(struct task_struct *task, unsigned long *esp) +void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl) { if (esp == NULL) esp = (unsigned long *)&esp; - pr_emerg("Call trace:\n"); - unwind_stack(NULL, esp, print_trace); -} - -void show_trace_task(struct task_struct *tsk) -{ - /* - * TODO: SysRq-T trace dump... 
- */ + pr_info("%sCall trace:\n", loglvl); + unwind_stack((void *)loglvl, esp, print_trace); } void show_registers(struct pt_regs *regs) @@ -72,145 +84,83 @@ void show_registers(struct pt_regs *regs) if (user_mode(regs)) in_kernel = 0; - printk("CPU #: %d\n" - " PC: %08lx SR: %08lx SP: %08lx\n", - smp_processor_id(), regs->pc, regs->sr, regs->sp); - printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n", - 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]); - printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n", - regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]); - printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n", - regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]); - printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n", - regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]); - printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n", - regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]); - printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n", - regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]); - printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n", - regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]); - printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n", - regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]); - printk(" RES: %08lx oGPR11: %08lx\n", - regs->gpr[11], regs->orig_gpr11); - - printk("Process %s (pid: %d, stackpage=%08lx)\n", - current->comm, current->pid, (unsigned long)current); + pr_info("CPU #: %d\n" + " PC: %08lx SR: %08lx SP: %08lx\n", + smp_processor_id(), regs->pc, regs->sr, regs->sp); + pr_info("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n", + 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]); + pr_info("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n", + regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]); + pr_info("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n", + regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]); + pr_info("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n", + regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]); + pr_info("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n", + regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]); + pr_info("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n", + regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]); + pr_info("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n", + regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]); + pr_info("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n", + regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]); + pr_info(" RES: %08lx oGPR11: %08lx\n", + regs->gpr[11], regs->orig_gpr11); + + pr_info("Process %s (pid: %d, stackpage=%08lx)\n", + current->comm, current->pid, (unsigned long)current); /* * When in-kernel, we also print out the stack and code at the * time of the fault.. 
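show_stack() and print_trace() above pass the log level through unwind_stack()'s opaque data pointer, the same per-frame callback interface that stacktrace.c uses. Below is a hedged kernel-side sketch of that pattern, collecting return addresses into a caller-supplied buffer instead of printing them; frame_log, collect_frame and log_current_stack are hypothetical names, and the unwind_stack() signature is inferred from its uses in this patch.

#include <linux/kernel.h>
#include <asm/unwinder.h>

struct frame_log {
	unsigned long addr[16];
	unsigned int nr;
};

/* called once per frame; 'data' is whatever cookie the caller passed in */
static void collect_frame(void *data, unsigned long addr, int reliable)
{
	struct frame_log *log = data;

	if (reliable && log->nr < ARRAY_SIZE(log->addr))
		log->addr[log->nr++] = addr;
}

static void log_current_stack(void)
{
	struct frame_log log = { .nr = 0 };
	unsigned long *sp = (unsigned long *)&sp;	/* current stack, as in show_stack() */

	unwind_stack(&log, sp, collect_frame);
}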
*/ if (in_kernel) { - printk("\nStack: "); - show_stack(NULL, (unsigned long *)esp); + pr_info("\nStack: "); + show_stack(NULL, (unsigned long *)esp, KERN_EMERG); - printk("\nCode: "); - if (regs->pc < PAGE_OFFSET) - goto bad; + if (esp < PAGE_OFFSET) + goto bad_stack; - for (i = -24; i < 24; i++) { - unsigned char c; - if (__get_user(c, &((unsigned char *)regs->pc)[i])) { -bad: - printk(" Bad PC value."); + pr_info("\n"); + for (i = -8; i < 24; i += 1) { + unsigned long word; + + if (__get_user(word, &((unsigned long *)esp)[i])) { +bad_stack: + pr_info(" Bad Stack value."); break; } - if (i == 0) - printk("(%02x) ", c); - else - printk("%02x ", c); + print_data(esp, word, i); } - } - printk("\n"); -} -void nommu_dump_state(struct pt_regs *regs, - unsigned long ea, unsigned long vector) -{ - int i; - unsigned long addr, stack = regs->sp; - - printk("\n\r[nommu_dump_state] :: ea %lx, vector %lx\n\r", ea, vector); - - printk("CPU #: %d\n" - " PC: %08lx SR: %08lx SP: %08lx\n", - 0, regs->pc, regs->sr, regs->sp); - printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n", - 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]); - printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n", - regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]); - printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n", - regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]); - printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n", - regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]); - printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n", - regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]); - printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n", - regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]); - printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n", - regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]); - printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n", - regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]); - printk(" RES: %08lx oGPR11: %08lx\n", - regs->gpr[11], regs->orig_gpr11); - - printk("Process %s (pid: %d, stackpage=%08lx)\n", - ((struct task_struct *)(__pa(current)))->comm, - ((struct task_struct *)(__pa(current)))->pid, - (unsigned long)current); - - printk("\nStack: "); - printk("Stack dump [0x%08lx]:\n", (unsigned long)stack); - for (i = 0; i < kstack_depth_to_print; i++) { - if (((long)stack & (THREAD_SIZE - 1)) == 0) - break; - stack++; - - printk("%lx :: sp + %02d: 0x%08lx\n", stack, i * 4, - *((unsigned long *)(__pa(stack)))); - } - printk("\n"); - - printk("Call Trace: "); - i = 1; - while (((long)stack & (THREAD_SIZE - 1)) != 0) { - addr = *((unsigned long *)__pa(stack)); - stack++; - - if (kernel_text_address(addr)) { - if (i && ((i % 6) == 0)) - printk("\n "); - printk(" [<%08lx>]", addr); - i++; - } - } - printk("\n"); + pr_info("\nCode: "); + if (regs->pc < PAGE_OFFSET) + goto bad; - printk("\nCode: "); + for (i = -6; i < 6; i += 1) { + unsigned long word; - for (i = -24; i < 24; i++) { - unsigned char c; - c = ((unsigned char *)(__pa(regs->pc)))[i]; + if (__get_user(word, &((unsigned long *)regs->pc)[i])) { +bad: + pr_info(" Bad PC value."); + break; + } - if (i == 0) - printk("(%02x) ", c); - else - printk("%02x ", c); + print_data(regs->pc, word, i); + } } - printk("\n"); + pr_info("\n"); } /* This is normally the 'Oops' routine */ -void die(const char *str, struct pt_regs *regs, long err) +void __noreturn die(const char *str, struct pt_regs *regs, long err) { 
console_verbose(); - printk("\n%s#: %04lx\n", str, err & 0xffff); + pr_emerg("\n%s#: %04lx\n", str, err & 0xffff); show_registers(regs); #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION - printk("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n"); + pr_emerg("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n"); /* shut down interrupts */ local_irq_disable(); @@ -218,35 +168,56 @@ void die(const char *str, struct pt_regs *regs, long err) __asm__ __volatile__("l.nop 1"); do {} while (1); #endif - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } -/* This is normally the 'Oops' routine */ -void die_if_kernel(const char *str, struct pt_regs *regs, long err) +asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector) { - if (user_mode(regs)) - return; - - die(str, regs, err); -} - -void unhandled_exception(struct pt_regs *regs, int ea, int vector) -{ - printk("Unable to handle exception at EA =0x%x, vector 0x%x", - ea, vector); + pr_emerg("Unable to handle exception at EA =0x%x, vector 0x%x", + ea, vector); die("Oops", regs, 9); } -void __init trap_init(void) +asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address) { - /* Nothing needs to be done */ + if (user_mode(regs)) { + int code = FPE_FLTUNK; +#ifdef CONFIG_FPU + unsigned long fpcsr; + + save_fpu(current); + fpcsr = current->thread.fpcsr; + + if (fpcsr & SPR_FPCSR_IVF) + code = FPE_FLTINV; + else if (fpcsr & SPR_FPCSR_OVF) + code = FPE_FLTOVF; + else if (fpcsr & SPR_FPCSR_UNF) + code = FPE_FLTUND; + else if (fpcsr & SPR_FPCSR_DZF) + code = FPE_FLTDIV; + else if (fpcsr & SPR_FPCSR_IXF) + code = FPE_FLTRES; + + /* Clear all flags */ + current->thread.fpcsr &= ~SPR_FPCSR_ALLF; + restore_fpu(current); +#endif + force_sig_fault(SIGFPE, code, (void __user *)regs->pc); + } else { + pr_emerg("KERNEL: Illegal fpe exception 0x%.8lx\n", regs->pc); + die("Die:", regs, SIGFPE); + } } asmlinkage void do_trap(struct pt_regs *regs, unsigned long address) { - force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)address); - - regs->pc += 4; + if (user_mode(regs)) { + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc); + } else { + pr_emerg("KERNEL: Illegal trap exception 0x%.8lx\n", regs->pc); + die("Die:", regs, SIGILL); + } } asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address) @@ -255,8 +226,7 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address) /* Send a SIGBUS */ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)address); } else { - printk("KERNEL: Unaligned Access 0x%.8lx\n", address); - show_registers(regs); + pr_emerg("KERNEL: Unaligned Access 0x%.8lx\n", address); die("Die:", regs, address); } @@ -268,8 +238,7 @@ asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address) /* Send a SIGBUS */ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address); } else { /* Kernel mode */ - printk("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address); - show_registers(regs); + pr_emerg("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address); die("Die:", regs, address); } } @@ -463,9 +432,8 @@ asmlinkage void do_illegal_instruction(struct pt_regs *regs, /* Send a SIGILL */ force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)address); } else { /* Kernel mode */ - printk("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n", - address); - show_registers(regs); + pr_emerg("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n", + address); die("Die:", regs, address); } } diff --git a/arch/openrisc/kernel/unwinder.c b/arch/openrisc/kernel/unwinder.c index 
8ae15c2c1845..c6ad6f867a6a 100644 --- a/arch/openrisc/kernel/unwinder.c +++ b/arch/openrisc/kernel/unwinder.c @@ -25,7 +25,7 @@ struct or1k_frameinfo { /* * Verify a frameinfo structure. The return address should be a valid text * address. The frame pointer may be null if its the last frame, otherwise - * the frame pointer should point to a location in the stack after the the + * the frame pointer should point to a location in the stack after the * top of the next frame up. */ static inline int or1k_frameinfo_valid(struct or1k_frameinfo *frameinfo) diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index 60449fd7f16f..049bff45f612 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -50,9 +50,9 @@ SECTIONS .text : AT(ADDR(.text) - LOAD_OFFSET) { _stext = .; + HEAD_TEXT TEXT_TEXT SCHED_TEXT - CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT @@ -84,8 +84,6 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_begin = .; - HEAD_TEXT_SECTION - /* Page aligned */ INIT_TEXT_SECTION(PAGE_SIZE) @@ -96,18 +94,6 @@ SECTIONS __init_end = .; - . = ALIGN(PAGE_SIZE); - .initrd : AT(ADDR(.initrd) - LOAD_OFFSET) - { - __initrd_start = .; - *(.initrd) - __initrd_end = .; - FILL (0); - . = ALIGN (PAGE_SIZE); - } - - __vmlinux_end = .; /* last address of the physical file */ - BSS_SECTION(0, 0, 0x20) _end = .; @@ -115,6 +101,7 @@ SECTIONS /* Throw in the debugging sections */ STABS_DEBUG DWARF_DEBUG + ELF_DETAILS /* Sections to be discarded -- must be last */ DISCARDS diff --git a/arch/openrisc/lib/Makefile b/arch/openrisc/lib/Makefile index 79775aaa6baa..53327406b483 100644 --- a/arch/openrisc/lib/Makefile +++ b/arch/openrisc/lib/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only # -# Makefile for or32 specific library files.. +# Makefile for or1k specific library files.. 
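The new do_fpe_trap() above maps the FPCSR sticky flags onto a specific SIGFPE si_code (FPE_FLTINV, FPE_FLTOVF, FPE_FLTUND, FPE_FLTDIV or FPE_FLTRES) and clears them before delivery. Seen from user space the contract looks like the stand-alone demo below, which is not part of the patch and not OpenRISC-specific; it assumes the glibc feenableexcept() extension and an FPU that traps on divide-by-zero once unmasked.

#define _GNU_SOURCE
#include <fenv.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_fpe(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* async-signal-safe exit: status 0 means we saw FPE_FLTDIV */
	_exit(si->si_code == FPE_FLTDIV ? 0 : 1);
}

int main(void)
{
	struct sigaction sa;
	volatile double num = 1.0, den = 0.0;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_fpe;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGFPE, &sa, NULL);

	feenableexcept(FE_DIVBYZERO);	/* unmask the divide-by-zero trap */
	printf("%f\n", num / den);	/* should deliver SIGFPE before printing */
	return 2;			/* only reached if no trap fired */
}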
# obj-y := delay.o string.o memset.o memcpy.o diff --git a/arch/openrisc/lib/delay.c b/arch/openrisc/lib/delay.c index 036ae57180ef..5e89e4131304 100644 --- a/arch/openrisc/lib/delay.c +++ b/arch/openrisc/lib/delay.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/export.h> #include <linux/init.h> +#include <linux/timex.h> #include <asm/param.h> #include <asm/delay.h> #include <asm/timex.h> diff --git a/arch/openrisc/lib/memcpy.c b/arch/openrisc/lib/memcpy.c index fe2177628fd9..e2af9b510804 100644 --- a/arch/openrisc/lib/memcpy.c +++ b/arch/openrisc/lib/memcpy.c @@ -101,7 +101,7 @@ void *memcpy(void *dest, __const void *src, __kernel_size_t n) */ void *memcpy(void *dest, __const void *src, __kernel_size_t n) { - unsigned char *d = (unsigned char *)dest, *s = (unsigned char *)src; + unsigned char *d, *s; uint32_t *dest_w = (uint32_t *)dest, *src_w = (uint32_t *)src; /* If both source and dest are word aligned copy words */ diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c index 08f56af387ac..eb43b73f3855 100644 --- a/arch/openrisc/mm/cache.c +++ b/arch/openrisc/mm/cache.c @@ -16,7 +16,7 @@ #include <asm/cacheflush.h> #include <asm/tlbflush.h> -static void cache_loop(struct page *page, const unsigned int reg) +static __always_inline void cache_loop(struct page *page, const unsigned int reg) { unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT; unsigned long line = paddr & ~(L1_CACHE_BYTES - 1); @@ -43,15 +43,19 @@ void update_cache(struct vm_area_struct *vma, unsigned long address, pte_t *pte) { unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT; - struct page *page = pfn_to_page(pfn); - int dirty = !test_and_set_bit(PG_dc_clean, &page->flags); + struct folio *folio = page_folio(pfn_to_page(pfn)); + int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags); /* * Since icaches do not snoop for updated data on OpenRISC, we * must write back and invalidate any dirty pages manually. We * can skip data pages, since they will not end up in icaches. */ - if ((vma->vm_flags & VM_EXEC) && dirty) - sync_icache_dcache(page); + if ((vma->vm_flags & VM_EXEC) && dirty) { + unsigned int nr = folio_nr_pages(folio); + + while (nr--) + sync_icache_dcache(folio_page(folio, nr)); + } } diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index 5d4d3a9691d0..29e232d78d82 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -15,23 +15,24 @@ #include <linux/interrupt.h> #include <linux/extable.h> #include <linux/sched/signal.h> +#include <linux/perf_event.h> #include <linux/uaccess.h> +#include <asm/bug.h> +#include <asm/mmu_context.h> #include <asm/siginfo.h> #include <asm/signal.h> #define NUM_TLB_ENTRIES 64 #define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1)) -unsigned long pte_misses; /* updated by do_page_fault() */ -unsigned long pte_errors; /* updated by do_page_fault() */ - /* __PHX__ :: - check the vmalloc_fault in do_page_fault() - * - also look into include/asm-or32/mmu_context.h + * - also look into include/asm/mmu_context.h */ volatile pgd_t *current_pgd[NR_CPUS]; -extern void die(char *, struct pt_regs *, long); +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address, + unsigned long vector, int write_acc); /* * This routine handles page faults. 
It determines the address, @@ -50,7 +51,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address, struct vm_area_struct *vma; int si_code; vm_fault_t fault; - unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + unsigned int flags = FAULT_FLAG_DEFAULT; tsk = current; @@ -103,8 +104,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address, if (in_interrupt() || !mm) goto no_context; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + retry: - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma(mm, address); if (!vma) @@ -126,8 +129,9 @@ retry: if (address + PAGE_SIZE < regs->sp) goto bad_area; } - if (expand_stack(vma, address)) - goto bad_area; + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so @@ -159,9 +163,16 @@ good_area: * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, regs); + + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + goto no_context; + return; + } - if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) return; if (unlikely(fault & VM_FAULT_ERROR)) { @@ -174,26 +185,19 @@ good_area: BUG(); } - if (flags & FAULT_FLAG_ALLOW_RETRY) { - /*RGD modeled on Cris */ - if (fault & VM_FAULT_MAJOR) - tsk->maj_flt++; - else - tsk->min_flt++; - if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; - flags |= FAULT_FLAG_TRIED; - - /* No need to up_read(&mm->mmap_sem) as we would - * have already released it in __lock_page_or_retry - * in mm/filemap.c. - */ - - goto retry; - } + /*RGD modeled on Cris */ + if (fault & VM_FAULT_RETRY) { + flags |= FAULT_FLAG_TRIED; + + /* No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return; /* @@ -202,7 +206,7 @@ good_area: */ bad_area: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); bad_area_nosemaphore: @@ -227,8 +231,6 @@ no_context: { const struct exception_table_entry *entry; - __asm__ __volatile__("l.nop 42"); - if ((entry = search_exception_tables(regs->pc)) != NULL) { /* Adjust the instruction pointer in the stackframe */ regs->pc = entry->fixup; @@ -250,25 +252,20 @@ no_context: die("Oops", regs, write_acc); - do_exit(SIGKILL); - /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: - __asm__ __volatile__("l.nop 42"); - __asm__ __volatile__("l.nop 1"); - - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); /* * Send a sigbus, regardless of whether we were in kernel @@ -296,6 +293,7 @@ vmalloc_fault: int offset = pgd_index(address); pgd_t *pgd, *pgd_k; + p4d_t *p4d, *p4d_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; pte_t *pte_k; @@ -322,8 +320,13 @@ vmalloc_fault: * it exists. 
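Returning to the handle_mm_fault() hunk above: the rework follows current mainline fault semantics, where the handler itself may notice a pending fatal signal, may complete the fault and drop the mmap lock on its own (VM_FAULT_COMPLETED), or may ask the caller to retry, and where passing regs lets handle_mm_fault() account the major/minor fault counters that the removed code updated by hand. A condensed, hedged sketch of those outcomes; fault_once() is a hypothetical helper, not something the patch adds.

#include <linux/mm.h>
#include <linux/sched/signal.h>

static vm_fault_t fault_once(struct vm_area_struct *vma, unsigned long addr,
			     unsigned int *flags, struct pt_regs *regs)
{
	vm_fault_t fault = handle_mm_fault(vma, addr, *flags, regs);

	if (fault_signal_pending(fault, regs))
		return fault;		/* retry was requested and a fatal signal is
					 * pending; the mmap lock is already dropped */
	if (fault & VM_FAULT_COMPLETED)
		return fault;		/* fault finished; lock released by the handler */
	if (fault & VM_FAULT_RETRY)
		*flags |= FAULT_FLAG_TRIED;	/* lock dropped; caller re-takes it and retries */
	return fault;
}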
*/ - pud = pud_offset(pgd, address); - pud_k = pud_offset(pgd_k, address); + p4d = p4d_offset(pgd, address); + p4d_k = p4d_offset(pgd_k, address); + if (!p4d_present(*p4d_k)) + goto no_context; + + pud = pud_offset(p4d, address); + pud_k = pud_offset(p4d_k, address); if (!pud_present(*pud_k)) goto no_context; diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index 1f87b524db78..be1c2eb8bb94 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -25,37 +25,29 @@ #include <linux/memblock.h> #include <linux/init.h> #include <linux/delay.h> -#include <linux/blkdev.h> /* for initrd_* */ #include <linux/pagemap.h> #include <asm/pgalloc.h> -#include <asm/pgtable.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/tlb.h> #include <asm/mmu_context.h> -#include <asm/kmap_types.h> #include <asm/fixmap.h> #include <asm/tlbflush.h> #include <asm/sections.h> int mem_init_done; -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); - static void __init zone_sizes_init(void) { - unsigned long zones_size[MAX_NR_ZONES]; - - /* Clear the zone sizes */ - memset(zones_size, 0, sizeof(zones_size)); + unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; /* * We use only ZONE_NORMAL */ - zones_size[ZONE_NORMAL] = max_low_pfn; + max_zone_pfn[ZONE_NORMAL] = max_low_pfn; - free_area_init(zones_size); + free_area_init(max_zone_pfn); } extern const char _s_kernel_ro[], _e_kernel_ro[]; @@ -68,29 +60,32 @@ extern const char _s_kernel_ro[], _e_kernel_ro[]; */ static void __init map_ram(void) { + phys_addr_t start, end; unsigned long v, p, e; pgprot_t prot; pgd_t *pge; + p4d_t *p4e; pud_t *pue; pmd_t *pme; pte_t *pte; + u64 i; /* These mark extents of read-only kernel pages... * ...from vmlinux.lds.S */ - struct memblock_region *region; v = PAGE_OFFSET; - for_each_memblock(memory, region) { - p = (u32) region->base & PAGE_MASK; - e = p + (u32) region->size; + for_each_mem_range(i, &start, &end) { + p = (u32) start & PAGE_MASK; + e = (u32) end; v = (u32) __va(p); pge = pgd_offset_k(v); while (p < e) { int j; - pue = pud_offset(pge, v); + p4e = p4d_offset(pge, v); + pue = pud_offset(p4e, v); pme = pmd_offset(pue, v); if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) { @@ -122,15 +117,12 @@ static void __init map_ram(void) } printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__, - region->base, region->base + region->size); + start, end); } } void __init paging_init(void) { - extern void tlb_init(void); - - unsigned long end; int i; printk(KERN_INFO "Setting up paging and PTEs.\n"); @@ -146,8 +138,6 @@ void __init paging_init(void) */ current_pgd[smp_processor_id()] = init_mm.pgd; - end = (unsigned long)__va(max_low_pfn * PAGE_SIZE); - map_ram(); zone_sizes_init(); @@ -203,18 +193,67 @@ void __init mem_init(void) { BUG_ON(!mem_map); - max_mapnr = max_low_pfn; - high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); - /* clear the zero-page */ memset((void *)empty_zero_page, 0, PAGE_SIZE); - /* this will put all low memory onto the freelists */ - memblock_free_all(); - - mem_init_print_info(NULL); - printk("mem_init_done ...........................................\n"); mem_init_done = 1; return; } + +static int __init map_page(unsigned long va, phys_addr_t pa, pgprot_t prot) +{ + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + p4d = p4d_offset(pgd_offset_k(va), va); + pud = pud_offset(p4d, va); + pmd = pmd_offset(pud, va); + pte = pte_alloc_kernel(pmd, va); + + if (pte == NULL) + return -ENOMEM; + + if (pgprot_val(prot)) + set_pte_at(&init_mm, va, pte, pfn_pte(pa >> PAGE_SHIFT, prot)); + 
else + pte_clear(&init_mm, va, pte); + + local_flush_tlb_page(NULL, va); + return 0; +} + +void __init __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t prot) +{ + unsigned long address = __fix_to_virt(idx); + + if (idx >= __end_of_fixed_addresses) { + BUG(); + return; + } + + map_page(address, phys, prot); +} + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY_X, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_EXEC] = PAGE_READONLY, + [VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X, + [VM_SHARED | VM_EXEC] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c index 8f8e97f7eac9..8e63e86251ca 100644 --- a/arch/openrisc/mm/ioremap.c +++ b/arch/openrisc/mm/ioremap.c @@ -13,99 +13,16 @@ #include <linux/vmalloc.h> #include <linux/io.h> +#include <linux/pgtable.h> #include <asm/pgalloc.h> -#include <asm/kmap_types.h> #include <asm/fixmap.h> #include <asm/bug.h> -#include <asm/pgtable.h> #include <linux/sched.h> #include <asm/tlbflush.h> extern int mem_init_done; -static unsigned int fixmaps_used __initdata; - /* - * Remap an arbitrary physical address space into the kernel virtual - * address space. Needed when the kernel wants to access high addresses - * directly. - * - * NOTE! We need to allow non-page-aligned mappings too: we will obviously - * have to convert them into an offset in a page-aligned mapping, but the - * caller shouldn't need to know that small detail. - */ -void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size) -{ - phys_addr_t p; - unsigned long v; - unsigned long offset, last_addr; - struct vm_struct *area = NULL; - - /* Don't allow wraparound or zero size */ - last_addr = addr + size - 1; - if (!size || last_addr < addr) - return NULL; - - /* - * Mappings have to be page-aligned - */ - offset = addr & ~PAGE_MASK; - p = addr & PAGE_MASK; - size = PAGE_ALIGN(last_addr + 1) - p; - - if (likely(mem_init_done)) { - area = get_vm_area(size, VM_IOREMAP); - if (!area) - return NULL; - v = (unsigned long)area->addr; - } else { - if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS) - return NULL; - v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used); - fixmaps_used += (size >> PAGE_SHIFT); - } - - if (ioremap_page_range(v, v + size, p, - __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) { - if (likely(mem_init_done)) - vfree(area->addr); - else - fixmaps_used -= (size >> PAGE_SHIFT); - return NULL; - } - - return (void __iomem *)(offset + (char *)v); -} -EXPORT_SYMBOL(ioremap); - -void iounmap(void *addr) -{ - /* If the page is from the fixmap pool then we just clear out - * the fixmap mapping. - */ - if (unlikely((unsigned long)addr > FIXADDR_START)) { - /* This is a bit broken... we don't really know - * how big the area is so it's difficult to know - * how many fixed pages to invalidate... - * just flush tlb and hope for the best... - * consider this a FIXME - * - * Really we should be clearing out one or more page - * table entries for these virtual addresses so that - * future references cause a page fault... 
for now, we - * rely on two things: - * i) this code never gets called on known boards - * ii) invalid accesses to the freed areas aren't made - */ - flush_tlb_all(); - return; - } - - return vfree((void *)(PAGE_MASK & (unsigned long)addr)); -} -EXPORT_SYMBOL(iounmap); - -/** * OK, this one's a bit tricky... ioremap can get called before memory is * initialized (early serial console does this) and will want to alloc a page * for its mapping. No userspace pages will ever get allocated before memory @@ -121,10 +38,7 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm) if (likely(mem_init_done)) { pte = (pte_t *)get_zeroed_page(GFP_KERNEL); } else { - pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!pte) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE); + pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); } return pte; diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c index dd4f2007f7c9..3115f2e4f864 100644 --- a/arch/openrisc/mm/tlb.c +++ b/arch/openrisc/mm/tlb.c @@ -23,7 +23,6 @@ #include <linux/init.h> #include <asm/tlbflush.h> -#include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/spr_defs.h> @@ -129,7 +128,7 @@ void local_flush_tlb_mm(struct mm_struct *mm) /* Was seeing bugs with the mm struct passed to us. Scrapped most of this function. */ - /* Several architctures do this */ + /* Several architectures do this */ local_flush_tlb_all(); } @@ -138,21 +137,28 @@ void local_flush_tlb_mm(struct mm_struct *mm) void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *next_tsk) { + unsigned int cpu; + + if (unlikely(prev == next)) + return; + + cpu = smp_processor_id(); + + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + cpumask_set_cpu(cpu, mm_cpumask(next)); + /* remember the pgd for the fault handlers * this is similar to the pgd register in some other CPU's. * we need our own copy of it because current and active_mm * might be invalid at points where we still need to derefer * the pgd. */ - current_pgd[smp_processor_id()] = next->pgd; + current_pgd[cpu] = next->pgd; /* We don't have context support implemented, so flush all * entries belonging to previous map */ - - if (prev != next) - local_flush_tlb_mm(prev); - + local_flush_tlb_mm(prev); } /* @@ -176,12 +182,3 @@ void destroy_context(struct mm_struct *mm) flush_tlb_mm(mm); } - -/* called once during VM initialization, from init.c */ - -void __init tlb_init(void) -{ - /* Do nothing... */ - /* invalidate the entire TLB */ - /* flush_tlb_all(); */ -} |
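Taken together, this switch_mm() change and the smp.c rework earlier in the diff form one mechanism: mm_cpumask(mm) now tracks which CPUs have run the mm, so smp_flush_tlb_mm() and smp_flush_tlb_range() can flush locally when no other CPU is in the mask and IPI only the listed CPUs otherwise. A brief sketch of the two halves of that contract; the helper names are hypothetical, and callers must keep the CPU pinned (the hunks use get_cpu()/put_cpu()) while inspecting the mask.

#include <linux/cpumask.h>
#include <linux/mm_types.h>
#include <linux/smp.h>

/* switch_mm() side: keep mm_cpumask() equal to the set of CPUs that may
 * still hold TLB entries for each mm. */
static void track_mm_on_this_cpu(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int cpu = smp_processor_id();

	cpumask_clear_cpu(cpu, mm_cpumask(prev));	/* stop receiving prev's shootdowns  */
	cpumask_set_cpu(cpu, mm_cpumask(next));		/* start receiving next's shootdowns */
}

/* flush side: an IPI is only needed if some CPU other than this one is in
 * the mask; otherwise a local flush suffices. */
static bool flush_needs_ipi(const struct cpumask *cmask)
{
	return cpumask_any_but(cmask, smp_processor_id()) < nr_cpu_ids;
}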