Diffstat (limited to 'arch/openrisc')
72 files changed, 1622 insertions, 1006 deletions
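Among the changes below, the port selects HAVE_ARCH_JUMP_LABEL and HAVE_ARCH_JUMP_LABEL_RELATIVE and adds asm/jump_label.h, so static branches on OpenRISC are now patched in place between l.nop and l.j. For reference, a minimal sketch of how generic kernel code consumes that facility follows; the key and function names are illustrative only and are not part of this series:

	#include <linux/jump_label.h>
	#include <linux/printk.h>
	#include <linux/types.h>

	/* Illustrative static key, default off; not taken from this series. */
	static DEFINE_STATIC_KEY_FALSE(or1k_demo_key);

	void or1k_demo_hot_path(void)
	{
		/*
		 * With HAVE_ARCH_JUMP_LABEL this test compiles to a single
		 * l.nop that jump label patching rewrites to l.j when the
		 * key is enabled, instead of a load plus conditional branch.
		 */
		if (static_branch_unlikely(&or1k_demo_key))
			pr_info("or1k demo: slow path\n");
	}

	void or1k_demo_toggle(bool on)
	{
		if (on)
			static_branch_enable(&or1k_demo_key);	/* patch sites to l.j */
		else
			static_branch_disable(&or1k_demo_key);	/* patch sites back to l.nop */
	}
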
diff --git a/arch/openrisc/Kbuild b/arch/openrisc/Kbuild index b0b0f2b03f87..70bdb24ff204 100644 --- a/arch/openrisc/Kbuild +++ b/arch/openrisc/Kbuild @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += lib/ kernel/ mm/ -obj-y += boot/dts/ # for cleaning subdir- += boot diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index f724b3f1aeed..9156635dd264 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -10,6 +10,7 @@ config OPENRISC select ARCH_HAS_DMA_SET_UNCACHED select ARCH_HAS_DMA_CLEAR_UNCACHED select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select GENERIC_BUILTIN_DTB select COMMON_CLK select OF select OF_EARLY_FLATTREE @@ -20,9 +21,16 @@ config OPENRISC select GENERIC_IRQ_CHIP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW - select GENERIC_IOMAP + select GENERIC_PCI_IOMAP + select GENERIC_IOREMAP select GENERIC_CPU_DEVICES + select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_JUMP_LABEL_RELATIVE + select HAVE_PCI select HAVE_UID16 + select HAVE_PAGE_SIZE_8KB + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RSEQ select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS_BROADCAST select GENERIC_SMP_IDLE_THREAD @@ -30,13 +38,13 @@ config OPENRISC select HAVE_DEBUG_STACKOVERFLOW select OR1K_PIC select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1 - select ARCH_USE_QUEUED_SPINLOCKS select ARCH_USE_QUEUED_RWLOCKS select OMPIC if SMP + select PCI_DOMAINS_GENERIC if PCI + select PCI_MSI if PCI select ARCH_WANT_FRAME_POINTERS select GENERIC_IRQ_MULTI_HANDLER select MMU_GATHER_NO_RANGE if MMU - select SET_FS select TRACE_IRQFLAGS_SUPPORT config CPU_BIG_ENDIAN @@ -62,6 +70,9 @@ config STACKTRACE_SUPPORT config LOCKDEP_SUPPORT def_bool y +config FIX_EARLYCON_MEM + def_bool y + menu "Processor type and features" choice @@ -86,7 +97,7 @@ config DCACHE_WRITETHROUGH If unsure say N here -config OPENRISC_BUILTIN_DTB +config BUILTIN_DTB_NAME string "Builtin DTB" default "" @@ -115,6 +126,59 @@ config OPENRISC_HAVE_INST_DIV default y help Select this if your implementation has a hardware divide instruction + +config OPENRISC_HAVE_INST_CMOV + bool "Have instruction l.cmov for conditional move" + default n + help + This config enables gcc to generate l.cmov instructions when compiling + the kernel which in general will improve performance and reduce the + binary size. + + Select this if your implementation has support for the Class II + l.cmov conistional move instruction. + + Say N if you are unsure. + +config OPENRISC_HAVE_INST_ROR + bool "Have instruction l.ror for rotate right" + default n + help + This config enables gcc to generate l.ror instructions when compiling + the kernel which in general will improve performance and reduce the + binary size. + + Select this if your implementation has support for the Class II + l.ror rotate right instruction. + + Say N if you are unsure. + +config OPENRISC_HAVE_INST_RORI + bool "Have instruction l.rori for rotate right with immediate" + default n + help + This config enables gcc to generate l.rori instructions when compiling + the kernel which in general will improve performance and reduce the + binary size. + + Select this if your implementation has support for the Class II + l.rori rotate right with immediate instruction. + + Say N if you are unsure. + +config OPENRISC_HAVE_INST_SEXT + bool "Have instructions l.ext* for sign extension" + default n + help + This config enables gcc to generate l.ext* instructions when compiling + the kernel which in general will improve performance and reduce the + binary size. 
+ + Select this if your implementation has support for the Class II + l.exths, l.extbs, l.exthz and l.extbz size extend instructions. + + Say N if you are unsure. + endmenu config NR_CPUS @@ -132,6 +196,15 @@ config SMP If you don't know what to do here, say N. +config FPU + bool "FPU support" + default y + help + Say N here if you want to disable all floating-point related procedures + in the kernel and reduce binary size. + + If you don't know what to do here, say Y. + source "kernel/Kconfig.hz" config OPENRISC_NO_SPR_SR_DSX diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile index 760b734fb822..68249521db5a 100644 --- a/arch/openrisc/Makefile +++ b/arch/openrisc/Makefile @@ -21,6 +21,7 @@ OBJCOPYFLAGS := -O binary -R .note -R .comment -S LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ +KBUILD_CFLAGS += -msfimm -mshftimm all: vmlinux.bin @@ -38,7 +39,21 @@ else KBUILD_CFLAGS += $(call cc-option,-msoft-div) endif -head-y := arch/openrisc/kernel/head.o +ifeq ($(CONFIG_OPENRISC_HAVE_INST_CMOV),y) + KBUILD_CFLAGS += $(call cc-option,-mcmov) +endif + +ifeq ($(CONFIG_OPENRISC_HAVE_INST_ROR),y) + KBUILD_CFLAGS += $(call cc-option,-mror) +endif + +ifeq ($(CONFIG_OPENRISC_HAVE_INST_RORI),y) + KBUILD_CFLAGS += $(call cc-option,-mrori) +endif + +ifeq ($(CONFIG_OPENRISC_HAVE_INST_SEXT),y) + KBUILD_CFLAGS += $(call cc-option,-msext) +endif libs-y += $(LIBGCC) diff --git a/arch/openrisc/boot/dts/Makefile b/arch/openrisc/boot/dts/Makefile index 13db5a2aab52..3a66e0ef3985 100644 --- a/arch/openrisc/boot/dts/Makefile +++ b/arch/openrisc/boot/dts/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += $(addsuffix .dtb.o, $(CONFIG_OPENRISC_BUILTIN_DTB)) +dtb-y += $(addsuffix .dtb, $(CONFIG_BUILTIN_DTB_NAME)) #DTC_FLAGS ?= -p 1024 diff --git a/arch/openrisc/configs/or1klitex_defconfig b/arch/openrisc/configs/or1klitex_defconfig index d695879a4d26..fb1eb9a68bd6 100644 --- a/arch/openrisc/configs/or1klitex_defconfig +++ b/arch/openrisc/configs/or1klitex_defconfig @@ -1,22 +1,54 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_CGROUPS=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_EMBEDDED=y -CONFIG_OPENRISC_BUILTIN_DTB="or1klitex" +CONFIG_SGETMASK_SYSCALL=y +CONFIG_EXPERT=y +CONFIG_BUILTIN_DTB_NAME="or1klitex" CONFIG_HZ_100=y +CONFIG_OPENRISC_HAVE_SHADOW_GPRS=y CONFIG_NET=y CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_INET_UDP_DIAG=y +CONFIG_INET_RAW_DIAG=y +# CONFIG_WIRELESS is not set +# CONFIG_ETHTOOL_NETLINK is not set CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_OF_OVERLAY=y CONFIG_NETDEVICES=y CONFIG_LITEX_LITEETH=y +# CONFIG_WLAN is not set CONFIG_SERIAL_LITEUART=y CONFIG_SERIAL_LITEUART_CONSOLE=y CONFIG_TTY_PRINTK=y +# CONFIG_GPIO_CDEV is not set +CONFIG_MMC=y +CONFIG_MMC_LITEX=y +# CONFIG_VHOST_MENU is not set +# CONFIG_IOMMU_SUPPORT is not set CONFIG_LITEX_SOC_CONTROLLER=y +CONFIG_EXT2_FS=y +CONFIG_EXT4_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_EXFAT_FS=y CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" CONFIG_PRINTK_TIME=y CONFIG_PANIC_ON_OOPS=y CONFIG_SOFTLOCKUP_DETECTOR=y diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig index 6e1e004047c7..769705ac24d5 100644 --- 
a/arch/openrisc/configs/or1ksim_defconfig +++ b/arch/openrisc/configs/or1ksim_defconfig @@ -3,25 +3,23 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_GZIP is not set CONFIG_EXPERT=y -# CONFIG_KALLSYMS is not set # CONFIG_EPOLL is not set # CONFIG_TIMERFD is not set # CONFIG_EVENTFD is not set # CONFIG_AIO is not set -# CONFIG_VM_EVENT_COUNTERS is not set -# CONFIG_COMPAT_BRK is not set -CONFIG_SLOB=y +# CONFIG_KALLSYMS is not set +CONFIG_BUILTIN_DTB_NAME="or1ksim" +CONFIG_HZ_100=y +CONFIG_JUMP_LABEL=y CONFIG_MODULES=y # CONFIG_BLOCK is not set -CONFIG_OPENRISC_BUILTIN_DTB="or1ksim" -CONFIG_HZ_100=y +CONFIG_SLUB_TINY=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_VM_EVENT_COUNTERS is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_INET_DIAG is not set CONFIG_TCP_CONG_ADVANCED=y # CONFIG_TCP_CONG_BIC is not set @@ -34,7 +32,6 @@ CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set -CONFIG_PROC_DEVICETREE=y CONFIG_NETDEVICES=y CONFIG_ETHOC=y CONFIG_MICREL_PHY=y @@ -52,4 +49,3 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_DNOTIFY is not set CONFIG_TMPFS=y CONFIG_NFS_FS=y -# CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/openrisc/configs/simple_smp_defconfig b/arch/openrisc/configs/simple_smp_defconfig index ff49d868e040..6008e824d31c 100644 --- a/arch/openrisc/configs/simple_smp_defconfig +++ b/arch/openrisc/configs/simple_smp_defconfig @@ -16,10 +16,11 @@ CONFIG_EXPERT=y # CONFIG_AIO is not set # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLOB=y +CONFIG_SLUB=y +CONFIG_SLUB_TINY=y CONFIG_MODULES=y # CONFIG_BLOCK is not set -CONFIG_OPENRISC_BUILTIN_DTB="simple_smp" +CONFIG_BUILTIN_DTB_NAME="simple_smp" CONFIG_SMP=y CONFIG_HZ_100=y CONFIG_OPENRISC_HAVE_SHADOW_GPRS=y diff --git a/arch/openrisc/configs/virt_defconfig b/arch/openrisc/configs/virt_defconfig new file mode 100644 index 000000000000..0b9979b35ca8 --- /dev/null +++ b/arch/openrisc/configs/virt_defconfig @@ -0,0 +1,108 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ_IDLE=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_OPENRISC_HAVE_INST_CMOV=y +CONFIG_OPENRISC_HAVE_INST_ROR=y +CONFIG_OPENRISC_HAVE_INST_RORI=y +CONFIG_OPENRISC_HAVE_INST_SEXT=y +CONFIG_NR_CPUS=8 +CONFIG_SMP=y +CONFIG_HZ_100=y +# CONFIG_OPENRISC_NO_SPR_SR_DSX is not set +CONFIG_JUMP_LABEL=y +# CONFIG_COMPAT_BRK is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +# CONFIG_TCP_CONG_CUBIC is not set +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=y +CONFIG_VIRTIO_BLK=y +CONFIG_NETDEVICES=y +CONFIG_VIRTIO_NET=y +CONFIG_ETHOC=y +CONFIG_MICREL_PHY=y +# CONFIG_WLAN is not set +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_POWER_RESET_SYSCON_POWEROFF=y +CONFIG_SYSCON_REBOOT_MODE=y +# CONFIG_HWMON 
is not set +CONFIG_DRM=y +# CONFIG_DRM_FBDEV_EMULATION is not set +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_FB=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_LOGO=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_INTEL8X0=y +CONFIG_SND_INTEL8X0M=y +CONFIG_SND_SOC=y +CONFIG_SND_VIRTIO=y +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_REDRAGON is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +CONFIG_USB=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_PLATFORM=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_SERIAL=y +CONFIG_USB_GADGET=y +CONFIG_TYPEC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_GOLDFISH=y +CONFIG_VIRT_DRIVERS=y +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_PCI_LEGACY is not set +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +# CONFIG_DNOTIFY is not set +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_EXFAT_FS=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_UNICODE=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_FTRACE=y diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index ca5987e11053..cef49d60d74c 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -1,9 +1,11 @@ # SPDX-License-Identifier: GPL-2.0 +syscall-y += syscall_table_32.h + generic-y += extable.h generic-y += kvm_para.h -generic-y += mcs_spinlock.h -generic-y += qspinlock_types.h -generic-y += qspinlock.h +generic-y += parport.h +generic-y += spinlock_types.h +generic-y += spinlock.h generic-y += qrwlock_types.h generic-y += qrwlock.h generic-y += user.h diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h index 326167e4783a..8ce67ec7c9a3 100644 --- a/arch/openrisc/include/asm/atomic.h +++ b/arch/openrisc/include/asm/atomic.h @@ -130,7 +130,4 @@ static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) #include <asm/cmpxchg.h> -#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v))) -#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new))) - #endif /* __ASM_OPENRISC_ATOMIC_H */ diff --git a/arch/openrisc/include/asm/bitops/__ffs.h b/arch/openrisc/include/asm/bitops/__ffs.h index 1e224b616fdf..4827b66530b2 100644 --- a/arch/openrisc/include/asm/bitops/__ffs.h +++ b/arch/openrisc/include/asm/bitops/__ffs.h @@ -11,7 +11,7 @@ #ifdef CONFIG_OPENRISC_HAVE_INST_FF1 -static inline unsigned long __ffs(unsigned long x) +static inline __attribute_const__ unsigned long __ffs(unsigned long x) { int ret; diff --git a/arch/openrisc/include/asm/bitops/__fls.h b/arch/openrisc/include/asm/bitops/__fls.h index 9658446ad141..637cc76fe4b7 100644 --- a/arch/openrisc/include/asm/bitops/__fls.h +++ b/arch/openrisc/include/asm/bitops/__fls.h @@ -11,7 +11,7 @@ #ifdef CONFIG_OPENRISC_HAVE_INST_FL1 -static inline unsigned long __fls(unsigned long x) +static inline __attribute_const__ unsigned long __fls(unsigned long x) { int ret; diff --git a/arch/openrisc/include/asm/bitops/ffs.h b/arch/openrisc/include/asm/bitops/ffs.h index b4c835d6bc84..536a60ab9cc3 100644 --- a/arch/openrisc/include/asm/bitops/ffs.h +++ b/arch/openrisc/include/asm/bitops/ffs.h @@ -10,7 +10,7 @@ #ifdef 
CONFIG_OPENRISC_HAVE_INST_FF1 -static inline int ffs(int x) +static inline __attribute_const__ int ffs(int x) { int ret; diff --git a/arch/openrisc/include/asm/bitops/fls.h b/arch/openrisc/include/asm/bitops/fls.h index 6b77f6556fb9..77da7639bb3e 100644 --- a/arch/openrisc/include/asm/bitops/fls.h +++ b/arch/openrisc/include/asm/bitops/fls.h @@ -11,7 +11,7 @@ #ifdef CONFIG_OPENRISC_HAVE_INST_FL1 -static inline int fls(unsigned int x) +static inline __attribute_const__ int fls(unsigned int x) { int ret; diff --git a/arch/openrisc/include/asm/bug.h b/arch/openrisc/include/asm/bug.h new file mode 100644 index 000000000000..6d04776eaf10 --- /dev/null +++ b/arch/openrisc/include/asm/bug.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_OPENRISC_BUG_H +#define __ASM_OPENRISC_BUG_H + +#include <asm-generic/bug.h> + +struct pt_regs; + +void __noreturn die(const char *str, struct pt_regs *regs, long err); + +#endif /* __ASM_OPENRISC_BUG_H */ diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h index eeac40d4a854..cd8f971c0fec 100644 --- a/arch/openrisc/include/asm/cacheflush.h +++ b/arch/openrisc/include/asm/cacheflush.h @@ -23,6 +23,9 @@ */ extern void local_dcache_page_flush(struct page *page); extern void local_icache_page_inv(struct page *page); +extern void local_dcache_range_flush(unsigned long start, unsigned long end); +extern void local_dcache_range_inv(unsigned long start, unsigned long end); +extern void local_icache_range_inv(unsigned long start, unsigned long end); /* * Data cache flushing always happen on the local cpu. Instruction cache @@ -39,6 +42,20 @@ extern void smp_icache_page_inv(struct page *page); #endif /* CONFIG_SMP */ /* + * Even if the actual block size is larger than L1_CACHE_BYTES, paddr + * can be incremented by L1_CACHE_BYTES. When paddr is written to the + * invalidate register, the entire cache line encompassing this address + * is invalidated. Each subsequent reference to the same cache line will + * not affect the invalidation process. + */ +#define local_dcache_block_flush(addr) \ + local_dcache_range_flush(addr, addr + L1_CACHE_BYTES) +#define local_dcache_block_inv(addr) \ + local_dcache_range_inv(addr, addr + L1_CACHE_BYTES) +#define local_icache_block_inv(addr) \ + local_icache_range_inv(addr, addr + L1_CACHE_BYTES) + +/* * Synchronizes caches. Whenever a cpu writes executable code to memory, this * should be called to make sure the processor sees the newly written code. 
*/ @@ -56,10 +73,16 @@ static inline void sync_icache_dcache(struct page *page) */ #define PG_dc_clean PG_arch_1 +static inline void flush_dcache_folio(struct folio *folio) +{ + clear_bit(PG_dc_clean, &folio->flags.f); +} +#define flush_dcache_folio flush_dcache_folio + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 static inline void flush_dcache_page(struct page *page) { - clear_bit(PG_dc_clean, &page->flags); + flush_dcache_folio(page_folio(page)); } #define flush_icache_user_page(vma, page, addr, len) \ diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h index 79fd16162ccb..8ee151c072e4 100644 --- a/arch/openrisc/include/asm/cmpxchg.h +++ b/arch/openrisc/include/asm/cmpxchg.h @@ -147,8 +147,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, extern unsigned long __xchg_called_with_bad_pointer(void) __compiletime_error("Bad argument size for xchg"); -static inline unsigned long __xchg(volatile void *ptr, unsigned long with, - int size) +static inline unsigned long +__arch_xchg(volatile void *ptr, unsigned long with, int size) { switch (size) { case 1: @@ -163,9 +163,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with, #define arch_xchg(ptr, with) \ ({ \ - (__typeof__(*(ptr))) __xchg((ptr), \ - (unsigned long)(with), \ - sizeof(*(ptr))); \ + (__typeof__(*(ptr))) __arch_xchg((ptr), \ + (unsigned long)(with), \ + sizeof(*(ptr))); \ }) #endif /* __ASM_OPENRISC_CMPXCHG_H */ diff --git a/arch/openrisc/include/asm/cpuinfo.h b/arch/openrisc/include/asm/cpuinfo.h index 5e4744153d0e..3cfc4cf0b019 100644 --- a/arch/openrisc/include/asm/cpuinfo.h +++ b/arch/openrisc/include/asm/cpuinfo.h @@ -15,16 +15,21 @@ #ifndef __ASM_OPENRISC_CPUINFO_H #define __ASM_OPENRISC_CPUINFO_H +#include <asm/spr.h> +#include <asm/spr_defs.h> + +struct cache_desc { + u32 size; + u32 sets; + u32 block_size; + u32 ways; +}; + struct cpuinfo_or1k { u32 clock_frequency; - u32 icache_size; - u32 icache_block_size; - u32 icache_ways; - - u32 dcache_size; - u32 dcache_block_size; - u32 dcache_ways; + struct cache_desc icache; + struct cache_desc dcache; u16 coreid; }; @@ -32,4 +37,9 @@ struct cpuinfo_or1k { extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS]; extern void setup_cpuinfo(void); +/* + * Check if the cache component exists. + */ +extern bool cpu_cache_is_present(const unsigned int cache_type); + #endif /* __ASM_OPENRISC_CPUINFO_H */ diff --git a/arch/openrisc/include/asm/fixmap.h b/arch/openrisc/include/asm/fixmap.h index ad78e50b7ba3..74000215064d 100644 --- a/arch/openrisc/include/asm/fixmap.h +++ b/arch/openrisc/include/asm/fixmap.h @@ -26,59 +26,20 @@ #include <linux/bug.h> #include <asm/page.h> -/* - * On OpenRISC we use these special fixed_addresses for doing ioremap - * early in the boot process before memory initialization is complete. - * This is used, in particular, by the early serial console code. - * - * It's not really 'fixmap', per se, but fits loosely into the same - * paradigm. - */ enum fixed_addresses { - /* - * FIX_IOREMAP entries are useful for mapping physical address - * space before ioremap() is useable, e.g. really early in boot - * before kmalloc() is working. - */ -#define FIX_N_IOREMAPS 32 - FIX_IOREMAP_BEGIN, - FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS - 1, + FIX_EARLYCON_MEM_BASE, + FIX_TEXT_POKE0, __end_of_fixed_addresses }; #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) /* FIXADDR_BOTTOM might be a better name here... 
*/ #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXMAP_PAGE_IO PAGE_KERNEL_NOCACHE -#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) -#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) - -/* - * 'index to address' translation. If anyone tries to use the idx - * directly without tranlation, we catch the bug with a NULL-deference - * kernel oops. Illegal ranges of incoming indices are caught too. - */ -static __always_inline unsigned long fix_to_virt(const unsigned int idx) -{ - /* - * this branch gets completely eliminated after inlining, - * except when someone tries to use fixaddr indices in an - * illegal way. (such as mixing up address types or using - * out-of-range indices). - * - * If it doesn't get removed, the linker will complain - * loudly with a reasonably clear error message.. - */ - if (idx >= __end_of_fixed_addresses) - BUG(); - - return __fix_to_virt(idx); -} +extern void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags); -static inline unsigned long virt_to_fix(const unsigned long vaddr) -{ - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); - return __virt_to_fix(vaddr); -} +#include <asm-generic/fixmap.h> #endif diff --git a/arch/openrisc/include/asm/fpu.h b/arch/openrisc/include/asm/fpu.h new file mode 100644 index 000000000000..57bc44d80d53 --- /dev/null +++ b/arch/openrisc/include/asm/fpu.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_OPENRISC_FPU_H +#define __ASM_OPENRISC_FPU_H + +struct task_struct; + +#ifdef CONFIG_FPU +static inline void save_fpu(struct task_struct *task) +{ + task->thread.fpcsr = mfspr(SPR_FPCSR); +} + +static inline void restore_fpu(struct task_struct *task) +{ + mtspr(SPR_FPCSR, task->thread.fpcsr); +} +#else +#define save_fpu(tsk) do { } while (0) +#define restore_fpu(tsk) do { } while (0) +#endif + +#endif /* __ASM_OPENRISC_FPU_H */ diff --git a/arch/openrisc/include/asm/insn-def.h b/arch/openrisc/include/asm/insn-def.h new file mode 100644 index 000000000000..1e0c028a5b95 --- /dev/null +++ b/arch/openrisc/include/asm/insn-def.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Chen Miao + */ + +#ifndef __ASM_OPENRISC_INSN_DEF_H +#define __ASM_OPENRISC_INSN_DEF_H + +/* or1k instructions are always 32 bits. */ +#define OPENRISC_INSN_SIZE 4 + +/* or1k nop instruction code */ +#define OPENRISC_INSN_NOP 0x15000000U + +#endif /* __ASM_OPENRISC_INSN_DEF_H */ diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h index c298061c70a7..5a6f0f16a5ce 100644 --- a/arch/openrisc/include/asm/io.h +++ b/arch/openrisc/include/asm/io.h @@ -15,9 +15,11 @@ #define __ASM_OPENRISC_IO_H #include <linux/types.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> /* - * PCI: can we really do 0 here if we have no port IO? + * PCI: We do not use IO ports in OpenRISC */ #define IO_SPACE_LIMIT 0 @@ -27,11 +29,10 @@ #define PIO_OFFSET 0 #define PIO_MASK 0 -#define ioremap ioremap -void __iomem *ioremap(phys_addr_t offset, unsigned long size); - -#define iounmap iounmap -extern void iounmap(void __iomem *addr); +/* + * I/O memory mapping functions. 
+ */ +#define _PAGE_IOREMAP (pgprot_val(PAGE_KERNEL) | _PAGE_CI) #include <asm-generic/io.h> diff --git a/arch/openrisc/include/asm/jump_label.h b/arch/openrisc/include/asm/jump_label.h new file mode 100644 index 000000000000..3ec0f4e19f9c --- /dev/null +++ b/arch/openrisc/include/asm/jump_label.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Chen Miao + * + * Based on arch/arm/include/asm/jump_label.h + */ +#ifndef __ASM_OPENRISC_JUMP_LABEL_H +#define __ASM_OPENRISC_JUMP_LABEL_H + +#ifndef __ASSEMBLER__ + +#include <linux/types.h> +#include <asm/insn-def.h> + +#define HAVE_JUMP_LABEL_BATCH + +#define JUMP_LABEL_NOP_SIZE OPENRISC_INSN_SIZE + +/** + * JUMP_TABLE_ENTRY - Create a jump table entry + * @key: Jump key identifier (typically a symbol address) + * @label: Target label address + * + * This macro creates a jump table entry in the dedicated kernel section (__jump_table). + * Each entry contains the following information: + * Offset from current instruction to jump instruction (1b - .) + * Offset from current instruction to target label (label - .) + * Offset from current instruction to key identifier (key - .) + */ +#define JUMP_TABLE_ENTRY(key, label) \ + ".pushsection __jump_table, \"aw\" \n\t" \ + ".align 4 \n\t" \ + ".long 1b - ., " label " - . \n\t" \ + ".long " key " - . \n\t" \ + ".popsection \n\t" + +#define ARCH_STATIC_BRANCH_ASM(key, label) \ + ".align 4 \n\t" \ + "1: l.nop \n\t" \ + " l.nop \n\t" \ + JUMP_TABLE_ENTRY(key, label) + +static __always_inline bool arch_static_branch(struct static_key *const key, + const bool branch) +{ + asm goto (ARCH_STATIC_BRANCH_ASM("%0", "%l[l_yes]") + ::"i"(&((char *)key)[branch])::l_yes); + + return false; +l_yes: + return true; +} + +#define ARCH_STATIC_BRANCH_JUMP_ASM(key, label) \ + ".align 4 \n\t" \ + "1: l.j " label " \n\t" \ + " l.nop \n\t" \ + JUMP_TABLE_ENTRY(key, label) + +static __always_inline bool +arch_static_branch_jump(struct static_key *const key, const bool branch) +{ + asm goto (ARCH_STATIC_BRANCH_JUMP_ASM("%0", "%l[l_yes]") + ::"i"(&((char *)key)[branch])::l_yes); + + return false; +l_yes: + return true; +} + +#endif /* __ASSEMBLER__ */ +#endif /* __ASM_OPENRISC_JUMP_LABEL_H */ diff --git a/arch/openrisc/include/asm/mmu.h b/arch/openrisc/include/asm/mmu.h index eb720110f3a2..e7826a681bc4 100644 --- a/arch/openrisc/include/asm/mmu.h +++ b/arch/openrisc/include/asm/mmu.h @@ -15,7 +15,7 @@ #ifndef __ASM_OPENRISC_MMU_H #define __ASM_OPENRISC_MMU_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ typedef unsigned long mm_context_t; #endif diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h index aab6e64d6db4..85797f94d1d7 100644 --- a/arch/openrisc/include/asm/page.h +++ b/arch/openrisc/include/asm/page.h @@ -15,16 +15,7 @@ #ifndef __ASM_OPENRISC_PAGE_H #define __ASM_OPENRISC_PAGE_H - -/* PAGE_SHIFT determines the page size */ - -#define PAGE_SHIFT 13 -#ifdef __ASSEMBLY__ -#define PAGE_SIZE (1 << PAGE_SHIFT) -#else -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#endif -#define PAGE_MASK (~(PAGE_SIZE-1)) +#include <vdso/page.h> #define PAGE_OFFSET 0xc0000000 #define KERNELBASE PAGE_OFFSET @@ -34,7 +25,7 @@ */ #include <asm/setup.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define clear_page(page) memset((page), 0, PAGE_SIZE) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) @@ -64,27 +55,25 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) }) #define __pgprot(x) ((pgprot_t) { (x) }) -#endif /* !__ASSEMBLY__ */ +#endif /* 
!__ASSEMBLER__ */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET)) #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) -#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) -#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) +static inline unsigned long virt_to_pfn(const void *kaddr) +{ + return __pa(kaddr) >> PAGE_SHIFT; +} #define virt_to_page(addr) \ (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) -#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) - -#define pfn_valid(pfn) ((pfn) < max_mapnr) - #define virt_addr_valid(kaddr) (pfn_valid(virt_to_pfn(kaddr))) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h index b7b2b8d16fad..3f110931d8f6 100644 --- a/arch/openrisc/include/asm/pgalloc.h +++ b/arch/openrisc/include/asm/pgalloc.h @@ -41,15 +41,13 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, */ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL); + pgd_t *ret = __pgd_alloc(mm, 0); - if (ret) { - memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); + if (ret) memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); - } return ret; } @@ -67,9 +65,6 @@ extern inline pgd_t *pgd_alloc(struct mm_struct *mm) extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); #define __pte_free_tlb(tlb, pte, addr) \ -do { \ - pgtable_pte_page_dtor(pte); \ - tlb_remove_page((tlb), (pte)); \ -} while (0) + tlb_remove_ptdesc((tlb), page_ptdesc(pte)) #endif diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h index cdd657f80bfa..b218050e2f6d 100644 --- a/arch/openrisc/include/asm/pgtable.h +++ b/arch/openrisc/include/asm/pgtable.h @@ -23,7 +23,7 @@ #include <asm-generic/pgtable-nopmd.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/mmu.h> #include <asm/fixmap.h> @@ -46,7 +46,7 @@ extern void paging_init(void); * hook is made available. */ #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) -#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) + /* * (pmds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) @@ -154,6 +154,9 @@ extern void paging_init(void); #define _KERNPG_TABLE \ (_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY) +/* We borrow bit 11 to store the exclusive marker in swap PTEs. 
*/ +#define _PAGE_SWP_EXCLUSIVE _PAGE_U_SHARED + #define PAGE_NONE __pgprot(_PAGE_ALL) #define PAGE_READONLY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE) #define PAGE_READONLY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC) @@ -176,45 +179,10 @@ extern void paging_init(void); __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \ | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI) -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY_X -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY_X -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY_X - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY_X -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED_X -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED_X - /* zero page used for uninitialized stuff */ extern unsigned long empty_zero_page[2048]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) -/* number of bits that fit into a memory pointer */ -#define BITS_PER_PTR (8*sizeof(unsigned long)) - -/* to align the pointer to a pointer address */ -#define PTR_MASK (~(sizeof(void *)-1)) - -/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */ -/* 64-bit machines, beware! SRB. */ -#define SIZEOF_PTR_LOG2 2 - -/* to find an entry in a page-table */ -#define PAGE_PTR(address) \ -((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) - -/* to set the page-dir */ -#define SET_PAGE_DIR(tsk, pgdir) - #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) #define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0) @@ -265,7 +233,7 @@ static inline pte_t pte_mkold(pte_t pte) return pte; } -static inline pte_t pte_mkwrite(pte_t pte) +static inline pte_t pte_mkwrite_novma(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; @@ -314,8 +282,6 @@ static inline pte_t __mk_pte(void *page, pgprot_t pgprot) return pte; } -#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot)) - #define mk_pte_phys(physpage, pgprot) \ ({ \ pte_t __pte; \ @@ -361,6 +327,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep; } +#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) static inline unsigned long pmd_page_vaddr(pmd_t pmd) @@ -371,6 +338,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) #define __pmd_offset(address) \ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) +#define PFN_PTE_SHIFT PAGE_SHIFT #define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT) #define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot)) @@ -393,28 +361,57 @@ static inline void update_tlb(struct vm_area_struct *vma, extern void update_cache(struct vm_area_struct *vma, unsigned long address, pte_t *pte); -static inline void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t *pte) +static inline void update_mmu_cache_range(struct vm_fault *vmf, + struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, unsigned int nr) { - update_tlb(vma, address, pte); - update_cache(vma, address, pte); + update_tlb(vma, address, ptep); + update_cache(vma, address, ptep); } -/* __PHX__ FIXME, SWAP, this probably doesn't work */ +#define update_mmu_cache(vma, addr, ptep) \ + update_mmu_cache_range(NULL, vma, addr, ptep, 1) -/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */ -/* Since the PAGE_PRESENT bit 
is bit 4, we can use the bits above */ +/* __PHX__ FIXME, SWAP, this probably doesn't work */ -#define __swp_type(x) (((x).val >> 5) & 0x7f) +/* + * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that + * are !pte_none() && !pte_present(). + * + * Format of swap PTEs: + * + * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * <-------------- offset ---------------> E <- type --> 0 0 0 0 0 + * + * E is the exclusive marker that is not stored in swap entries. + * The zero'ed bits include _PAGE_PRESENT. + */ +#define __swp_type(x) (((x).val >> 5) & 0x3f) #define __swp_offset(x) ((x).val >> 12) #define __swp_entry(type, offset) \ - ((swp_entry_t) { ((type) << 5) | ((offset) << 12) }) + ((swp_entry_t) { (((type) & 0x3f) << 5) | ((offset) << 12) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define kern_addr_valid(addr) (1) +static inline bool pte_swp_exclusive(pte_t pte) +{ + return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + pte_val(pte) |= _PAGE_SWP_EXCLUSIVE; + return pte; +} + +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE; + return pte; +} typedef pte_t *pte_addr_t; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_OPENRISC_PGTABLE_H */ diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h index aa1699c18add..3ff893a67c13 100644 --- a/arch/openrisc/include/asm/processor.h +++ b/arch/openrisc/include/asm/processor.h @@ -39,11 +39,12 @@ */ #define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct task_struct; struct thread_struct { + long fpcsr; /* Floating point control status register. */ }; /* @@ -72,10 +73,10 @@ struct thread_struct { void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp); -void release_thread(struct task_struct *); unsigned long __get_wchan(struct task_struct *p); +void show_registers(struct pt_regs *regs); #define cpu_relax() barrier() -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_OPENRISC_PROCESSOR_H */ diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h index 01f81d4e97dc..28facf2f3e00 100644 --- a/arch/openrisc/include/asm/ptrace.h +++ b/arch/openrisc/include/asm/ptrace.h @@ -17,6 +17,7 @@ #include <asm/spr_defs.h> #include <uapi/asm/ptrace.h> +#include <linux/compiler.h> /* * Make kernel PTrace/register structures opaque to userspace... userspace can @@ -26,7 +27,7 @@ * they share a cacheline (not done yet, though... future optimization). */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * This struct describes how the registers are laid out on the kernel stack * during a syscall or other kernel entry. 
@@ -42,6 +43,36 @@ struct pt_regs { /* Named registers */ long sr; /* Stored in place of r0 */ long sp; /* r1 */ + long gpr2; + long gpr3; + long gpr4; + long gpr5; + long gpr6; + long gpr7; + long gpr8; + long gpr9; + long gpr10; + long gpr11; + long gpr12; + long gpr13; + long gpr14; + long gpr15; + long gpr16; + long gpr17; + long gpr18; + long gpr19; + long gpr20; + long gpr21; + long gpr22; + long gpr23; + long gpr24; + long gpr25; + long gpr26; + long gpr27; + long gpr28; + long gpr29; + long gpr30; + long gpr31; }; struct { /* Old style */ @@ -66,17 +97,57 @@ struct pt_regs { /* TODO: Rename this to REDZONE because that's what it is */ #define STACK_FRAME_OVERHEAD 128 /* size of minimum stack frame */ -#define instruction_pointer(regs) ((regs)->pc) +#define MAX_REG_OFFSET offsetof(struct pt_regs, orig_gpr11) + +/* Helpers for working with the instruction pointer */ +static inline unsigned long instruction_pointer(struct pt_regs *regs) +{ + return (unsigned long)regs->pc; +} +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->pc = val; +} + #define user_mode(regs) (((regs)->sr & SPR_SR_SM) == 0) #define user_stack_pointer(regs) ((unsigned long)(regs)->sp) #define profile_pc(regs) instruction_pointer(regs) +/* Valid only for Kernel mode traps. */ +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return (unsigned long)regs->sp; +} + static inline long regs_return_value(struct pt_regs *regs) { return regs->gpr[11]; } -#endif /* __ASSEMBLY__ */ +extern int regs_query_register_offset(const char *name); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten + * @offset: offset of the register. + * + * regs_get_register returns the value of a register whose offset from @regs. + * The @offset is the offset of the register in struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. + */ +static inline unsigned long regs_get_register(struct pt_regs *regs, + unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} + +#endif /* __ASSEMBLER__ */ /* * Offsets used by 'ptrace' system call interface. @@ -115,6 +186,5 @@ static inline long regs_return_value(struct pt_regs *regs) #define PT_GPR31 124 #define PT_PC 128 #define PT_ORIG_GPR11 132 -#define PT_SYSCALLNO 136 #endif /* __ASM_OPENRISC_PTRACE_H */ diff --git a/arch/openrisc/include/asm/setup.h b/arch/openrisc/include/asm/setup.h index 9acbc5deda69..dce9f4d3b378 100644 --- a/arch/openrisc/include/asm/setup.h +++ b/arch/openrisc/include/asm/setup.h @@ -8,7 +8,7 @@ #include <linux/init.h> #include <asm-generic/setup.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ void __init or1k_early_setup(void *fdt); #endif diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h deleted file mode 100644 index 264944a71535..000000000000 --- a/arch/openrisc/include/asm/spinlock.h +++ /dev/null @@ -1,27 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * OpenRISC Linux - * - * Linux architectural port borrowing liberally from similar works of - * others. All original copyrights apply as per the original source - * declaration. - * - * OpenRISC implementation: - * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> - * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> - * et al. 
- */ - -#ifndef __ASM_OPENRISC_SPINLOCK_H -#define __ASM_OPENRISC_SPINLOCK_H - -#include <asm/qspinlock.h> - -#include <asm/qrwlock.h> - -#define arch_spin_relax(lock) cpu_relax() -#define arch_read_relax(lock) cpu_relax() -#define arch_write_relax(lock) cpu_relax() - - -#endif diff --git a/arch/openrisc/include/asm/spinlock_types.h b/arch/openrisc/include/asm/spinlock_types.h deleted file mode 100644 index 7c6fb1208c88..000000000000 --- a/arch/openrisc/include/asm/spinlock_types.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _ASM_OPENRISC_SPINLOCK_TYPES_H -#define _ASM_OPENRISC_SPINLOCK_TYPES_H - -#include <asm/qspinlock_types.h> -#include <asm/qrwlock_types.h> - -#endif /* _ASM_OPENRISC_SPINLOCK_TYPES_H */ diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h index 903ed882bdec..5e037d9659c5 100644 --- a/arch/openrisc/include/asm/syscall.h +++ b/arch/openrisc/include/asm/syscall.h @@ -26,6 +26,12 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs) } static inline void +syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr) +{ + regs->orig_gpr11 = nr; +} + +static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { regs->gpr[11] = regs->orig_gpr11; @@ -57,6 +63,13 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, memcpy(args, ®s->gpr[3], 6 * sizeof(args[0])); } +static inline void +syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, + const unsigned long *args) +{ + memcpy(®s->gpr[3], args, 6 * sizeof(args[0])); +} + static inline int syscall_get_arch(struct task_struct *task) { return AUDIT_ARCH_OPENRISC; diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h index aa1c7e98722e..9f4c47961bea 100644 --- a/arch/openrisc/include/asm/syscalls.h +++ b/arch/openrisc/include/asm/syscalls.h @@ -25,8 +25,4 @@ asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp, asmlinkage long __sys_clone3(struct clone_args __user *uargs, size_t size); asmlinkage long __sys_fork(void); -#define sys_clone __sys_clone -#define sys_clone3 __sys_clone3 -#define sys_fork __sys_fork - #endif /* __ASM_OPENRISC_SYSCALLS_H */ diff --git a/arch/openrisc/include/asm/text-patching.h b/arch/openrisc/include/asm/text-patching.h new file mode 100644 index 000000000000..d19098dac0cc --- /dev/null +++ b/arch/openrisc/include/asm/text-patching.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Chen Miao + */ + +#ifndef _ASM_OPENRISC_PATCHING_H +#define _ASM_OPENRISC_PATCHING_H + +#include <linux/types.h> + +int patch_insn_write(void *addr, u32 insn); + +#endif /* _ASM_OPENRISC_PATCHING_H */ diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h index 659834ab87fa..e338fff7efb0 100644 --- a/arch/openrisc/include/asm/thread_info.h +++ b/arch/openrisc/include/asm/thread_info.h @@ -17,7 +17,7 @@ #ifdef __KERNEL__ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/types.h> #include <asm/processor.h> #endif @@ -38,9 +38,7 @@ * - if the contents of this structure are changed, the assembly constants * must also be changed */ -#ifndef __ASSEMBLY__ - -typedef unsigned long mm_segment_t; +#ifndef __ASSEMBLER__ struct thread_info { struct task_struct *task; /* main task structure */ @@ -48,10 +46,6 @@ struct thread_info { __u32 cpu; /* current CPU */ __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ - mm_segment_t addr_limit; /* thread address space: - 0-0x7FFFFFFF for 
user-thead - 0-0xFFFFFFFF for kernel-thread - */ __u8 supervisor_stack[0]; /* saved context data */ @@ -64,14 +58,13 @@ struct thread_info { * * preempt_count needs to be 1 initially, until the scheduler is functional. */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ .ksp = 0, \ } @@ -82,7 +75,7 @@ register struct thread_info *current_thread_info_reg asm("r10"); #define get_thread_info(ti) get_task_struct((ti)->task) #define put_thread_info(ti) put_task_struct((ti)->task) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* * thread information flags diff --git a/arch/openrisc/include/asm/timex.h b/arch/openrisc/include/asm/timex.h index d52b4e536e3f..5487fa93dd9b 100644 --- a/arch/openrisc/include/asm/timex.h +++ b/arch/openrisc/include/asm/timex.h @@ -23,6 +23,7 @@ static inline cycles_t get_cycles(void) { return mfspr(SPR_TTCR); } +#define get_cycles get_cycles /* This isn't really used any more */ #define CLOCK_TICK_RATE 1000 diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 120f5005461b..d6500a374e18 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -22,44 +22,7 @@ #include <linux/string.h> #include <asm/page.h> #include <asm/extable.h> - -/* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * For historical reasons, these macros are grossly misnamed. - */ - -/* addr_limit is the maximum accessible address for the task. we misuse - * the KERNEL_DS and USER_DS values to both assign and compare the - * addr_limit values through the equally misnamed get/set_fs macros. - * (see above) - */ - -#define KERNEL_DS (~0UL) - -#define USER_DS (TASK_SIZE) -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - -#define uaccess_kernel() (get_fs() == KERNEL_DS) - -/* Ensure that the range from addr to addr+size is all within the process' - * address space - */ -static inline int __range_ok(unsigned long addr, unsigned long size) -{ - const mm_segment_t fs = get_fs(); - - return size <= fs && addr <= (fs - size); -} - -#define access_ok(addr, size) \ -({ \ - __chk_user_ptr(addr); \ - __range_ok((unsigned long)(addr), (size)); \ -}) +#include <asm-generic/access_ok.h> /* * These are the main single-value transfer routines. They automatically @@ -268,9 +231,6 @@ clear_user(void __user *addr, unsigned long size) return size; } -#define user_addr_max() \ - (uaccess_kernel() ? 
~0UL : TASK_SIZE) - extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strnlen_user(const char __user *str, long n); diff --git a/arch/openrisc/include/asm/unistd.h b/arch/openrisc/include/asm/unistd.h new file mode 100644 index 000000000000..c73f65e18d3b --- /dev/null +++ b/arch/openrisc/include/asm/unistd.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ + +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_TIME32_SYSCALLS + +#include <uapi/asm/unistd.h> diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild index e78470141932..2501e82a1a0a 100644 --- a/arch/openrisc/include/uapi/asm/Kbuild +++ b/arch/openrisc/include/uapi/asm/Kbuild @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 +syscall-y += unistd_32.h + generic-y += ucontext.h diff --git a/arch/openrisc/include/uapi/asm/elf.h b/arch/openrisc/include/uapi/asm/elf.h index e892d5061685..441e343f8268 100644 --- a/arch/openrisc/include/uapi/asm/elf.h +++ b/arch/openrisc/include/uapi/asm/elf.h @@ -34,15 +34,72 @@ #include <asm/ptrace.h> /* The OR1K relocation types... not all relevant for module loader */ -#define R_OR32_NONE 0 -#define R_OR32_32 1 -#define R_OR32_16 2 -#define R_OR32_8 3 -#define R_OR32_CONST 4 -#define R_OR32_CONSTH 5 -#define R_OR32_JUMPTARG 6 -#define R_OR32_VTINHERIT 7 -#define R_OR32_VTENTRY 8 +#define R_OR1K_NONE 0 +#define R_OR1K_32 1 +#define R_OR1K_16 2 +#define R_OR1K_8 3 +#define R_OR1K_LO_16_IN_INSN 4 +#define R_OR1K_HI_16_IN_INSN 5 +#define R_OR1K_INSN_REL_26 6 +#define R_OR1K_GNU_VTENTRY 7 +#define R_OR1K_GNU_VTINHERIT 8 +#define R_OR1K_32_PCREL 9 +#define R_OR1K_16_PCREL 10 +#define R_OR1K_8_PCREL 11 +#define R_OR1K_GOTPC_HI16 12 +#define R_OR1K_GOTPC_LO16 13 +#define R_OR1K_GOT16 14 +#define R_OR1K_PLT26 15 +#define R_OR1K_GOTOFF_HI16 16 +#define R_OR1K_GOTOFF_LO16 17 +#define R_OR1K_COPY 18 +#define R_OR1K_GLOB_DAT 19 +#define R_OR1K_JMP_SLOT 20 +#define R_OR1K_RELATIVE 21 +#define R_OR1K_TLS_GD_HI16 22 +#define R_OR1K_TLS_GD_LO16 23 +#define R_OR1K_TLS_LDM_HI16 24 +#define R_OR1K_TLS_LDM_LO16 25 +#define R_OR1K_TLS_LDO_HI16 26 +#define R_OR1K_TLS_LDO_LO16 27 +#define R_OR1K_TLS_IE_HI16 28 +#define R_OR1K_TLS_IE_LO16 29 +#define R_OR1K_TLS_LE_HI16 30 +#define R_OR1K_TLS_LE_LO16 31 +#define R_OR1K_TLS_TPOFF 32 +#define R_OR1K_TLS_DTPOFF 33 +#define R_OR1K_TLS_DTPMOD 34 +#define R_OR1K_AHI16 35 +#define R_OR1K_GOTOFF_AHI16 36 +#define R_OR1K_TLS_IE_AHI16 37 +#define R_OR1K_TLS_LE_AHI16 38 +#define R_OR1K_SLO16 39 +#define R_OR1K_GOTOFF_SLO16 40 +#define R_OR1K_TLS_LE_SLO16 41 +#define R_OR1K_PCREL_PG21 42 +#define R_OR1K_GOT_PG21 43 +#define R_OR1K_TLS_GD_PG21 44 +#define R_OR1K_TLS_LDM_PG21 45 +#define R_OR1K_TLS_IE_PG21 46 +#define R_OR1K_LO13 47 +#define R_OR1K_GOT_LO13 48 +#define R_OR1K_TLS_GD_LO13 49 +#define R_OR1K_TLS_LDM_LO13 50 +#define R_OR1K_TLS_IE_LO13 51 +#define R_OR1K_SLO13 52 +#define R_OR1K_PLTA26 53 +#define R_OR1K_GOT_AHI16 54 + +/* Old relocation names */ +#define R_OR32_NONE R_OR1K_NONE +#define R_OR32_32 R_OR1K_32 +#define R_OR32_16 R_OR1K_16 +#define R_OR32_8 R_OR1K_8 +#define R_OR32_CONST R_OR1K_LO_16_IN_INSN +#define R_OR32_CONSTH R_OR1K_HI_16_IN_INSN +#define R_OR32_JUMPTARG R_OR1K_INSN_REL_26 +#define R_OR32_VTENTRY R_OR1K_GNU_VTENTRY +#define R_OR32_VTINHERIT R_OR1K_GNU_VTINHERIT typedef unsigned long elf_greg_t; @@ -53,8 +110,7 @@ typedef unsigned long elf_greg_t; #define ELF_NGREG (sizeof(struct 
user_regs_struct) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; -/* A placeholder; OR32 does not have fp support yes, so no fp regs for now. */ -typedef unsigned long elf_fpregset_t; +typedef struct __or1k_fpu_state elf_fpregset_t; /* EM_OPENRISC is defined in linux/elf-em.h */ #define EM_OR32 0x8472 diff --git a/arch/openrisc/include/uapi/asm/ptrace.h b/arch/openrisc/include/uapi/asm/ptrace.h index d4fab268f6aa..1f12a60d5a06 100644 --- a/arch/openrisc/include/uapi/asm/ptrace.h +++ b/arch/openrisc/include/uapi/asm/ptrace.h @@ -20,7 +20,7 @@ #ifndef _UAPI__ASM_OPENRISC_PTRACE_H #define _UAPI__ASM_OPENRISC_PTRACE_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * This is the layout of the regset returned by the GETREGSET ptrace call */ @@ -30,6 +30,10 @@ struct user_regs_struct { unsigned long pc; unsigned long sr; }; + +struct __or1k_fpu_state { + unsigned long fpcsr; +}; #endif diff --git a/arch/openrisc/include/uapi/asm/sigcontext.h b/arch/openrisc/include/uapi/asm/sigcontext.h index 8ab775fc3450..e7ffb58ff58f 100644 --- a/arch/openrisc/include/uapi/asm/sigcontext.h +++ b/arch/openrisc/include/uapi/asm/sigcontext.h @@ -28,7 +28,10 @@ struct sigcontext { struct user_regs_struct regs; /* needs to be first */ - unsigned long oldmask; + union { + unsigned long fpcsr; + unsigned long oldmask; /* unused */ + }; }; #endif /* __ASM_OPENRISC_SIGCONTEXT_H */ diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h index fae34c60fa88..46b94d454efd 100644 --- a/arch/openrisc/include/uapi/asm/unistd.h +++ b/arch/openrisc/include/uapi/asm/unistd.h @@ -17,17 +17,4 @@ * (at your option) any later version. */ -#define sys_mmap2 sys_mmap_pgoff - -#define __ARCH_WANT_RENAMEAT -#define __ARCH_WANT_STAT64 -#define __ARCH_WANT_SET_GET_RLIMIT -#define __ARCH_WANT_SYS_FORK -#define __ARCH_WANT_SYS_CLONE -#define __ARCH_WANT_SYS_CLONE3 -#define __ARCH_WANT_TIME32_SYSCALLS - -#include <asm-generic/unistd.h> - -#define __NR_or1k_atomic __NR_arch_specific_syscall -__SYSCALL(__NR_or1k_atomic, sys_or1k_atomic) +#include <asm/unistd_32.h> diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile index 2d172e79f58d..19e0eb94f2eb 100644 --- a/arch/openrisc/kernel/Makefile +++ b/arch/openrisc/kernel/Makefile @@ -3,15 +3,17 @@ # Makefile for the linux kernel. # -extra-y := head.o vmlinux.lds +always-$(KBUILD_BUILTIN) := vmlinux.lds -obj-y := setup.o or32_ksyms.o process.o dma.o \ +obj-y := head.o setup.o or32_ksyms.o process.o dma.o \ traps.o time.o irq.o entry.o ptrace.o signal.o \ - sys_call_table.o unwinder.o + sys_call_table.o unwinder.o cacheinfo.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_SMP) += smp.o sync-timer.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_OF) += prom.o +obj-y += patching.o clean: diff --git a/arch/openrisc/kernel/Makefile.syscalls b/arch/openrisc/kernel/Makefile.syscalls new file mode 100644 index 000000000000..525a1e7e7fc9 --- /dev/null +++ b/arch/openrisc/kernel/Makefile.syscalls @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +syscall_abis_32 += or1k time32 stat64 rlimit renameat diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c index 710651d5aaae..3cc826f2216b 100644 --- a/arch/openrisc/kernel/asm-offsets.c +++ b/arch/openrisc/kernel/asm-offsets.c @@ -18,6 +18,7 @@ * compile this file to assembler, and then extract the * #defines from the assembly-language output. 
*/ +#define COMPILE_OFFSETS #include <linux/signal.h> #include <linux/sched.h> diff --git a/arch/openrisc/kernel/cacheinfo.c b/arch/openrisc/kernel/cacheinfo.c new file mode 100644 index 000000000000..61230545e4ff --- /dev/null +++ b/arch/openrisc/kernel/cacheinfo.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * OpenRISC cacheinfo support + * + * Based on work done for MIPS and LoongArch. All original copyrights + * apply as per the original source declaration. + * + * OpenRISC implementation: + * Copyright (C) 2025 Sahil Siddiq <sahilcdq@proton.me> + */ + +#include <linux/cacheinfo.h> +#include <asm/cpuinfo.h> +#include <asm/spr.h> +#include <asm/spr_defs.h> + +static inline void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type, + unsigned int level, struct cache_desc *cache, int cpu) +{ + this_leaf->type = type; + this_leaf->level = level; + this_leaf->coherency_line_size = cache->block_size; + this_leaf->number_of_sets = cache->sets; + this_leaf->ways_of_associativity = cache->ways; + this_leaf->size = cache->size; + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); +} + +int init_cache_level(unsigned int cpu) +{ + struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + int leaves = 0, levels = 0; + unsigned long upr = mfspr(SPR_UPR); + unsigned long iccfgr, dccfgr; + + if (!(upr & SPR_UPR_UP)) { + printk(KERN_INFO + "-- no UPR register... unable to detect configuration\n"); + return -ENOENT; + } + + if (cpu_cache_is_present(SPR_UPR_DCP)) { + dccfgr = mfspr(SPR_DCCFGR); + cpuinfo->dcache.ways = 1 << (dccfgr & SPR_DCCFGR_NCW); + cpuinfo->dcache.sets = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3); + cpuinfo->dcache.block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7); + cpuinfo->dcache.size = + cpuinfo->dcache.sets * cpuinfo->dcache.ways * cpuinfo->dcache.block_size; + leaves += 1; + printk(KERN_INFO + "-- dcache: %d bytes total, %d bytes/line, %d set(s), %d way(s)\n", + cpuinfo->dcache.size, cpuinfo->dcache.block_size, + cpuinfo->dcache.sets, cpuinfo->dcache.ways); + } else + printk(KERN_INFO "-- dcache disabled\n"); + + if (cpu_cache_is_present(SPR_UPR_ICP)) { + iccfgr = mfspr(SPR_ICCFGR); + cpuinfo->icache.ways = 1 << (iccfgr & SPR_ICCFGR_NCW); + cpuinfo->icache.sets = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3); + cpuinfo->icache.block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7); + cpuinfo->icache.size = + cpuinfo->icache.sets * cpuinfo->icache.ways * cpuinfo->icache.block_size; + leaves += 1; + printk(KERN_INFO + "-- icache: %d bytes total, %d bytes/line, %d set(s), %d way(s)\n", + cpuinfo->icache.size, cpuinfo->icache.block_size, + cpuinfo->icache.sets, cpuinfo->icache.ways); + } else + printk(KERN_INFO "-- icache disabled\n"); + + if (!leaves) + return -ENOENT; + + levels = 1; + + this_cpu_ci->num_leaves = leaves; + this_cpu_ci->num_levels = levels; + + return 0; +} + +int populate_cache_leaves(unsigned int cpu) +{ + struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; + int level = 1; + + if (cpu_cache_is_present(SPR_UPR_DCP)) { + ci_leaf_init(this_leaf, CACHE_TYPE_DATA, level, &cpuinfo->dcache, cpu); + this_leaf->attributes = ((mfspr(SPR_DCCFGR) & SPR_DCCFGR_CWS) >> 8) ? 
+ CACHE_WRITE_BACK : CACHE_WRITE_THROUGH; + this_leaf++; + } + + if (cpu_cache_is_present(SPR_UPR_ICP)) + ci_leaf_init(this_leaf, CACHE_TYPE_INST, level, &cpuinfo->icache, cpu); + + this_cpu_ci->cpu_map_populated = true; + + return 0; +} diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index a82b2caaa560..af932a4ad306 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -17,6 +17,7 @@ #include <linux/pagewalk.h> #include <asm/cpuinfo.h> +#include <asm/cacheflush.h> #include <asm/spr_defs.h> #include <asm/tlbflush.h> @@ -24,9 +25,6 @@ static int page_set_nocache(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk) { - unsigned long cl; - struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; - pte_val(*pte) |= _PAGE_CI; /* @@ -36,8 +34,7 @@ page_set_nocache(pte_t *pte, unsigned long addr, flush_tlb_kernel_range(addr, addr + PAGE_SIZE); /* Flush page out of dcache */ - for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size) - mtspr(SPR_DCBFR, cl); + local_dcache_range_flush(__pa(addr), __pa(next)); return 0; } @@ -74,10 +71,10 @@ void *arch_dma_set_uncached(void *cpu_addr, size_t size) * We need to iterate through the pages, clearing the dcache for * them and setting the cache-inhibit bit. */ - mmap_read_lock(&init_mm); - error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops, - NULL); - mmap_read_unlock(&init_mm); + mmap_write_lock(&init_mm); + error = walk_kernel_page_table_range(va, va + size, + &set_nocache_walk_ops, NULL, NULL); + mmap_write_unlock(&init_mm); if (error) return ERR_PTR(error); @@ -88,31 +85,24 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size) { unsigned long va = (unsigned long)cpu_addr; - mmap_read_lock(&init_mm); + mmap_write_lock(&init_mm); /* walk_page_range shouldn't be able to fail here */ - WARN_ON(walk_page_range(&init_mm, va, va + size, - &clear_nocache_walk_ops, NULL)); - mmap_read_unlock(&init_mm); + WARN_ON(walk_kernel_page_table_range(va, va + size, + &clear_nocache_walk_ops, NULL, NULL)); + mmap_write_unlock(&init_mm); } void arch_sync_dma_for_device(phys_addr_t addr, size_t size, enum dma_data_direction dir) { - unsigned long cl; - struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; - switch (dir) { case DMA_TO_DEVICE: /* Flush the dcache for the requested range */ - for (cl = addr; cl < addr + size; - cl += cpuinfo->dcache_block_size) - mtspr(SPR_DCBFR, cl); + local_dcache_range_flush(addr, addr + size); break; case DMA_FROM_DEVICE: /* Invalidate the dcache for the requested range */ - for (cl = addr; cl < addr + size; - cl += cpuinfo->dcache_block_size) - mtspr(SPR_DCBIR, cl); + local_dcache_range_inv(addr, addr + size); break; default: /* diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index 3ca1b1f490b9..c7e90b09645e 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -173,7 +173,6 @@ handler: ;\ l.sw PT_GPR28(r1),r28 ;\ l.sw PT_GPR29(r1),r29 ;\ /* r30 already save */ ;\ -/* l.sw PT_GPR30(r1),r30*/ ;\ l.sw PT_GPR31(r1),r31 ;\ TRACE_IRQS_OFF_ENTRY ;\ /* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\ @@ -211,9 +210,8 @@ handler: ;\ l.sw PT_GPR27(r1),r27 ;\ l.sw PT_GPR28(r1),r28 ;\ l.sw PT_GPR29(r1),r29 ;\ - /* r31 already saved */ ;\ - l.sw PT_GPR30(r1),r30 ;\ -/* l.sw PT_GPR31(r1),r31 */ ;\ + /* r30 already saved */ ;\ + l.sw PT_GPR31(r1),r31 ;\ /* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\ l.addi r30,r0,-1 ;\ l.sw PT_ORIG_GPR11(r1),r30 ;\ @@ -241,6 
+239,8 @@ handler: ;\ /* =====================================================[ exceptions] === */ + __REF + /* ---[ 0x100: RESET exception ]----------------------------------------- */ EXCEPTION_ENTRY(_tng_kernel_start) @@ -601,7 +601,7 @@ UNHANDLED_EXCEPTION(_vector_0xb00,0xb00) */ _string_syscall_return: - .string "syscall return %ld \n\r\0" + .string "syscall r9:0x%08x -> syscall(%ld) return %ld\0" .align 4 ENTRY(_sys_call_handler) @@ -679,15 +679,25 @@ _syscall_return: _syscall_debug: l.movhi r3,hi(_string_syscall_return) l.ori r3,r3,lo(_string_syscall_return) - l.ori r27,r0,1 + l.ori r27,r0,2 l.sw -4(r1),r27 l.sw -8(r1),r11 - l.addi r1,r1,-8 + l.lwz r29,PT_ORIG_GPR11(r1) + l.sw -12(r1),r29 + l.lwz r29,PT_GPR9(r1) + l.sw -16(r1),r29 l.movhi r27,hi(_printk) l.ori r27,r27,lo(_printk) l.jalr r27 - l.nop - l.addi r1,r1,8 + l.addi r1,r1,-16 + l.addi r1,r1,16 +#endif +#if 0 +_syscall_show_regs: + l.movhi r27,hi(show_registers) + l.ori r27,r27,lo(show_registers) + l.jalr r27 + l.or r3,r1,r1 #endif _syscall_check_trace_leave: @@ -704,6 +714,10 @@ _syscall_check_trace_leave: * interrupts that set NEED_RESCHED or SIGNALPENDING... really true? */ _syscall_check_work: +#ifdef CONFIG_DEBUG_RSEQ + l.jal rseq_syscall + l.ori r3,r1,0 +#endif /* Here we need to disable interrupts */ DISABLE_INTERRUPTS(r27,r29) TRACE_IRQS_OFF @@ -834,9 +848,17 @@ _syscall_badsys: /******* END SYSCALL HANDLING *******/ -/* ---[ 0xd00: Trap exception ]------------------------------------------ */ +/* ---[ 0xd00: Floating Point exception ]-------------------------------- */ -UNHANDLED_EXCEPTION(_vector_0xd00,0xd00) +EXCEPTION_ENTRY(_fpe_trap_handler) + CLEAR_LWA_FLAG(r3) + + /* r4: EA of fault (set by EXCEPTION_HANDLE) */ + l.jal do_fpe_trap + l.addi r3,r1,0 /* pt_regs */ + + l.j _ret_from_exception + l.nop /* ---[ 0xe00: Trap exception ]------------------------------------------ */ diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S index 15f1b38dfe03..bd760066f1cd 100644 --- a/arch/openrisc/kernel/head.S +++ b/arch/openrisc/kernel/head.S @@ -26,15 +26,15 @@ #include <asm/asm-offsets.h> #include <linux/of_fdt.h> -#define tophys(rd,rs) \ - l.movhi rd,hi(-KERNELBASE) ;\ +#define tophys(rd,rs) \ + l.movhi rd,hi(-KERNELBASE) ;\ l.add rd,rd,rs -#define CLEAR_GPR(gpr) \ +#define CLEAR_GPR(gpr) \ l.movhi gpr,0x0 -#define LOAD_SYMBOL_2_GPR(gpr,symbol) \ - l.movhi gpr,hi(symbol) ;\ +#define LOAD_SYMBOL_2_GPR(gpr,symbol) \ + l.movhi gpr,hi(symbol) ;\ l.ori gpr,gpr,lo(symbol) @@ -297,19 +297,23 @@ /* temporary store r3, r9 into r1, r10 */ ;\ l.addi r1,r3,0x0 ;\ l.addi r10,r9,0x0 ;\ - /* the string referenced by r3 must be low enough */ ;\ + LOAD_SYMBOL_2_GPR(r9,_string_unhandled_exception) ;\ + tophys (r3,r9) ;\ l.jal _emergency_print ;\ - l.ori r3,r0,lo(_string_unhandled_exception) ;\ + l.nop ;\ l.mfspr r3,r0,SPR_NPC ;\ l.jal _emergency_print_nr ;\ - l.andi r3,r3,0x1f00 ;\ - /* the string referenced by r3 must be low enough */ ;\ + l.andi r3,r3,0x1f00 ;\ + LOAD_SYMBOL_2_GPR(r9,_string_epc_prefix) ;\ + tophys (r3,r9) ;\ l.jal _emergency_print ;\ - l.ori r3,r0,lo(_string_epc_prefix) ;\ + l.nop ;\ l.jal _emergency_print_nr ;\ - l.mfspr r3,r0,SPR_EPCR_BASE ;\ + l.mfspr r3,r0,SPR_EPCR_BASE ;\ + LOAD_SYMBOL_2_GPR(r9,_string_nl) ;\ + tophys (r3,r9) ;\ l.jal _emergency_print ;\ - l.ori r3,r0,lo(_string_nl) ;\ + l.nop ;\ /* end of printing */ ;\ l.addi r3,r1,0x0 ;\ l.addi r9,r10,0x0 ;\ @@ -322,21 +326,21 @@ l.addi r1,r1,-(INT_FRAME_SIZE) ;\ /* r1 is KSP, r30 is __pa(KSP) */ ;\ tophys (r30,r1) ;\ - l.sw PT_GPR12(r30),r12 
;\ + l.sw PT_GPR12(r30),r12 ;\ l.mfspr r12,r0,SPR_EPCR_BASE ;\ l.sw PT_PC(r30),r12 ;\ l.mfspr r12,r0,SPR_ESR_BASE ;\ l.sw PT_SR(r30),r12 ;\ /* save r31 */ ;\ EXCEPTION_T_LOAD_GPR30(r12) ;\ - l.sw PT_GPR30(r30),r12 ;\ + l.sw PT_GPR30(r30),r12 ;\ /* save r10 as was prior to exception */ ;\ EXCEPTION_T_LOAD_GPR10(r12) ;\ - l.sw PT_GPR10(r30),r12 ;\ - /* save PT_SP as was prior to exception */ ;\ + l.sw PT_GPR10(r30),r12 ;\ + /* save PT_SP as was prior to exception */ ;\ EXCEPTION_T_LOAD_SP(r12) ;\ l.sw PT_SP(r30),r12 ;\ - l.sw PT_GPR13(r30),r13 ;\ + l.sw PT_GPR13(r30),r13 ;\ /* --> */ ;\ /* save exception r4, set r4 = EA */ ;\ l.sw PT_GPR4(r30),r4 ;\ @@ -353,6 +357,8 @@ /* =====================================================[ exceptions] === */ + __HEAD + /* ---[ 0x100: RESET exception ]----------------------------------------- */ .org 0x100 /* Jump to .init code at _start which lives in the .head section @@ -390,7 +396,7 @@ _dispatch_do_ipage_fault: .org 0x500 EXCEPTION_HANDLE(_timer_handler) -/* ---[ 0x600: Alignment exception ]-------------------------------------- */ +/* ---[ 0x600: Alignment exception ]------------------------------------- */ .org 0x600 EXCEPTION_HANDLE(_alignment_handler) @@ -420,9 +426,9 @@ _dispatch_do_ipage_fault: .org 0xc00 EXCEPTION_HANDLE(_sys_call_handler) -/* ---[ 0xd00: Trap exception ]------------------------------------------ */ +/* ---[ 0xd00: Floating point exception ]-------------------------------- */ .org 0xd00 - UNHANDLED_EXCEPTION(_vector_0xd00) + EXCEPTION_HANDLE(_fpe_trap_handler) /* ---[ 0xe00: Trap exception ]------------------------------------------ */ .org 0xe00 @@ -502,10 +508,10 @@ _dispatch_do_ipage_fault: /* .text*/ -/* This early stuff belongs in HEAD, but some of the functions below definitely +/* This early stuff belongs in the .init.text section, but some of the functions below definitely * don't... */ - __HEAD + __INIT .global _start _start: /* Init r0 to zero as per spec */ @@ -521,6 +527,15 @@ _start: l.ori r3,r0,0x1 l.mtspr r0,r3,SPR_SR + /* + * Start the TTCR as early as possible, so that the RNG can make use of + * measurements of boot time from the earliest opportunity. Especially + * important is that the TTCR does not return zero by the time we reach + * random_init(). 
+ */ + l.movhi r3,hi(SPR_TTMR_CR) + l.mtspr r0,r3,SPR_TTMR + CLEAR_GPR(r1) CLEAR_GPR(r2) CLEAR_GPR(r3) @@ -803,7 +818,7 @@ secondary_start: #endif -/* ========================================[ cache ]=== */ +/* ==========================================================[ cache ]=== */ /* alignment here so we don't change memory offsets with * memory controller defined @@ -1321,274 +1336,110 @@ i_pte_not_present: /* =================================================[ debugging aids ]=== */ - .align 64 -_immu_trampoline: - .space 64 -_immu_trampoline_top: - -#define TRAMP_SLOT_0 (0x0) -#define TRAMP_SLOT_1 (0x4) -#define TRAMP_SLOT_2 (0x8) -#define TRAMP_SLOT_3 (0xc) -#define TRAMP_SLOT_4 (0x10) -#define TRAMP_SLOT_5 (0x14) -#define TRAMP_FRAME_SIZE (0x18) - -ENTRY(_immu_trampoline_workaround) - // r2 EEA - // r6 is physical EEA - tophys(r6,r2) - - LOAD_SYMBOL_2_GPR(r5,_immu_trampoline) - tophys (r3,r5) // r3 is trampoline (physical) - - LOAD_SYMBOL_2_GPR(r4,0x15000000) - l.sw TRAMP_SLOT_0(r3),r4 - l.sw TRAMP_SLOT_1(r3),r4 - l.sw TRAMP_SLOT_4(r3),r4 - l.sw TRAMP_SLOT_5(r3),r4 - - // EPC = EEA - 0x4 - l.lwz r4,0x0(r6) // load op @ EEA + 0x0 (fc address) - l.sw TRAMP_SLOT_3(r3),r4 // store it to _immu_trampoline_data - l.lwz r4,-0x4(r6) // load op @ EEA - 0x4 (f8 address) - l.sw TRAMP_SLOT_2(r3),r4 // store it to _immu_trampoline_data - - l.srli r5,r4,26 // check opcode for write access - l.sfeqi r5,0 // l.j - l.bf 0f - l.sfeqi r5,0x11 // l.jr - l.bf 1f - l.sfeqi r5,1 // l.jal - l.bf 2f - l.sfeqi r5,0x12 // l.jalr - l.bf 3f - l.sfeqi r5,3 // l.bnf - l.bf 4f - l.sfeqi r5,4 // l.bf - l.bf 5f -99: - l.nop - l.j 99b // should never happen - l.nop 1 - - // r2 is EEA - // r3 is trampoline address (physical) - // r4 is instruction - // r6 is physical(EEA) - // - // r5 - -2: // l.jal - - /* 19 20 aa aa l.movhi r9,0xaaaa - * a9 29 bb bb l.ori r9,0xbbbb - * - * where 0xaaaabbbb is EEA + 0x4 shifted right 2 - */ - - l.addi r6,r2,0x4 // this is 0xaaaabbbb - - // l.movhi r9,0xaaaa - l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9 - l.sh (TRAMP_SLOT_0+0x0)(r3),r5 - l.srli r5,r6,16 - l.sh (TRAMP_SLOT_0+0x2)(r3),r5 - - // l.ori r9,0xbbbb - l.ori r5,r0,0xa929 // 0xa929 == l.ori r9 - l.sh (TRAMP_SLOT_1+0x0)(r3),r5 - l.andi r5,r6,0xffff - l.sh (TRAMP_SLOT_1+0x2)(r3),r5 - - /* falthrough, need to set up new jump offset */ - - -0: // l.j - l.slli r6,r4,6 // original offset shifted left 6 - 2 -// l.srli r6,r6,6 // original offset shifted right 2 - - l.slli r4,r2,4 // old jump position: EEA shifted left 4 -// l.srli r4,r4,6 // old jump position: shifted right 2 - - l.addi r5,r3,0xc // new jump position (physical) - l.slli r5,r5,4 // new jump position: shifted left 4 - - // calculate new jump offset - // new_off = old_off + (old_jump - new_jump) - - l.sub r5,r4,r5 // old_jump - new_jump - l.add r5,r6,r5 // orig_off + (old_jump - new_jump) - l.srli r5,r5,6 // new offset shifted right 2 - - // r5 is new jump offset - // l.j has opcode 0x0... 
- l.sw TRAMP_SLOT_2(r3),r5 // write it back - - l.j trampoline_out - l.nop - -/* ----------------------------- */ - -3: // l.jalr - - /* 19 20 aa aa l.movhi r9,0xaaaa - * a9 29 bb bb l.ori r9,0xbbbb - * - * where 0xaaaabbbb is EEA + 0x4 shifted right 2 - */ - - l.addi r6,r2,0x4 // this is 0xaaaabbbb - - // l.movhi r9,0xaaaa - l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9 - l.sh (TRAMP_SLOT_0+0x0)(r3),r5 - l.srli r5,r6,16 - l.sh (TRAMP_SLOT_0+0x2)(r3),r5 - - // l.ori r9,0xbbbb - l.ori r5,r0,0xa929 // 0xa929 == l.ori r9 - l.sh (TRAMP_SLOT_1+0x0)(r3),r5 - l.andi r5,r6,0xffff - l.sh (TRAMP_SLOT_1+0x2)(r3),r5 - - l.lhz r5,(TRAMP_SLOT_2+0x0)(r3) // load hi part of jump instruction - l.andi r5,r5,0x3ff // clear out opcode part - l.ori r5,r5,0x4400 // opcode changed from l.jalr -> l.jr - l.sh (TRAMP_SLOT_2+0x0)(r3),r5 // write it back - - /* falthrough */ - -1: // l.jr - l.j trampoline_out - l.nop - -/* ----------------------------- */ - -4: // l.bnf -5: // l.bf - l.slli r6,r4,6 // original offset shifted left 6 - 2 -// l.srli r6,r6,6 // original offset shifted right 2 - - l.slli r4,r2,4 // old jump position: EEA shifted left 4 -// l.srli r4,r4,6 // old jump position: shifted right 2 - - l.addi r5,r3,0xc // new jump position (physical) - l.slli r5,r5,4 // new jump position: shifted left 4 - - // calculate new jump offset - // new_off = old_off + (old_jump - new_jump) - - l.add r6,r6,r4 // (orig_off + old_jump) - l.sub r6,r6,r5 // (orig_off + old_jump) - new_jump - l.srli r6,r6,6 // new offset shifted right 2 - - // r6 is new jump offset - l.lwz r4,(TRAMP_SLOT_2+0x0)(r3) // load jump instruction - l.srli r4,r4,16 - l.andi r4,r4,0xfc00 // get opcode part - l.slli r4,r4,16 - l.or r6,r4,r6 // l.b(n)f new offset - l.sw TRAMP_SLOT_2(r3),r6 // write it back - - /* we need to add l.j to EEA + 0x8 */ - tophys (r4,r2) // may not be needed (due to shifts down_ - l.addi r4,r4,(0x8 - 0x8) // jump target = r2 + 0x8 (compensate for 0x8) - // jump position = r5 + 0x8 (0x8 compensated) - l.sub r4,r4,r5 // jump offset = target - new_position + 0x8 - - l.slli r4,r4,4 // the amount of info in imediate of jump - l.srli r4,r4,6 // jump instruction with offset - l.sw TRAMP_SLOT_4(r3),r4 // write it to 4th slot - - /* fallthrough */ - -trampoline_out: - // set up new EPC to point to our trampoline code - LOAD_SYMBOL_2_GPR(r5,_immu_trampoline) - l.mtspr r0,r5,SPR_EPCR_BASE - - // immu_trampoline is (4x) CACHE_LINE aligned - // and only 6 instructions long, - // so we need to invalidate only 2 lines - - /* Establish cache block size - If BS=0, 16; - If BS=1, 32; - r14 contain block size - */ - l.mfspr r21,r0,SPR_ICCFGR - l.andi r21,r21,SPR_ICCFGR_CBS - l.srli r21,r21,7 - l.ori r23,r0,16 - l.sll r14,r23,r21 - - l.mtspr r0,r5,SPR_ICBIR - l.add r5,r5,r14 - l.mtspr r0,r5,SPR_ICBIR - - l.jr r9 - l.nop - - /* - * DSCR: prints a string referenced by r3. + * DESC: Prints ASCII character stored in r7 * - * PRMS: r3 - address of the first character of null - * terminated string to be printed + * PRMS: r7 - a 32-bit value with an ASCII character in the first byte + * position. 
* - * PREQ: UART at UART_BASE_ADD has to be initialized + * PREQ: The UART at UART_BASE_ADD has to be initialized * - * POST: caller should be aware that r3, r9 are changed + * POST: internally used but restores: + * r4 - to store UART_BASE_ADD + * r5 - for loading OFF_TXFULL / THRE,TEMT + * r6 - for storing bitmask (SERIAL_8250) */ -ENTRY(_emergency_print) +ENTRY(_emergency_putc) EMERGENCY_PRINT_STORE_GPR4 EMERGENCY_PRINT_STORE_GPR5 EMERGENCY_PRINT_STORE_GPR6 - EMERGENCY_PRINT_STORE_GPR7 -2: - l.lbz r7,0(r3) - l.sfeq r7,r0 - l.bf 9f - l.nop -// putc: l.movhi r4,hi(UART_BASE_ADD) + l.ori r4,r4,lo(UART_BASE_ADD) +#if defined(CONFIG_SERIAL_LITEUART) + /* Check OFF_TXFULL status */ +1: l.lwz r5,4(r4) + l.andi r5,r5,0xff + l.sfnei r5,0 + l.bf 1b + l.nop + + /* Write character */ + l.andi r7,r7,0xff + l.sw 0(r4),r7 +#elif defined(CONFIG_SERIAL_8250) + /* Check UART LSR THRE (hold) bit */ l.addi r6,r0,0x20 1: l.lbz r5,5(r4) l.andi r5,r5,0x20 l.sfeq r5,r6 l.bnf 1b - l.nop + l.nop + /* Write character */ l.sb 0(r4),r7 + /* Check UART LSR THRE|TEMT (hold, empty) bits */ l.addi r6,r0,0x60 1: l.lbz r5,5(r4) l.andi r5,r5,0x60 l.sfeq r5,r6 l.bnf 1b - l.nop + l.nop +#endif + EMERGENCY_PRINT_LOAD_GPR6 + EMERGENCY_PRINT_LOAD_GPR5 + EMERGENCY_PRINT_LOAD_GPR4 + l.jr r9 + l.nop + +/* + * DSCR: prints a string referenced by r3. + * + * PRMS: r3 - address of the first character of null + * terminated string to be printed + * + * PREQ: UART at UART_BASE_ADD has to be initialized + * + * POST: caller should be aware that r3, r9 are changed + */ +ENTRY(_emergency_print) + EMERGENCY_PRINT_STORE_GPR7 + EMERGENCY_PRINT_STORE_GPR9 + + /* Load character to r7, check for null terminator */ +2: l.lbz r7,0(r3) + l.sfeqi r7,0x0 + l.bf 9f + l.nop + + l.jal _emergency_putc + l.nop /* next character */ l.j 2b - l.addi r3,r3,0x1 + l.addi r3,r3,0x1 9: + EMERGENCY_PRINT_LOAD_GPR9 EMERGENCY_PRINT_LOAD_GPR7 - EMERGENCY_PRINT_LOAD_GPR6 - EMERGENCY_PRINT_LOAD_GPR5 - EMERGENCY_PRINT_LOAD_GPR4 l.jr r9 - l.nop + l.nop +/* + * DSCR: prints a number in r3 in hex. + * + * PRMS: r3 - a 32-bit unsigned integer + * + * PREQ: UART at UART_BASE_ADD has to be initialized + * + * POST: caller should be aware that r3, r9 are changed + */ ENTRY(_emergency_print_nr) - EMERGENCY_PRINT_STORE_GPR4 - EMERGENCY_PRINT_STORE_GPR5 - EMERGENCY_PRINT_STORE_GPR6 EMERGENCY_PRINT_STORE_GPR7 EMERGENCY_PRINT_STORE_GPR8 + EMERGENCY_PRINT_STORE_GPR9 l.addi r8,r0,32 // shift register @@ -1600,58 +1451,39 @@ ENTRY(_emergency_print_nr) /* don't skip the last zero if number == 0x0 */ l.sfeqi r8,0x4 l.bf 2f - l.nop + l.nop l.sfeq r7,r0 l.bf 1b - l.nop + l.nop 2: l.srl r7,r3,r8 l.andi r7,r7,0xf l.sflts r8,r0 - l.bf 9f + l.bf 9f + /* Numbers greater than 9 translate to a-f */ l.sfgtui r7,0x9 l.bnf 8f - l.nop + l.nop l.addi r7,r7,0x27 -8: - l.addi r7,r7,0x30 -// putc: - l.movhi r4,hi(UART_BASE_ADD) - - l.addi r6,r0,0x20 -1: l.lbz r5,5(r4) - l.andi r5,r5,0x20 - l.sfeq r5,r6 - l.bnf 1b - l.nop - - l.sb 0(r4),r7 - - l.addi r6,r0,0x60 -1: l.lbz r5,5(r4) - l.andi r5,r5,0x60 - l.sfeq r5,r6 - l.bnf 1b - l.nop + /* Convert to ascii and output character */ +8: l.jal _emergency_putc + l.addi r7,r7,0x30 /* next character */ l.j 2b l.addi r8,r8,-0x4 9: + EMERGENCY_PRINT_LOAD_GPR9 EMERGENCY_PRINT_LOAD_GPR8 EMERGENCY_PRINT_LOAD_GPR7 - EMERGENCY_PRINT_LOAD_GPR6 - EMERGENCY_PRINT_LOAD_GPR5 - EMERGENCY_PRINT_LOAD_GPR4 l.jr r9 - l.nop - + l.nop /* * This should be used for debugging only. 
@@ -1676,7 +1508,9 @@ ENTRY(_emergency_print_nr) ENTRY(_early_uart_init) l.movhi r3,hi(UART_BASE_ADD) + l.ori r3,r3,lo(UART_BASE_ADD) +#if defined(CONFIG_SERIAL_8250) l.addi r4,r0,0x7 l.sb 0x2(r3),r4 @@ -1694,9 +1528,10 @@ ENTRY(_early_uart_init) l.addi r4,r0,((UART_DIVISOR) & 0x000000ff) l.sb UART_DLL(r3),r4 l.sb 0x3(r3),r5 +#endif l.jr r9 - l.nop + l.nop .align 0x1000 .global _secondary_evbar @@ -1711,13 +1546,13 @@ _secondary_evbar: .section .rodata _string_unhandled_exception: - .string "\n\rRunarunaround: Unhandled exception 0x\0" + .string "\r\nRunarunaround: Unhandled exception 0x\0" _string_epc_prefix: .string ": EPC=0x\0" _string_nl: - .string "\n\r\0" + .string "\r\n\0" /* ========================================[ page aligned structures ]=== */ diff --git a/arch/openrisc/kernel/jump_label.c b/arch/openrisc/kernel/jump_label.c new file mode 100644 index 000000000000..ab7137c23b46 --- /dev/null +++ b/arch/openrisc/kernel/jump_label.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Chen Miao + * + * Based on arch/arm/kernel/jump_label.c + */ +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/memory.h> +#include <asm/bug.h> +#include <asm/cacheflush.h> +#include <asm/text-patching.h> + +bool arch_jump_label_transform_queue(struct jump_entry *entry, + enum jump_label_type type) +{ + void *addr = (void *)jump_entry_code(entry); + u32 insn; + + if (type == JUMP_LABEL_JMP) { + long offset; + + offset = jump_entry_target(entry) - jump_entry_code(entry); + /* + * The actual maximum range of the l.j instruction's offset is -134,217,728 + * ~ 134,217,724 (sign 26-bit imm). + * For the original jump range, we need to right-shift N by 2 to obtain the + * instruction's offset. + */ + WARN_ON_ONCE(offset < -134217728 || offset > 134217724); + + /* 26bit imm mask */ + offset = (offset >> 2) & 0x03ffffff; + + insn = offset; + } else { + insn = OPENRISC_INSN_NOP; + } + + if (early_boot_irqs_disabled) + copy_to_kernel_nofault(addr, &insn, sizeof(insn)); + else + patch_insn_write(addr, insn); + + return true; +} + +void arch_jump_label_transform_apply(void) +{ + kick_all_cpus_sync(); +} diff --git a/arch/openrisc/kernel/module.c b/arch/openrisc/kernel/module.c index 532013f523ac..4ac4fbaa827c 100644 --- a/arch/openrisc/kernel/module.c +++ b/arch/openrisc/kernel/module.c @@ -39,22 +39,36 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, value = sym->st_value + rel[i].r_addend; switch (ELF32_R_TYPE(rel[i].r_info)) { - case R_OR32_32: + case R_OR1K_32: *location = value; break; - case R_OR32_CONST: + case R_OR1K_LO_16_IN_INSN: *((uint16_t *)location + 1) = value; break; - case R_OR32_CONSTH: + case R_OR1K_HI_16_IN_INSN: *((uint16_t *)location + 1) = value >> 16; break; - case R_OR32_JUMPTARG: + case R_OR1K_INSN_REL_26: value -= (uint32_t)location; value >>= 2; value &= 0x03ffffff; value |= *location & 0xfc000000; *location = value; break; + case R_OR1K_32_PCREL: + value -= (uint32_t)location; + *location = value; + break; + case R_OR1K_AHI16: + /* Adjust the operand to match with a signed LO16. */ + value += 0x8000; + *((uint16_t *)location + 1) = value >> 16; + break; + case R_OR1K_SLO16: + /* Split value lower 16-bits. 
*/ + value = ((value & 0xf800) << 10) | (value & 0x7ff); + *location = (*location & ~0x3e007ff) | value; + break; default: pr_err("module %s: Unknown relocation: %u\n", me->name, ELF32_R_TYPE(rel[i].r_info)); diff --git a/arch/openrisc/kernel/patching.c b/arch/openrisc/kernel/patching.c new file mode 100644 index 000000000000..d186172beb33 --- /dev/null +++ b/arch/openrisc/kernel/patching.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 SiFive + * Copyright (C) 2025 Chen Miao + */ + +#include <linux/mm.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> + +#include <asm/insn-def.h> +#include <asm/cacheflush.h> +#include <asm/page.h> +#include <asm/fixmap.h> +#include <asm/text-patching.h> +#include <asm/sections.h> + +static DEFINE_RAW_SPINLOCK(patch_lock); + +static __always_inline void *patch_map(void *addr, int fixmap) +{ + uintptr_t uaddr = (uintptr_t) addr; + phys_addr_t phys; + + if (core_kernel_text(uaddr)) { + phys = __pa_symbol(addr); + } else { + struct page *page = vmalloc_to_page(addr); + BUG_ON(!page); + phys = page_to_phys(page) + offset_in_page(addr); + } + + return (void *)set_fixmap_offset(fixmap, phys); +} + +static void patch_unmap(int fixmap) +{ + clear_fixmap(fixmap); +} + +static int __patch_insn_write(void *addr, u32 insn) +{ + void *waddr = addr; + unsigned long flags = 0; + int ret; + + raw_spin_lock_irqsave(&patch_lock, flags); + + waddr = patch_map(addr, FIX_TEXT_POKE0); + + ret = copy_to_kernel_nofault(waddr, &insn, OPENRISC_INSN_SIZE); + local_icache_range_inv((unsigned long)waddr, + (unsigned long)waddr + OPENRISC_INSN_SIZE); + + patch_unmap(FIX_TEXT_POKE0); + + raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + +/* + * patch_insn_write - Write a single instruction to a specified memory location + * This API provides a single-instruction patching, primarily used for runtime + * code modification. + * By the way, the insn size must be 4 bytes. + */ +int patch_insn_write(void *addr, u32 insn) +{ + u32 *tp = addr; + int ret; + + if ((uintptr_t) tp & 0x3) + return -EINVAL; + + ret = __patch_insn_write(tp, insn); + + return ret; +} diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index 3c0c91bcdcba..73ffb9fa3118 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -14,6 +14,7 @@ */ #define __KERNEL_SYSCALLS__ +#include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/debug.h> @@ -35,9 +36,11 @@ #include <linux/reboot.h> #include <linux/uaccess.h> +#include <asm/fpu.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/spr_defs.h> +#include <asm/switch_to.h> #include <linux/smp.h> @@ -52,6 +55,8 @@ void machine_restart(char *cmd) { do_kernel_restart(cmd); + __asm__("l.nop 13"); + /* Give a grace period for failure to restart of 1s */ mdelay(1000); @@ -61,6 +66,16 @@ void machine_restart(char *cmd) } /* + * This is used if a sys-off handler was not set by a power management + * driver, in this case we can assume we are on a simulator. On + * OpenRISC simulators l.nop 1 will trigger the simulator exit. + */ +static void default_power_off(void) +{ + __asm__("l.nop 1"); +} + +/* * Similar to machine_power_off, but don't shut off power. Add code * here to freeze the system for e.g. post-mortem debug purpose when * possible. This halt has nothing to do with the idle halt. 
@@ -75,7 +90,8 @@ void machine_halt(void) void machine_power_off(void) { printk(KERN_INFO "*** MACHINE POWER OFF ***\n"); - __asm__("l.nop 1"); + do_kernel_power_off(); + default_power_off(); } /* @@ -87,9 +103,10 @@ void arch_cpu_idle(void) raw_local_irq_enable(); if (mfspr(SPR_UPR) & SPR_UPR_PMP) mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME); + raw_local_irq_disable(); } -void (*pm_power_off) (void) = machine_power_off; +void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); /* @@ -103,17 +120,11 @@ void flush_thread(void) void show_regs(struct pt_regs *regs) { - extern void show_registers(struct pt_regs *regs); - show_regs_print_info(KERN_DEFAULT); /* __PHX__ cleanup this mess */ show_registers(regs); } -void release_thread(struct task_struct *dead_task) -{ -} - /* * Copy the thread-specific (arch specific) info from the current * process to the new one p @@ -152,9 +163,11 @@ extern asmlinkage void ret_from_fork(void); */ int -copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, - struct task_struct *p, unsigned long tls) +copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { + u64 clone_flags = args->flags; + unsigned long usp = args->stack; + unsigned long tls = args->tls; struct pt_regs *userregs; struct pt_regs *kregs; unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; @@ -172,10 +185,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, sp -= sizeof(struct pt_regs); kregs = (struct pt_regs *)sp; - if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { + if (unlikely(args->fn)) { memset(kregs, 0, sizeof(struct pt_regs)); - kregs->gpr[20] = usp; /* fn, kernel thread */ - kregs->gpr[22] = arg; + kregs->gpr[20] = (unsigned long)args->fn; + kregs->gpr[22] = (unsigned long)args->fn_arg; } else { *userregs = *current_pt_regs(); @@ -232,6 +245,8 @@ struct task_struct *__switch_to(struct task_struct *old, local_irq_save(flags); + save_fpu(current); + /* current_set is an array of saved current pointers * (one for each cpu). we need them at user->kernel transition, * while we save them at kernel->user transition @@ -244,6 +259,8 @@ struct task_struct *__switch_to(struct task_struct *old, current_thread_info_set[smp_processor_id()] = new_ti; last = (_switch(old_ti, new_ti))->task; + restore_fpu(current); + local_irq_restore(flags); return last; diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c index 19e6008bf114..e424e9bd12a7 100644 --- a/arch/openrisc/kernel/prom.c +++ b/arch/openrisc/kernel/prom.c @@ -22,6 +22,6 @@ void __init early_init_devtree(void *params) { - early_init_dt_scan(params); + early_init_dt_scan(params, __pa(params)); memblock_allow_resize(); } diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c index 4d60ae2a12fa..552489b24855 100644 --- a/arch/openrisc/kernel/ptrace.c +++ b/arch/openrisc/kernel/ptrace.c @@ -22,12 +22,15 @@ #include <linux/ptrace.h> #include <linux/audit.h> #include <linux/regset.h> -#include <linux/tracehook.h> #include <linux/elf.h> #include <asm/thread_info.h> #include <asm/page.h> +asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); + +asmlinkage void do_syscall_trace_leave(struct pt_regs *regs); + /* * Copy the thread state to a regset that can be interpreted by userspace. 
* @@ -67,10 +70,9 @@ static int genregs_set(struct task_struct *target, int ret; /* ignore r0 */ - ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4); + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4); /* r1 - r31 */ - if (!ret) - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs->gpr+1, 4, 4*32); /* PC */ if (!ret) @@ -81,28 +83,64 @@ static int genregs_set(struct task_struct *target, * the Supervision register */ if (!ret) - ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - 4*33, -1); + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 4*33, -1); return ret; } +#ifdef CONFIG_FPU +/* + * As OpenRISC shares GPRs and floating point registers we don't need to export + * the floating point registers again. So here we only export the fpcsr special + * purpose register. + */ +static int fpregs_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + return membuf_store(&to, target->thread.fpcsr); +} + +static int fpregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + /* FPCSR */ + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpcsr, 0, 4); +} +#endif + /* * Define the register sets available on OpenRISC under Linux */ enum or1k_regset { REGSET_GENERAL, +#ifdef CONFIG_FPU + REGSET_FPU, +#endif }; static const struct user_regset or1k_regsets[] = { [REGSET_GENERAL] = { - .core_note_type = NT_PRSTATUS, + USER_REGSET_NOTE_TYPE(PRSTATUS), .n = ELF_NGREG, .size = sizeof(long), .align = sizeof(long), .regset_get = genregs_get, .set = genregs_set, }, +#ifdef CONFIG_FPU + [REGSET_FPU] = { + USER_REGSET_NOTE_TYPE(PRFPREG), + .n = sizeof(struct __or1k_fpu_state) / sizeof(long), + .size = sizeof(long), + .align = sizeof(long), + .regset_get = fpregs_get, + .set = fpregs_set, + }, +#endif }; static const struct user_regset_view user_or1k_native_view = { @@ -122,6 +160,102 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) * in exit.c or in signal.c. */ +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +static const struct pt_regs_offset regoffset_table[] = { + REG_OFFSET_NAME(sr), + REG_OFFSET_NAME(sp), + REG_OFFSET_NAME(gpr2), + REG_OFFSET_NAME(gpr3), + REG_OFFSET_NAME(gpr4), + REG_OFFSET_NAME(gpr5), + REG_OFFSET_NAME(gpr6), + REG_OFFSET_NAME(gpr7), + REG_OFFSET_NAME(gpr8), + REG_OFFSET_NAME(gpr9), + REG_OFFSET_NAME(gpr10), + REG_OFFSET_NAME(gpr11), + REG_OFFSET_NAME(gpr12), + REG_OFFSET_NAME(gpr13), + REG_OFFSET_NAME(gpr14), + REG_OFFSET_NAME(gpr15), + REG_OFFSET_NAME(gpr16), + REG_OFFSET_NAME(gpr17), + REG_OFFSET_NAME(gpr18), + REG_OFFSET_NAME(gpr19), + REG_OFFSET_NAME(gpr20), + REG_OFFSET_NAME(gpr21), + REG_OFFSET_NAME(gpr22), + REG_OFFSET_NAME(gpr23), + REG_OFFSET_NAME(gpr24), + REG_OFFSET_NAME(gpr25), + REG_OFFSET_NAME(gpr26), + REG_OFFSET_NAME(gpr27), + REG_OFFSET_NAME(gpr28), + REG_OFFSET_NAME(gpr29), + REG_OFFSET_NAME(gpr30), + REG_OFFSET_NAME(gpr31), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(orig_gpr11), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. 
If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return (addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} /* * Called by kernel/ptrace.c when detaching.. @@ -159,7 +293,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) + ptrace_report_syscall_entry(regs)) /* * Tracing decided this syscall should not happen. * We'll return a bogus call number to get an ENOSYS @@ -181,5 +315,5 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c index 0cd04d936a7a..000a9cc10e6f 100644 --- a/arch/openrisc/kernel/setup.c +++ b/arch/openrisc/kernel/setup.c @@ -113,21 +113,6 @@ static void print_cpuinfo(void) return; } - if (upr & SPR_UPR_DCP) - printk(KERN_INFO - "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n", - cpuinfo->dcache_size, cpuinfo->dcache_block_size, - cpuinfo->dcache_ways); - else - printk(KERN_INFO "-- dcache disabled\n"); - if (upr & SPR_UPR_ICP) - printk(KERN_INFO - "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n", - cpuinfo->icache_size, cpuinfo->icache_block_size, - cpuinfo->icache_ways); - else - printk(KERN_INFO "-- icache disabled\n"); - if (upr & SPR_UPR_DMP) printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n", 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2), @@ -152,47 +137,16 @@ static void print_cpuinfo(void) printk(KERN_INFO "-- custom unit(s)\n"); } -static struct device_node *setup_find_cpu_node(int cpu) -{ - u32 hwid; - struct device_node *cpun; - - for_each_of_cpu_node(cpun) { - if (of_property_read_u32(cpun, "reg", &hwid)) - continue; - if (hwid == cpu) - return cpun; - } - - return NULL; -} - void __init setup_cpuinfo(void) { struct device_node *cpu; - unsigned long iccfgr, dccfgr; - unsigned long cache_set_size; int cpu_id = smp_processor_id(); struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu_id]; - cpu = setup_find_cpu_node(cpu_id); + cpu = of_get_cpu_node(cpu_id, NULL); if (!cpu) panic("Couldn't find CPU%d in device 
tree...\n", cpu_id); - iccfgr = mfspr(SPR_ICCFGR); - cpuinfo->icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW); - cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3); - cpuinfo->icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7); - cpuinfo->icache_size = - cache_set_size * cpuinfo->icache_ways * cpuinfo->icache_block_size; - - dccfgr = mfspr(SPR_DCCFGR); - cpuinfo->dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW); - cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3); - cpuinfo->dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7); - cpuinfo->dcache_size = - cache_set_size * cpuinfo->dcache_ways * cpuinfo->dcache_block_size; - if (of_property_read_u32(cpu, "clock-frequency", &cpuinfo->clock_frequency)) { printk(KERN_WARNING @@ -255,7 +209,7 @@ static inline unsigned long extract_value(unsigned long reg, unsigned long mask) void calibrate_delay(void) { const int *val; - struct device_node *cpu = setup_find_cpu_node(smp_processor_id()); + struct device_node *cpu = of_get_cpu_node(smp_processor_id(), NULL); val = of_get_property(cpu, "clock-frequency", NULL); if (!val) @@ -270,6 +224,9 @@ void calibrate_delay(void) void __init setup_arch(char **cmdline_p) { + /* setup memblock allocator */ + setup_memory(); + unflatten_and_copy_device_tree(); setup_cpuinfo(); @@ -292,9 +249,8 @@ void __init setup_arch(char **cmdline_p) initrd_below_start_ok = 1; } #endif - - /* setup memblock allocator */ - setup_memory(); + /* perform jump_table sorting before paging_init locks down read only memory */ + jump_label_init(); /* paging_init() sets up the MMU and marks all pages as reserved */ paging_init(); @@ -309,14 +265,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) unsigned int vr, cpucfgr; unsigned int avr; unsigned int version; +#ifdef CONFIG_SMP struct cpuinfo_or1k *cpuinfo = v; + seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid); +#endif vr = mfspr(SPR_VR); cpucfgr = mfspr(SPR_CPUCFGR); -#ifdef CONFIG_SMP - seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid); -#endif if (vr & SPR_VR_UVRP) { vr = mfspr(SPR_VR2); version = vr & SPR_VR2_VER; @@ -335,14 +291,6 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "revision\t\t: %d\n", vr & SPR_VR_REV); } seq_printf(m, "frequency\t\t: %ld\n", loops_per_jiffy * HZ); - seq_printf(m, "dcache size\t\t: %d bytes\n", cpuinfo->dcache_size); - seq_printf(m, "dcache block size\t: %d bytes\n", - cpuinfo->dcache_block_size); - seq_printf(m, "dcache ways\t\t: %d\n", cpuinfo->dcache_ways); - seq_printf(m, "icache size\t\t: %d bytes\n", cpuinfo->icache_size); - seq_printf(m, "icache block size\t: %d bytes\n", - cpuinfo->icache_block_size); - seq_printf(m, "icache ways\t\t: %d\n", cpuinfo->icache_ways); seq_printf(m, "immu\t\t\t: %d entries, %lu ways\n", 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2), 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW)); diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 92c5b70740f5..f70a13ee0593 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -21,8 +21,9 @@ #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/stddef.h> -#include <linux/tracehook.h> +#include <linux/resume_user_mode.h> +#include <asm/fpu.h> #include <asm/processor.h> #include <asm/syscall.h> #include <asm/ucontext.h> @@ -34,6 +35,42 @@ struct rt_sigframe { unsigned char retcode[16]; /* trampoline code */ }; +asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs); + +asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, + 
int syscall); + +#ifdef CONFIG_FPU +static long restore_fp_state(struct sigcontext __user *sc) +{ + long err; + + err = __copy_from_user(¤t->thread.fpcsr, &sc->fpcsr, sizeof(unsigned long)); + if (unlikely(err)) + return err; + + /* Restore the FPU state */ + restore_fpu(current); + + return 0; +} + +static long save_fp_state(struct sigcontext __user *sc) +{ + long err; + + /* Sync the user FPU state so we can copy to sigcontext */ + save_fpu(current); + + err = __copy_to_user(&sc->fpcsr, ¤t->thread.fpcsr, sizeof(unsigned long)); + + return err; +} +#else +#define save_fp_state(sc) (0) +#define restore_fp_state(sc) (0) +#endif + static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { @@ -50,6 +87,7 @@ static int restore_sigcontext(struct pt_regs *regs, err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)); err |= __copy_from_user(®s->pc, &sc->regs.pc, sizeof(unsigned long)); err |= __copy_from_user(®s->sr, &sc->regs.sr, sizeof(unsigned long)); + err |= restore_fp_state(sc); /* make sure the SM-bit is cleared so user-mode cannot fool us */ regs->sr &= ~SPR_SR_SM; @@ -112,6 +150,7 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long)); err |= __copy_to_user(&sc->regs.pc, ®s->pc, sizeof(unsigned long)); err |= __copy_to_user(&sc->regs.sr, ®s->sr, sizeof(unsigned long)); + err |= save_fp_state(sc); return err; } @@ -205,6 +244,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) { int ret; + rseq_signal_deliver(ksig, regs); + ret = setup_rt_frame(ksig, sigmask_to_save(), regs); signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); @@ -222,7 +263,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) * mode below. 
*/ -int do_signal(struct pt_regs *regs, int syscall) +static int do_signal(struct pt_regs *regs, int syscall) { struct ksignal ksig; unsigned long continue_addr = 0; @@ -309,7 +350,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) } syscall = 0; } else { - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } } local_irq_disable(); diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c index 27041db2c8b0..86da4bc5ee0b 100644 --- a/arch/openrisc/kernel/smp.c +++ b/arch/openrisc/kernel/smp.c @@ -23,6 +23,8 @@ #include <asm/cacheflush.h> #include <asm/time.h> +asmlinkage __init void secondary_start_kernel(void); + static void (*smp_cross_call)(const struct cpumask *, unsigned int); unsigned long secondary_release = -1; @@ -55,10 +57,6 @@ static void boot_secondary(unsigned int cpu, struct task_struct *idle) spin_unlock(&boot_lock); } -void __init smp_prepare_boot_cpu(void) -{ -} - void __init smp_init_cpus(void) { struct device_node *cpu; @@ -173,7 +171,7 @@ void handle_IPI(unsigned int ipi_msg) } } -void smp_send_reschedule(int cpu) +void arch_smp_send_reschedule(int cpu) { smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } @@ -197,12 +195,6 @@ void smp_send_stop(void) smp_call_function(stop_this_cpu, NULL, 0); } -/* not supported, yet */ -int setup_profiling_timer(unsigned int multiplier) -{ - return -EINVAL; -} - void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) { smp_cross_call = fn; diff --git a/arch/openrisc/kernel/sys_call_table.c b/arch/openrisc/kernel/sys_call_table.c index 3d18008310e4..b2f57e2538f7 100644 --- a/arch/openrisc/kernel/sys_call_table.c +++ b/arch/openrisc/kernel/sys_call_table.c @@ -16,9 +16,14 @@ #include <asm/syscalls.h> -#undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) + +#define sys_mmap2 sys_mmap_pgoff +#define sys_clone __sys_clone +#define sys_clone3 __sys_clone3 +#define sys_fork __sys_fork void *sys_call_table[__NR_syscalls] = { -#include <asm/unistd.h> +#include <asm/syscall_table_32.h> }; diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c index 6d18989d63d0..764c7bfb5df3 100644 --- a/arch/openrisc/kernel/time.c +++ b/arch/openrisc/kernel/time.c @@ -23,6 +23,9 @@ #include <linux/of_clk.h> #include <asm/cpuinfo.h> +#include <asm/time.h> + +irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs); /* Test the timer ticks to count, used in sync routine */ inline void openrisc_timer_set(unsigned long count) @@ -61,7 +64,7 @@ static int openrisc_timer_set_next_event(unsigned long delta, * timers) we cannot enable the PERIODIC feature. The tick timer can run using * one-shot events, so no problem. 
*/ -DEFINE_PER_CPU(struct clock_event_device, clockevent_openrisc_timer); +static DEFINE_PER_CPU(struct clock_event_device, clockevent_openrisc_timer); void openrisc_clockevent_init(void) { diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 0446a3c34372..c195be9cc9fc 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -30,20 +30,38 @@ #include <linux/kallsyms.h> #include <linux/uaccess.h> +#include <asm/bug.h> +#include <asm/fpu.h> #include <asm/io.h> +#include <asm/processor.h> #include <asm/unwinder.h> #include <asm/sections.h> -int kstack_depth_to_print = 0x180; int lwa_flag; -unsigned long __user *lwa_addr; +static unsigned long __user *lwa_addr; -void print_trace(void *data, unsigned long addr, int reliable) +asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector); +asmlinkage void do_trap(struct pt_regs *regs, unsigned long address); +asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address); +asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address); +asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address); +asmlinkage void do_illegal_instruction(struct pt_regs *regs, + unsigned long address); + +static void print_trace(void *data, unsigned long addr, int reliable) { const char *loglvl = data; - printk("%s[<%p>] %s%pS\n", loglvl, (void *) addr, reliable ? "" : "? ", - (void *) addr); + pr_info("%s[<%p>] %s%pS\n", loglvl, (void *) addr, reliable ? "" : "? ", + (void *) addr); +} + +static void print_data(unsigned long base_addr, unsigned long word, int i) +{ + if (i == 0) + pr_info("(%08lx:)\t%08lx", base_addr + (i * 4), word); + else + pr_info(" %08lx:\t%08lx", base_addr + (i * 4), word); } /* displays a short stack trace */ @@ -52,7 +70,7 @@ void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl if (esp == NULL) esp = (unsigned long *)&esp; - printk("%sCall trace:\n", loglvl); + pr_info("%sCall trace:\n", loglvl); unwind_stack((void *)loglvl, esp, print_trace); } @@ -66,134 +84,72 @@ void show_registers(struct pt_regs *regs) if (user_mode(regs)) in_kernel = 0; - printk("CPU #: %d\n" - " PC: %08lx SR: %08lx SP: %08lx\n", - smp_processor_id(), regs->pc, regs->sr, regs->sp); - printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n", - 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]); - printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n", - regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]); - printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n", - regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]); - printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n", - regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]); - printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n", - regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]); - printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n", - regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]); - printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n", - regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]); - printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n", - regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]); - printk(" RES: %08lx oGPR11: %08lx\n", - regs->gpr[11], regs->orig_gpr11); - - printk("Process %s (pid: %d, stackpage=%08lx)\n", - current->comm, current->pid, (unsigned long)current); + pr_info("CPU #: %d\n" + " PC: %08lx SR: %08lx SP: %08lx\n", + 
smp_processor_id(), regs->pc, regs->sr, regs->sp); + pr_info("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n", + 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]); + pr_info("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n", + regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]); + pr_info("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n", + regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]); + pr_info("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n", + regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]); + pr_info("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n", + regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]); + pr_info("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n", + regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]); + pr_info("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n", + regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]); + pr_info("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n", + regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]); + pr_info(" RES: %08lx oGPR11: %08lx\n", + regs->gpr[11], regs->orig_gpr11); + + pr_info("Process %s (pid: %d, stackpage=%08lx)\n", + current->comm, current->pid, (unsigned long)current); /* * When in-kernel, we also print out the stack and code at the * time of the fault.. */ if (in_kernel) { - printk("\nStack: "); + pr_info("\nStack: "); show_stack(NULL, (unsigned long *)esp, KERN_EMERG); - printk("\nCode: "); - if (regs->pc < PAGE_OFFSET) - goto bad; + if (esp < PAGE_OFFSET) + goto bad_stack; - for (i = -24; i < 24; i++) { - unsigned char c; - if (__get_user(c, &((unsigned char *)regs->pc)[i])) { -bad: - printk(" Bad PC value."); + pr_info("\n"); + for (i = -8; i < 24; i += 1) { + unsigned long word; + + if (__get_user(word, &((unsigned long *)esp)[i])) { +bad_stack: + pr_info(" Bad Stack value."); break; } - if (i == 0) - printk("(%02x) ", c); - else - printk("%02x ", c); + print_data(esp, word, i); } - } - printk("\n"); -} -void nommu_dump_state(struct pt_regs *regs, - unsigned long ea, unsigned long vector) -{ - int i; - unsigned long addr, stack = regs->sp; - - printk("\n\r[nommu_dump_state] :: ea %lx, vector %lx\n\r", ea, vector); - - printk("CPU #: %d\n" - " PC: %08lx SR: %08lx SP: %08lx\n", - 0, regs->pc, regs->sr, regs->sp); - printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n", - 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]); - printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n", - regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]); - printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n", - regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]); - printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n", - regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]); - printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n", - regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]); - printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n", - regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]); - printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n", - regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]); - printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n", - regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]); - printk(" RES: %08lx oGPR11: %08lx\n", - regs->gpr[11], regs->orig_gpr11); - - printk("Process %s (pid: %d, stackpage=%08lx)\n", - ((struct task_struct *)(__pa(current)))->comm, - ((struct task_struct 
*)(__pa(current)))->pid, - (unsigned long)current); - - printk("\nStack: "); - printk("Stack dump [0x%08lx]:\n", (unsigned long)stack); - for (i = 0; i < kstack_depth_to_print; i++) { - if (((long)stack & (THREAD_SIZE - 1)) == 0) - break; - stack++; - - printk("%lx :: sp + %02d: 0x%08lx\n", stack, i * 4, - *((unsigned long *)(__pa(stack)))); - } - printk("\n"); - - printk("Call Trace: "); - i = 1; - while (((long)stack & (THREAD_SIZE - 1)) != 0) { - addr = *((unsigned long *)__pa(stack)); - stack++; - - if (kernel_text_address(addr)) { - if (i && ((i % 6) == 0)) - printk("\n "); - printk(" [<%08lx>]", addr); - i++; - } - } - printk("\n"); + pr_info("\nCode: "); + if (regs->pc < PAGE_OFFSET) + goto bad; - printk("\nCode: "); + for (i = -6; i < 6; i += 1) { + unsigned long word; - for (i = -24; i < 24; i++) { - unsigned char c; - c = ((unsigned char *)(__pa(regs->pc)))[i]; + if (__get_user(word, &((unsigned long *)regs->pc)[i])) { +bad: + pr_info(" Bad PC value."); + break; + } - if (i == 0) - printk("(%02x) ", c); - else - printk("%02x ", c); + print_data(regs->pc, word, i); + } } - printk("\n"); + pr_info("\n"); } /* This is normally the 'Oops' routine */ @@ -201,10 +157,10 @@ void __noreturn die(const char *str, struct pt_regs *regs, long err) { console_verbose(); - printk("\n%s#: %04lx\n", str, err & 0xffff); + pr_emerg("\n%s#: %04lx\n", str, err & 0xffff); show_registers(regs); #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION - printk("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n"); + pr_emerg("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n"); /* shut down interrupts */ local_irq_disable(); @@ -215,25 +171,53 @@ void __noreturn die(const char *str, struct pt_regs *regs, long err) make_task_dead(SIGSEGV); } -/* This is normally the 'Oops' routine */ -void die_if_kernel(const char *str, struct pt_regs *regs, long err) +asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector) { - if (user_mode(regs)) - return; - - die(str, regs, err); + pr_emerg("Unable to handle exception at EA =0x%x, vector 0x%x", + ea, vector); + die("Oops", regs, 9); } -void unhandled_exception(struct pt_regs *regs, int ea, int vector) +asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address) { - printk("Unable to handle exception at EA =0x%x, vector 0x%x", - ea, vector); - die("Oops", regs, 9); + if (user_mode(regs)) { + int code = FPE_FLTUNK; +#ifdef CONFIG_FPU + unsigned long fpcsr; + + save_fpu(current); + fpcsr = current->thread.fpcsr; + + if (fpcsr & SPR_FPCSR_IVF) + code = FPE_FLTINV; + else if (fpcsr & SPR_FPCSR_OVF) + code = FPE_FLTOVF; + else if (fpcsr & SPR_FPCSR_UNF) + code = FPE_FLTUND; + else if (fpcsr & SPR_FPCSR_DZF) + code = FPE_FLTDIV; + else if (fpcsr & SPR_FPCSR_IXF) + code = FPE_FLTRES; + + /* Clear all flags */ + current->thread.fpcsr &= ~SPR_FPCSR_ALLF; + restore_fpu(current); +#endif + force_sig_fault(SIGFPE, code, (void __user *)regs->pc); + } else { + pr_emerg("KERNEL: Illegal fpe exception 0x%.8lx\n", regs->pc); + die("Die:", regs, SIGFPE); + } } asmlinkage void do_trap(struct pt_regs *regs, unsigned long address) { - force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc); + if (user_mode(regs)) { + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc); + } else { + pr_emerg("KERNEL: Illegal trap exception 0x%.8lx\n", regs->pc); + die("Die:", regs, SIGILL); + } } asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address) @@ -242,8 +226,7 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long 
address) /* Send a SIGBUS */ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)address); } else { - printk("KERNEL: Unaligned Access 0x%.8lx\n", address); - show_registers(regs); + pr_emerg("KERNEL: Unaligned Access 0x%.8lx\n", address); die("Die:", regs, address); } @@ -255,8 +238,7 @@ asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address) /* Send a SIGBUS */ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address); } else { /* Kernel mode */ - printk("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address); - show_registers(regs); + pr_emerg("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address); die("Die:", regs, address); } } @@ -450,9 +432,8 @@ asmlinkage void do_illegal_instruction(struct pt_regs *regs, /* Send a SIGILL */ force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)address); } else { /* Kernel mode */ - printk("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n", - address); - show_registers(regs); + pr_emerg("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n", + address); die("Die:", regs, address); } } diff --git a/arch/openrisc/kernel/unwinder.c b/arch/openrisc/kernel/unwinder.c index 8ae15c2c1845..c6ad6f867a6a 100644 --- a/arch/openrisc/kernel/unwinder.c +++ b/arch/openrisc/kernel/unwinder.c @@ -25,7 +25,7 @@ struct or1k_frameinfo { /* * Verify a frameinfo structure. The return address should be a valid text * address. The frame pointer may be null if its the last frame, otherwise - * the frame pointer should point to a location in the stack after the the + * the frame pointer should point to a location in the stack after the * top of the next frame up. */ static inline int or1k_frameinfo_valid(struct or1k_frameinfo *frameinfo) diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index d5c7bb0fae57..049bff45f612 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -50,9 +50,9 @@ SECTIONS .text : AT(ADDR(.text) - LOAD_OFFSET) { _stext = .; + HEAD_TEXT TEXT_TEXT SCHED_TEXT - CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT @@ -84,8 +84,6 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_begin = .; - HEAD_TEXT_SECTION - /* Page aligned */ INIT_TEXT_SECTION(PAGE_SIZE) diff --git a/arch/openrisc/lib/delay.c b/arch/openrisc/lib/delay.c index 036ae57180ef..5e89e4131304 100644 --- a/arch/openrisc/lib/delay.c +++ b/arch/openrisc/lib/delay.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/export.h> #include <linux/init.h> +#include <linux/timex.h> #include <asm/param.h> #include <asm/delay.h> #include <asm/timex.h> diff --git a/arch/openrisc/lib/memcpy.c b/arch/openrisc/lib/memcpy.c index fe2177628fd9..e2af9b510804 100644 --- a/arch/openrisc/lib/memcpy.c +++ b/arch/openrisc/lib/memcpy.c @@ -101,7 +101,7 @@ void *memcpy(void *dest, __const void *src, __kernel_size_t n) */ void *memcpy(void *dest, __const void *src, __kernel_size_t n) { - unsigned char *d = (unsigned char *)dest, *s = (unsigned char *)src; + unsigned char *d, *s; uint32_t *dest_w = (uint32_t *)dest, *src_w = (uint32_t *)src; /* If both source and dest are word aligned copy words */ diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c index 534a52ec5e66..f33df46dae4e 100644 --- a/arch/openrisc/mm/cache.c +++ b/arch/openrisc/mm/cache.c @@ -14,44 +14,86 @@ #include <asm/spr_defs.h> #include <asm/cache.h> #include <asm/cacheflush.h> +#include <asm/cpuinfo.h> #include <asm/tlbflush.h> -static __always_inline void cache_loop(struct page *page, const unsigned int reg) +/* + * Check if the cache component exists. 
+ */
+bool cpu_cache_is_present(const unsigned int cache_type)
 {
-	unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
-	unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
+	unsigned long upr = mfspr(SPR_UPR);
+	unsigned long mask = SPR_UPR_UP | cache_type;
+
+	return !((upr & mask) ^ mask);
+}
+
+static __always_inline void cache_loop(unsigned long paddr, unsigned long end,
+				       const unsigned short reg, const unsigned int cache_type)
+{
+	if (!cpu_cache_is_present(cache_type))
+		return;
 
-	while (line < paddr + PAGE_SIZE) {
-		mtspr(reg, line);
-		line += L1_CACHE_BYTES;
+	while (paddr < end) {
+		mtspr(reg, paddr);
+		paddr += L1_CACHE_BYTES;
 	}
 }
 
+static __always_inline void cache_loop_page(struct page *page, const unsigned short reg,
+					    const unsigned int cache_type)
+{
+	unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+	unsigned long end = paddr + PAGE_SIZE;
+
+	paddr &= ~(L1_CACHE_BYTES - 1);
+
+	cache_loop(paddr, end, reg, cache_type);
+}
+
 void local_dcache_page_flush(struct page *page)
 {
-	cache_loop(page, SPR_DCBFR);
+	cache_loop_page(page, SPR_DCBFR, SPR_UPR_DCP);
 }
 EXPORT_SYMBOL(local_dcache_page_flush);
 
 void local_icache_page_inv(struct page *page)
 {
-	cache_loop(page, SPR_ICBIR);
+	cache_loop_page(page, SPR_ICBIR, SPR_UPR_ICP);
 }
 EXPORT_SYMBOL(local_icache_page_inv);
 
+void local_dcache_range_flush(unsigned long start, unsigned long end)
+{
+	cache_loop(start, end, SPR_DCBFR, SPR_UPR_DCP);
+}
+
+void local_dcache_range_inv(unsigned long start, unsigned long end)
+{
+	cache_loop(start, end, SPR_DCBIR, SPR_UPR_DCP);
+}
+
+void local_icache_range_inv(unsigned long start, unsigned long end)
+{
+	cache_loop(start, end, SPR_ICBIR, SPR_UPR_ICP);
+}
+
 void update_cache(struct vm_area_struct *vma, unsigned long address,
 	pte_t *pte)
 {
 	unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
-	struct page *page = pfn_to_page(pfn);
-	int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
+	struct folio *folio = page_folio(pfn_to_page(pfn));
+	int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
 
 	/*
 	 * Since icaches do not snoop for updated data on OpenRISC, we
 	 * must write back and invalidate any dirty pages manually. We
 	 * can skip data pages, since they will not end up in icaches.
 	 */
-	if ((vma->vm_flags & VM_EXEC) && dirty)
-		sync_icache_dcache(page);
-}
+	if ((vma->vm_flags & VM_EXEC) && dirty) {
+		unsigned int nr = folio_nr_pages(folio);
+
+		while (nr--)
+			sync_icache_dcache(folio_page(folio, nr));
+	}
+}
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 80bb66ad42f6..29e232d78d82 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -18,21 +18,21 @@
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
 
+#include <asm/bug.h>
+#include <asm/mmu_context.h>
 #include <asm/siginfo.h>
 #include <asm/signal.h>
 
 #define NUM_TLB_ENTRIES 64
 #define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
 
-unsigned long pte_misses;	/* updated by do_page_fault() */
-unsigned long pte_errors;	/* updated by do_page_fault() */
-
 /* __PHX__ :: - check the vmalloc_fault in do_page_fault()
  *            - also look into include/asm/mmu_context.h
  */
 volatile pgd_t *current_pgd[NR_CPUS];
 
-extern void __noreturn die(char *, struct pt_regs *, long);
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
+			      unsigned long vector, int write_acc);
 
 /*
  * This routine handles page faults. It determines the address,
@@ -129,8 +129,9 @@ retry:
 		if (address + PAGE_SIZE < regs->sp)
 			goto bad_area;
 	}
-	if (expand_stack(vma, address))
-		goto bad_area;
+	vma = expand_stack(mm, address);
+	if (!vma)
+		goto bad_area_nosemaphore;
 
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
@@ -164,7 +165,14 @@ good_area:
 
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
+		return;
+	}
+
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
 		return;
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -223,8 +231,6 @@ no_context:
 	{
 		const struct exception_table_entry *entry;
 
-		__asm__ __volatile__("l.nop 42");
-
 		if ((entry = search_exception_tables(regs->pc)) != NULL) {
 			/* Adjust the instruction pointer in the stackframe */
 			regs->pc = entry->fixup;
@@ -252,9 +258,6 @@ no_context:
 	 */
 
 out_of_memory:
-	__asm__ __volatile__("l.nop 42");
-	__asm__ __volatile__("l.nop 1");
-
 	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 97305bde1b16..9382d9a0ec78 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -35,11 +35,10 @@
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
+#include <asm/cacheflush.h>
 
 int mem_init_done;
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 static void __init zone_sizes_init(void)
 {
 	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
@@ -125,8 +124,6 @@ static void __init map_ram(void)
 
 void __init paging_init(void)
 {
-	extern void tlb_init(void);
-
 	int i;
 
 	printk(KERN_INFO "Setting up paging and PTEs.\n");
@@ -180,8 +177,8 @@ void __init paging_init(void)
 	barrier();
 
 	/* Invalidate instruction caches after code modification */
-	mtspr(SPR_ICBIR, 0x900);
-	mtspr(SPR_ICBIR, 0xa00);
+	local_icache_block_inv(0x900);
+	local_icache_block_inv(0xa00);
 
 	/* New TLB miss handlers and kernel page tables are in now place.
 	 * Make sure that page flags get updated for all pages in TLB by
@@ -197,16 +194,71 @@ void __init mem_init(void)
 {
 	BUG_ON(!mem_map);
 
-	max_mapnr = max_low_pfn;
-	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
-
 	/* clear the zero-page */
 	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 
-	/* this will put all low memory onto the freelists */
-	memblock_free_all();
-
 	printk("mem_init_done ...........................................\n");
 	mem_init_done = 1;
 	return;
 }
+
+static int __init map_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
+{
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	p4d = p4d_offset(pgd_offset_k(va), va);
+	pud = pud_offset(p4d, va);
+	pmd = pmd_offset(pud, va);
+	pte = pte_alloc_kernel(pmd, va);
+
+	if (pte == NULL)
+		return -ENOMEM;
+
+	if (pgprot_val(prot))
+		set_pte_at(&init_mm, va, pte, pfn_pte(pa >> PAGE_SHIFT, prot));
+	else
+		pte_clear(&init_mm, va, pte);
+
+	local_flush_tlb_page(NULL, va);
+	return 0;
+}
+
+/*
+ * __set_fixmap must now support both EARLYCON and TEXT_POKE mappings,
+ * which are used at different stages of kernel execution.
+ */
+void __set_fixmap(enum fixed_addresses idx,
+		  phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long address = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	map_page(address, phys, prot);
+}
+
+static const pgprot_t protection_map[16] = {
+	[VM_NONE] = PAGE_NONE,
+	[VM_READ] = PAGE_READONLY_X,
+	[VM_WRITE] = PAGE_COPY,
+	[VM_WRITE | VM_READ] = PAGE_COPY_X,
+	[VM_EXEC] = PAGE_READONLY,
+	[VM_EXEC | VM_READ] = PAGE_READONLY_X,
+	[VM_EXEC | VM_WRITE] = PAGE_COPY,
+	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
+	[VM_SHARED] = PAGE_NONE,
+	[VM_SHARED | VM_READ] = PAGE_READONLY_X,
+	[VM_SHARED | VM_WRITE] = PAGE_SHARED,
+	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X,
+	[VM_SHARED | VM_EXEC] = PAGE_READONLY,
+	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
+	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index daae13a76743..3b352f97fecb 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -22,89 +22,7 @@
 
 extern int mem_init_done;
 
-static unsigned int fixmaps_used __initdata;
-
 /*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
-{
-	phys_addr_t p;
-	unsigned long v;
-	unsigned long offset, last_addr;
-	struct vm_struct *area = NULL;
-
-	/* Don't allow wraparound or zero size */
-	last_addr = addr + size - 1;
-	if (!size || last_addr < addr)
-		return NULL;
-
-	/*
-	 * Mappings have to be page-aligned
-	 */
-	offset = addr & ~PAGE_MASK;
-	p = addr & PAGE_MASK;
-	size = PAGE_ALIGN(last_addr + 1) - p;
-
-	if (likely(mem_init_done)) {
-		area = get_vm_area(size, VM_IOREMAP);
-		if (!area)
-			return NULL;
-		v = (unsigned long)area->addr;
-	} else {
-		if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
-			return NULL;
-		v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
-		fixmaps_used += (size >> PAGE_SHIFT);
-	}
-
-	if (ioremap_page_range(v, v + size, p,
-			__pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
-		if (likely(mem_init_done))
-			vfree(area->addr);
-		else
-			fixmaps_used -= (size >> PAGE_SHIFT);
-		return NULL;
-	}
-
-	return (void __iomem *)(offset + (char *)v);
-}
-EXPORT_SYMBOL(ioremap);
-
-void iounmap(void __iomem *addr)
-{
-	/* If the page is from the fixmap pool then we just clear out
-	 * the fixmap mapping.
-	 */
-	if (unlikely((unsigned long)addr > FIXADDR_START)) {
-		/* This is a bit broken... we don't really know
-		 * how big the area is so it's difficult to know
-		 * how many fixed pages to invalidate...
-		 * just flush tlb and hope for the best...
-		 * consider this a FIXME
-		 *
-		 * Really we should be clearing out one or more page
-		 * table entries for these virtual addresses so that
-		 * future references cause a page fault... for now, we
-		 * rely on two things:
-		 *   i) this code never gets called on known boards
-		 *  ii) invalid accesses to the freed areas aren't made
-		 */
-		flush_tlb_all();
-		return;
-	}
-
-	return vfree((void *)(PAGE_MASK & (unsigned long)addr));
-}
-EXPORT_SYMBOL(iounmap);
-
-/**
  * OK, this one's a bit tricky... ioremap can get called before memory is
  * initialized (early serial console does this) and will want to alloc a page
  * for its mapping. No userspace pages will ever get allocated before memory
@@ -118,12 +36,9 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
 	pte_t *pte;
 
 	if (likely(mem_init_done)) {
-		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+		pte = __pte_alloc_one_kernel(mm);
 	} else {
-		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-		if (!pte)
-			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-			      __func__, PAGE_SIZE, PAGE_SIZE);
+		pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
 	}
 
 	return pte;
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index 2b6feabf6381..3115f2e4f864 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -128,7 +128,7 @@ void local_flush_tlb_mm(struct mm_struct *mm)
 
 	/* Was seeing bugs with the mm struct passed to us. Scrapped most of
 	   this function. */
-	/* Several architctures do this */
+	/* Several architectures do this */
 	local_flush_tlb_all();
 }
 
@@ -182,12 +182,3 @@ void destroy_context(struct mm_struct *mm)
 
 	flush_tlb_mm(mm);
 }
-
-/* called once during VM initialization, from init.c */
-
-void __init tlb_init(void)
-{
-	/* Do nothing... */
-	/* invalidate the entire TLB */
-	/* flush_tlb_all(); */
-}
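A note on the cache.c hunks above: every flush/invalidate loop is now gated on cpu_cache_is_present(), which reads the unit-present register (SPR_UPR) and requires both the "UPR present" bit and the per-cache bit to be set, so cache maintenance quietly becomes a no-op on cores built without that cache. The test !((upr & mask) ^ mask) is true exactly when every bit of mask is set in upr. Below is a small stand-alone C sketch of that bit test; the bit positions and register values are invented for the example and are not the real SPR_UPR layout.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the "UPR present" and "dcache present" bits. */
#define UPR_UP   (1u << 0)
#define UPR_DCP  (1u << 1)

/* Same shape as the kernel helper: true iff every bit of mask is set in upr. */
static bool unit_present(unsigned int upr, unsigned int mask)
{
	return !((upr & mask) ^ mask);
}

int main(void)
{
	unsigned int upr_with_dcache = UPR_UP | UPR_DCP;
	unsigned int upr_without_dcache = UPR_UP;

	/* Prints 1: both required bits are present. */
	printf("%d\n", unit_present(upr_with_dcache, UPR_UP | UPR_DCP));
	/* Prints 0: the dcache bit is missing, so the cache loop would be skipped. */
	printf("%d\n", unit_present(upr_without_dcache, UPR_UP | UPR_DCP));
	return 0;
}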

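Likewise, the __set_fixmap()/map_page() additions in init.c back the EARLYCON and TEXT_POKE mappings mentioned in the comment: each fixmap index owns one page of virtual address space at a fixed, compile-time-known location, and __set_fixmap() installs or clears the PTE for that slot. The usual asm-generic convention is that slots are laid out downward from a fixed top address, one page per index. Below is a self-contained sketch of that index-to-address arithmetic; FIXADDR_TOP and the slot names are made up for illustration and are not taken from the OpenRISC headers.

#include <stdio.h>

#define PAGE_SHIFT	13UL		/* OpenRISC uses 8 KiB pages */
#define FIXADDR_TOP	0xffffc000UL	/* invented top of the fixmap region */

enum fixed_addresses {			/* invented slot names for the example */
	FIX_EARLYCON_MEM_BASE,
	FIX_TEXT_POKE0,
	__end_of_fixed_addresses
};

/* asm-generic style: index 0 maps at FIXADDR_TOP, each later index one page lower. */
static unsigned long fix_to_virt(unsigned int idx)
{
	return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
	printf("earlycon slot:  0x%lx\n", fix_to_virt(FIX_EARLYCON_MEM_BASE));
	printf("text-poke slot: 0x%lx\n", fix_to_virt(FIX_TEXT_POKE0));
	return 0;
}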